import json
import random
import numpy as np
import pandas as pd
import requests
import pywt
import matplotlib
matplotlib.rcParams['font.family'] = 'SimHei'  # CJK-capable font for plot labels
import matplotlib.pyplot as plt
from io import BytesIO, StringIO
from flask import Flask, request, jsonify
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.ensemble import IsolationForest
from scipy.stats import linregress, chi2  # chi2 is used by the Mahalanobis diagnosis below
from scipy.spatial.distance import euclidean
from scipy.fft import fft, ifft
from scipy.interpolate import interp1d

app = Flask(__name__)
@app.route('/hdck', methods=['POST'])
def process_data():
    try:
        # Read the request payload
        data = request.json
        url = data.get('url', '')
        # Fetch the CSV data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
        # Run the sliding-window fault detector
        detection_status = sliding_window_fault_detection(df)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'data': detection_status,
            'msg': '故障检测完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
# Batch variant: read and concatenate CSV files from several URLs
def read_csv_from_urls(urls):
    try:
        index = 0
        res = np.array([])
        for url in urls:
            response = requests.get(url)
            response.raise_for_status()
            csv_file = BytesIO(response.content)
            data1 = pd.read_csv(csv_file, encoding='utf-8')
            data1 = np.array(data1)[1:, :]  # drop the first row (e.g. a units/header remnant)
            if index == 0:
                res = data1
                index = 1
            else:
                res = np.append(res, data1, axis=0)
        data = res
        print("CSV 文件读取成功")
        return data
    except requests.exceptions.RequestException as e:
        print(f"获取文件时发生错误: {e}")
        return None
    except Exception as e:
        print(f"读取 CSV 文件时发生错误: {e}")
        return None
# Read CSV data from a single URL
def read_csv_from_url(url):
    try:
        response = requests.get(url)
        response.raise_for_status()
        print("文件获取成功")
        csv_file = BytesIO(response.content)
        data = pd.read_csv(csv_file, encoding='utf-8')
        print("CSV 文件读取成功")
        return data
    except requests.exceptions.RequestException as e:
        print(f"获取文件时发生错误: {e}")
        return None
    except Exception as e:
        print(f"读取 CSV 文件时发生错误: {e}")
        return None
# Sliding-window fault detection (combined check over all parameters)
def sliding_window_fault_detection(data, window_size=5, threshold=2):
    """ Fault detection based on rolling mean and standard deviation over a sliding window. """
    # Hard limits for the combined check; a tuple denotes an allowed (low, high) interval
    thresholds = {
        '发动机排气温度': 1000,
        '燃油流量': 200,
        '高压压气机转子转数': 8000,
        '风扇转子转速': 3000,
        '氧气输出流量': 100,
        '氧气浓度': 15,
        '供氧压力': 1.5,
        '氧分压值': (20, 30)
    }
    # Check every monitored parameter
    for param, limit in thresholds.items():
        if param in data.columns:
            rolling_mean = data[param].rolling(window=window_size).mean()
            rolling_std = data[param].rolling(window=window_size).std()
            # Flag anomalies
            if isinstance(limit, tuple):  # interval limit: values must stay inside (low, high)
                anomalies = (data[param] < limit[0]) | (data[param] > limit[1])
            else:
                # Scalar limit: flag rolling-window outliers as well as values above the hard limit
                anomalies = ((data[param] - rolling_mean).abs() > threshold * rolling_std) | (data[param] > limit)
            if anomalies.any():
                return "状态异常"  # any abnormal parameter makes the overall status abnormal
    return "无故障"  # all parameters within bounds
# k-Means-based fault detection (combined check)
@app.route('/kmeans', methods=['POST'])
def process_data_kmeans():
    try:
        # Read the request payload
        data = request.json
        url = data.get('url', '')
        # Fetch the CSV data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
        # Run the k-Means fault detector
        detection_status = kmeans_fault_detection(df)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'data': detection_status,
            'msg': '故障检测完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def kmeans_fault_detection(data):
    """ k-Means-based fault detection. """
    features = ['发动机排气温度', '燃油流量', '高压压气机转子转数', '风扇转子转速',
                '氧气输出流量', '氧气浓度', '供氧压力', '氧分压值']
    valid_features = [col for col in features if col in data.columns]
    if not valid_features:
        return "状态正常"  # no usable feature columns
    X = data[valid_features].dropna()
    kmeans = KMeans(n_clusters=2, random_state=0, n_init=10).fit(X)
    cluster_centers = kmeans.cluster_centers_
    # Distance of every sample to every cluster centre
    distances = np.linalg.norm(X.values[:, None, :] - cluster_centers, axis=2)
    # Distance of each sample to its nearest (own) centre
    min_distances = distances.min(axis=1)
    # Flag samples whose distance to their own centre is extreme (3-sigma rule);
    # note that a raw percentile cut would fire on every dataset by construction
    if (min_distances > min_distances.mean() + 3 * min_distances.std()).any():
        return "状态异常"
    return "无故障"
# Threshold-based fault diagnosis
@app.route('/yzpd', methods=['POST'])
def process_threshold():
    try:
        data = request.json
        url = data.get('url', '')
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
        # Run the diagnosis
        diagnosis = threshold_based_diagnosis(df)
        return jsonify({
            'status': 200,
            'data': diagnosis,
            'msg': '故障诊断完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def threshold_based_diagnosis(data):
    """ Threshold-based fault diagnosis. """
    thresholds = {
        '发动机排气温度': 1000,
        '燃油流量': 200,
        '高压压气机转子转数': 8000,
        '风扇转子转速': 3000,
        '氧气输出流量': 100,
        '氧气浓度': 15,
        '供氧压力': 1.5,
        '氧分压值': (20, 30)
    }
    abnormal_params = []
    for param, limit in thresholds.items():
        if param in data.columns:
            values = data[param]
            if isinstance(limit, tuple):  # interval limit
                if (values < limit[0]).any() or (values > limit[1]).any():
                    abnormal_params.append(f"{param}有误")
            else:  # scalar upper limit
                if (values > limit).any():
                    abnormal_params.append(f"{param}有误")
    return abnormal_params if abnormal_params else ["正常"]
# Method 2: Mahalanobis-distance-based fault diagnosis
@app.route('/msjl', methods=['POST'])
def process_mahalanobis():
    try:
        data = request.json
        url = data.get('url', '')
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
        # Run the diagnosis
        diagnosis = mahalanobis_based_diagnosis(df)
        return jsonify({
            'status': 200,
            'data': diagnosis,
            'msg': '故障诊断完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def mahalanobis_based_diagnosis(data):
    """ Mahalanobis-distance-based fault diagnosis. """
    features = ['发动机排气温度', '燃油流量', '高压压气机转子转数', '风扇转子转速',
                '氧气输出流量', '氧气浓度', '供氧压力', '氧分压值']
    valid_features = [col for col in features if col in data.columns]
    if not valid_features:
        return ["正常"]
    # Make sure there is enough data
    values = data[valid_features].dropna()
    if values.empty or len(values) < 2:  # covariance needs at least two samples
        return ["数据不足,无法计算马氏距离"]
    # Mean vector and covariance matrix
    try:
        mean = values.mean().values
        cov_matrix = np.cov(values.T)
        # The covariance matrix must be invertible
        if np.linalg.det(cov_matrix) == 0:
            return ["协方差矩阵不可逆,无法计算马氏距离"]
        cov_inv = np.linalg.inv(cov_matrix)
    except Exception as e:
        return [f"协方差计算错误: {e}"]
    # Squared Mahalanobis distances of multivariate-normal data follow a
    # chi-squared distribution with one degree of freedom per feature, so the
    # 97.5% quantile gives a fixed, principled anomaly cut-off
    threshold = np.sqrt(chi2.ppf(0.975, df=len(valid_features)))
    abnormal_params = []
    for _, row in values.iterrows():
        diff = (row.values - mean).reshape(-1, 1)  # column vector
        try:
            distance = np.sqrt(diff.T @ cov_inv @ diff)[0][0]  # Mahalanobis distance
            if distance > threshold:
                abnormal_params.append(f"{row.name}有误")  # label the abnormal row by its index
        except Exception as e:
            abnormal_params.append(f"马氏距离计算错误: {e}")
    return abnormal_params if abnormal_params else ["正常"]
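
# Illustrative sketch (an assumption, not in the original API): normal data with
# one injected outlier row, which should exceed the chi-squared cut-off.
def _demo_mahalanobis():
    rng = np.random.default_rng(1)
    base = rng.normal(loc=[650.0, 120.0], scale=[5.0, 2.0], size=(100, 2))
    base[50] = [720.0, 90.0]  # inject an outlier row
    df_demo = pd.DataFrame(base, columns=['发动机排气温度', '燃油流量'])
    print(mahalanobis_based_diagnosis(df_demo))  # row 50 flagged (plus perhaps a borderline row or two)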
# Method 3: PCA-based fault diagnosis
@app.route('/pca', methods=['POST'])
def process_pca():
    try:
        data = request.json
        url = data.get('url', '')
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
        # Run the diagnosis
        diagnosis = pca_based_diagnosis(df)
        return jsonify({
            'status': 200,
            'data': diagnosis,
            'msg': '故障诊断完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def pca_based_diagnosis(data):
    """ PCA-based fault diagnosis. """
    features = ['发动机排气温度', '燃油流量', '高压压气机转子转数', '风扇转子转速',
                '氧气输出流量', '氧气浓度', '供氧压力', '氧分压值']
    valid_features = [col for col in features if col in data.columns]
    if not valid_features:
        return ["正常"]
    values = data[valid_features].dropna()
    pca = PCA(n_components=0.9)  # keep 90% of the variance
    pca.fit(values)
    # Per-feature reconstruction error of the data projected back from the
    # retained components; features that do not fit the principal subspace
    # reconstruct poorly
    data_reconstructed = pca.inverse_transform(pca.transform(values))
    error_per_feature = ((values - data_reconstructed) ** 2).mean(axis=0)
    abnormal_params = []
    # Flag features whose mean reconstruction error is far above the average
    # (a heuristic factor of 2)
    for feature in valid_features:
        if error_per_feature[feature] > 2 * error_per_feature.mean():
            abnormal_params.append(f"{feature}有误")
    return abnormal_params if abnormal_params else ["正常"]
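
# Illustrative sketch (an assumption, not in the original API): two strongly
# correlated columns plus one independent noisy column; the independent column
# does not fit the principal subspace, so its reconstruction error stands out.
def _demo_pca_diagnosis():
    rng = np.random.default_rng(2)
    t = np.linspace(0, 1, 200)
    df_demo = pd.DataFrame({
        '发动机排气温度': 600 + 50 * t + rng.normal(0, 0.1, 200),
        '燃油流量': 120 + 10 * t + rng.normal(0, 0.1, 200),
        '氧气浓度': rng.normal(21, 1.0, 200),  # independent of the others
    })
    print(pca_based_diagnosis(df_demo))  # likely flags 氧气浓度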
# Degradation assessment via linear normalisation
@app.route('/xxgy', methods=['POST'])
def process_data4():
    try:
        data = request.json
        # The request body must contain a url
        url = data.get('url', '')
        # Fetch the data
        df = read_csv_from_url(url)
        if df is None:
            print("数据获取失败,程序终止。")
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
        # Degradation assessment
        degradation_results = calculate_all_degradation(df)
        results = format_degradation_results(degradation_results)
        return jsonify({
            'status': 200,
            'data': results,
            'msg': '退化评估完成'
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'data': None,
            'msg': str(e)
        })
# Degradation metric via simple min-max normalisation
def calculate_degradation(parameter_data):
    # degradation = (current - min) / (max - min) * 100
    min_val = parameter_data.min()
    max_val = parameter_data.max()
    if max_val == min_val:  # constant column: no spread, hence no degradation signal
        return parameter_data * 0
    degradation = (parameter_data - min_val) / (max_val - min_val) * 100
    return degradation
# Compute degradation for each monitored parameter
def calculate_all_degradation(data):
    degradation_results = {}
    # A fixed set of key columns is assessed
    parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
    for param in parameters:
        if param in data.columns:
            degradation_results[param] = calculate_degradation(data[param])
    return degradation_results

# Format the degradation results
def format_degradation_results(degradation_results):
    print("退化评估结果:")
    results = {}
    for param, degradation in degradation_results.items():
        avg_degradation = np.mean(degradation)  # average degradation
        print(f"{param} 退化程度: {avg_degradation:.2f}%")
        results[param] = f"退化程度:{avg_degradation:.2f}%"
    return results
# Degradation assessment via trend analysis
@app.route('/qsfx', methods=['POST'])
def process_data5():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({
                'status': 400,
                'data': None,
                'msg': '请求体中未提供 URL'
            })
        # Fetch the CSV data
        csv_data = read_csv_from_url(url)
        if csv_data is None:
            return jsonify({
                'status': 500,
                'data': None,
                'msg': '无法获取或读取 CSV 文件'
            })
        # Trend-analysis degradation assessment
        trend_results = calculate_trend_degradation(csv_data)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'data': trend_results,
            'msg': '退化评估完成'
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'data': None,
            'msg': str(e)
        })
# Trend-analysis algorithm
def calculate_trend_degradation(data):
    results = {}
    parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
    for param in parameters:
        if param in data.columns:
            y = pd.to_numeric(data[param], errors='coerce').dropna()
            if len(y) < 2:
                results[param] = {
                    '退化趋势': '数据不足,无法计算',
                    '退化百分比': 'N/A'
                }
                continue
            x = pd.Series(range(len(y)))  # use the sample index as the x axis
            initial_value = y.iloc[0]  # take the first sample as the baseline
            # Fit a linear trend
            slope, intercept, r_value, p_value, std_err = linregress(x, y)
            # Total change over the series, as a percentage of the baseline
            final_change = slope * len(y)
            degradation_percentage = abs((final_change / initial_value) * 100) if initial_value != 0 else 0
            # Classify the trend
            if degradation_percentage > 20:
                trend = "显著退化"
            elif 10 < degradation_percentage <= 20:
                trend = "中度退化"
            elif 0 < degradation_percentage <= 10:
                trend = "轻微退化"
            else:
                trend = "无明显退化"
            results[param] = {
                '退化趋势': trend,
                '退化百分比': f"{degradation_percentage:.2f}%"  # two decimal places
            }
    return results
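
# Illustrative sketch (an assumption, not in the original API): a series that
# drifts about 15% from its first value should be classified as 中度退化.
def _demo_trend_degradation():
    y = 100 + 15 * np.linspace(0, 1, 120)  # drifts from 100 to 115 (~15%)
    df_demo = pd.DataFrame({'供氧压力': y})
    print(calculate_trend_degradation(df_demo))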
# Degradation assessment via summary statistics
@app.route('/tjthpg', methods=['POST'])
def process_data6():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({
                'status': 400,
                'data': None,
                'msg': '请求体中未提供 URL'
            })
        # Fetch the CSV data
        csv_data = read_csv_from_url(url)
        if csv_data is None:
            return jsonify({
                'status': 500,
                'data': None,
                'msg': '无法获取或读取 CSV 文件'
            })
        # Statistics-based degradation assessment
        statistics_results = calculate_statistics_degradation(csv_data)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'data': statistics_results,
            'msg': '退化评估完成'
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'data': None,
            'msg': str(e)
        })
# Statistics-based algorithm
def calculate_statistics_degradation(data):
    results = {}
    parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
    for param in parameters:
        if param in data.columns:
            mean_val = data[param].mean()
            var_val = data[param].var()
            max_val = data[param].max()
            min_val = data[param].min()
            baseline_mean = data[param].iloc[0]  # take the first row as the baseline
            # Degradation indicators
            mean_shift = mean_val - baseline_mean
            # Guard against a zero baseline before dividing
            degradation_percentage = abs(mean_shift / baseline_mean) * 100 if baseline_mean != 0 else 0
            degradation_level = (
                "显著退化" if degradation_percentage > 10 else
                "轻微退化" if degradation_percentage > 0 else
                "未退化"
            )
            results[param] = {
                '均值': f"{mean_val:.2f}",
                '方差': f"{var_val:.2f}",
                '最大值': f"{max_val:.2f}",
                '最小值': f"{min_val:.2f}",
                '均值偏移': f"{mean_shift:.2f}",
                '退化百分比': f"{degradation_percentage:.2f}%",
                '退化程度': degradation_level
            }
    return results
# Degradation assessment via threshold comparison
@app.route('/yzthpg', methods=['POST'])
def process_data7():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({
                'status': 400,
                'data': None,
                'msg': '请求体中未提供 URL'
            })
        # Fetch the CSV data
        csv_data = read_csv_from_url(url)
        if csv_data is None:
            return jsonify({
                'status': 500,
                'data': None,
                'msg': '无法获取或读取 CSV 文件'
            })
        # Allowed operating ranges per parameter
        thresholds = {
            '发动机排气温度': (300, 1000),  # example temperature range
            '燃油流量': (10, 50),           # example flow range
            '氧气输出流量': (5, 100),
            '氧气浓度': (0.1, 0.3),         # assumed concentration range
            '供氧压力': (0.5, 1.5)          # assumed pressure range
        }
        # Run the assessment, passing the ranges explicitly instead of via a global
        threshold_results = calculate_threshold_degradation(csv_data, thresholds)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'data': threshold_results,
            'msg': '退化评估完成'
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'data': None,
            'msg': str(e)
        })
# Threshold-comparison degradation algorithm
def calculate_threshold_degradation(data, thresholds):
    results = {}
    for param in ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']:
        if param in data.columns:
            values = pd.to_numeric(data[param], errors='coerce').dropna()
            initial_value = values.iloc[0]  # take the first sample as the initial value
            # Degradation percentage relative to the initial value
            current_mean = values.mean()
            if initial_value == 0:
                degradation_percentage = 0
            else:
                degradation_percentage = abs((current_mean - initial_value) / initial_value) * 100
            # Fraction of samples outside the allowed range (the actual threshold comparison)
            low, high = thresholds.get(param, (-np.inf, np.inf))
            out_of_range = ((values < low) | (values > high)).mean() * 100
            # Classify the trend
            if degradation_percentage > 20:
                trend = "显著退化"
            elif 10 < degradation_percentage <= 20:
                trend = "中度退化"
            elif 0 < degradation_percentage <= 10:
                trend = "轻微退化"
            else:
                trend = "无明显退化"
            # Collect the results
            results[param] = {
                '当前均值': f"{current_mean:.2f}",
                '初始值': f"{initial_value:.2f}",
                '超限比例': f"{out_of_range:.2f}%",
                '退化百分比': f"{degradation_percentage:.2f}%",
                '退化趋势': trend
            }
    return results
# Fault prediction via simple thresholds and trend analysis
@app.route('/jdyzqsfx', methods=['POST'])
def process_trend():
    try:
        data = request.json
        url = data.get('url', '')
        # Fetch the data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '无法获取或读取 CSV 文件'})
        # Run the prediction
        prediction = trend_analysis_prediction(df)
        # Return the result
        return jsonify({
            'status': 200,
            'data': {'故障概率': f"{prediction:.2f}%"},
            'msg': '故障预测完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def trend_analysis_prediction(data):
    """ Predict a fault probability from per-parameter trends. """
    features = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度']
    slopes = []
    for feature in features:
        if feature in data.columns:
            series = pd.to_numeric(data[feature], errors='coerce').dropna()
            if len(series) > 1:
                x = range(len(series))
                slope, _, _, _, _ = linregress(x, series)
                slopes.append(slope)
    if not slopes:
        return 0.0
    # Fault probability: share of fitted parameters with a rising trend (a
    # positive slope is treated as abnormal); dividing by the number of fitted
    # slopes avoids diluting the score when columns are missing
    fault_probability = sum(1 for slope in slopes if slope > 0) / len(slopes) * 100
    return fault_probability
# ARIMA-based fault prediction (not implemented in this section)
# Isolation Forest anomaly-detection fault prediction
@app.route('/ycjc', methods=['POST'])
def process_isolation():
    try:
        data = request.json
        url = data.get('url', '')
        # Fetch the data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '无法获取或读取 CSV 文件'})
        # Run the prediction
        prediction = isolation_forest_prediction(df)
        # Return the result
        return jsonify({
            'status': 200,
            'data': {'故障概率': f"{prediction:.2f}%"},
            'msg': '故障预测完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def isolation_forest_prediction(data):
    """ Estimate a fault probability as the share of points flagged by Isolation Forest. """
    features = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度']
    total_points = 0
    fault_points = 0
    for feature in features:
        if feature in data.columns:
            series = pd.to_numeric(data[feature], errors='coerce').dropna().values.reshape(-1, 1)
            if len(series) > 10:  # need enough points to fit the model
                isolation_forest = IsolationForest(contamination=0.1, random_state=42)
                isolation_forest.fit(series)
                predictions = isolation_forest.predict(series)
                fault_points += np.sum(predictions == -1)  # number of anomalous points
                total_points += len(series)
    fault_probability = (fault_points / total_points) * 100 if total_points > 0 else 0
    return fault_probability
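
# Illustrative sketch (an assumption, not in the original API). Note that with
# contamination=0.1 roughly 10% of points are flagged by construction, so the
# returned probability hovers near 10% even on fairly clean data.
def _demo_isolation_forest():
    rng = np.random.default_rng(3)
    series = rng.normal(650.0, 2.0, 200)
    series[90:95] = 750.0  # a burst of genuinely anomalous readings
    df_demo = pd.DataFrame({'发动机排气温度': series})
    print(f"{isolation_forest_prediction(df_demo):.2f}%")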
# Life prediction via linear regression
@app.route('/xxhg', methods=['POST'])
def process_data11():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({'status': 400, 'data': None, 'msg': '请求体中未提供 URL'})
        # Fetch the CSV data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '无法获取或读取 CSV 文件'})
        # Predict remaining engine life from the exhaust-temperature trend
        engine_life = linear_regression_predict_life1(df, "发动机排气温度", 800)
        # Return the result as JSON (the helper returns an explanatory string on failure,
        # which must not be passed through a numeric format spec)
        return jsonify({
            'status': 200,
            'data': {
                '剩余寿命': f"{engine_life:.2f} 小时" if isinstance(engine_life, (int, float)) else engine_life,
            },
            'msg': '寿命预测完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
# Linear-regression life prediction
def linear_regression_predict_life1(data, column_name, failure_threshold, max_life=50000, min_life=50):
    if column_name not in data.columns:
        return "数据缺失,无法预测"
    # Smooth the data to reduce noise
    y = pd.to_numeric(data[column_name], errors='coerce').dropna()
    if len(y) < 2:
        return "数据不足,无法预测"
    y_smoothed = y.rolling(window=5, min_periods=1).mean()  # moving-average smoothing
    x = np.arange(len(y_smoothed))
    # Fit a linear trend
    slope, intercept, r_value, p_value, std_err = linregress(x, y_smoothed)
    current_value = y_smoothed.iloc[-1]
    # Guard against a near-zero slope, which would imply an unbounded lifetime
    if abs(slope) < 0.01:
        slope = 0.01 if failure_threshold > current_value else -0.01
    # Remaining life: samples until the fitted trend crosses the failure threshold
    time_to_failure = (failure_threshold - current_value) / slope
    if time_to_failure < 0:  # the trend is moving away from the threshold
        time_to_failure = max_life
    time_to_failure = max(min_life, min(max_life, time_to_failure))  # clamp to a sane range
    return round(time_to_failure, 2)  # two decimal places
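
# Illustrative sketch (an assumption, not in the original API): temperature
# climbing toward the 800 threshold at ~0.5 per sample leaves roughly
# (800 - 649) / 0.5 ≈ 300 samples of remaining life.
def _demo_linear_life():
    y = 600 + 0.5 * np.arange(100)  # ends near 650
    df_demo = pd.DataFrame({'发动机排气温度': y})
    print(linear_regression_predict_life1(df_demo, "发动机排气温度", 800))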
# Life prediction via an exponential model
@app.route('/exp', methods=['POST'])
def process_data_exp_life():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({'status': 400, 'data': None, 'msg': '请求体中未提供 URL'})
        # Fetch the CSV data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '无法获取或读取 CSV 文件'})
        # Predict remaining life with the exponential model
        engine_life = exponential_decay_predict_life(df, "发动机排气温度", 800)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'data': {
                '剩余寿命': engine_life,
            },
            'msg': '寿命预测完成'
        })
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
# Life prediction based on an exponential model fitted in log space
def exponential_decay_predict_life(data, column_name, failure_threshold, max_life=50000, min_life=50):
    if column_name not in data.columns:
        return "数据缺失,无法预测"
    # Extract the time series
    y = pd.to_numeric(data[column_name], errors='coerce').dropna()
    if len(y) < 2:
        return "数据不足,无法预测"
    # Smooth the data
    y_smoothed = y.rolling(window=5, min_periods=1).mean()  # moving-average smoothing
    x = np.arange(len(y_smoothed))
    # The log transform requires strictly positive values
    if (y_smoothed <= 0).any() or failure_threshold <= 0:
        return "预测失败,可能是数据或阈值无效"
    # Fit y = exp(intercept + slope * x) by regressing log(y) on x
    log_y = np.log(y_smoothed)
    slope, intercept, r_value, p_value, std_err = linregress(x, log_y)
    if slope == 0:  # a flat fit never reaches the threshold
        return "预测失败,可能是数据或阈值无效"
    # Time, measured from the first sample, at which the fitted curve reaches the threshold
    try:
        time_to_failure = (np.log(failure_threshold) - intercept) / slope
        time_to_failure = max(min_life, min(max_life, time_to_failure))  # clamp to a sane range
        return f"{round(time_to_failure, 2)} 小时"  # two decimal places
    except ValueError:
        return "预测失败,可能是数据或阈值无效"
# False-alarm detection: CA-CFAR algorithm
@app.route('/CACFAR', methods=['POST'])
def process_data13():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({
                'status': 400,
                'msg': 'No URL provided in the request body.'
            })
        # Fetch the CSV data
        csv_data = read_csv_from_url(url)
        if csv_data is None:
            return jsonify({
                'status': 500,
                'msg': 'Failed to fetch or read the CSV file.'
            })
        # Compute the false-alarm suppression results
        suppression_results = calculate_all_suppression(csv_data)
        # Format the results
        results = print_suppression_results(suppression_results)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'msg': '虚警抑制完成',
            'results': results
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# CA-CFAR false-alarm suppression (with censored training cells)
def cacfar(data, guard_cells, training_cells, threshold_factor=1.5, censoring_factor=0.1):
    """
    False-alarm suppression with a censored cell-averaging CFAR.
    Parameters:
        data: input signal (1-D array)
        guard_cells: number of guard cells
        training_cells: number of training cells
        threshold_factor: threshold multiplier controlling the false-alarm rate
        censoring_factor: fraction of the smallest and largest training samples to discard
    Returns:
        detected: binary detections (1-D array; 1 = target present, 0 = no target)
    """
    n = len(data)
    half_guard_cells = guard_cells // 2
    half_training_cells = training_cells // 2
    detected = np.zeros(n)
    # Slide the window across the data
    for i in range(half_guard_cells + half_training_cells, n - half_guard_cells - half_training_cells):
        # Take half the training cells on each side, just outside the guard region
        training_data = np.concatenate([
            data[i - half_guard_cells - half_training_cells:i - half_guard_cells],
            data[i + half_guard_cells + 1:i + half_guard_cells + 1 + half_training_cells]
        ])
        # Sort the training cells and censor the extreme censoring_factor fraction at each end
        sorted_training_data = np.sort(training_data)
        censoring_count = int(len(sorted_training_data) * censoring_factor)
        if censoring_count > 0:
            censored_training_data = sorted_training_data[censoring_count:-censoring_count]
        else:
            censored_training_data = sorted_training_data  # slicing with -0 would empty the array
        # Background noise estimate: median of the censored training cells
        noise_estimate = np.median(censored_training_data)
        # Detection threshold
        threshold = threshold_factor * noise_estimate
        # Declare a target where the cell under test exceeds the threshold
        if data[i] > threshold:
            detected[i] = 1
    return detected
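
# Illustrative sketch (an assumption, not in the original API): a flat noise
# floor with one strong spike; CFAR should mark the spike and little else.
def _demo_cacfar():
    rng = np.random.default_rng(4)
    signal = rng.normal(10.0, 0.5, 200)
    signal[100] = 30.0  # injected target
    detected = cacfar(signal, guard_cells=4, training_cells=16, threshold_factor=1.5)
    print(np.nonzero(detected)[0])  # expected: index 100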
# Compute false-alarm suppression for every monitored parameter
def calculate_all_suppression(data):
    suppression_results = {}
    parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
    # CA-CFAR parameters
    guard_cells = 4
    training_cells = 16
    threshold_factor = 1.5  # threshold multiplier
    censoring_factor = 0.1  # censoring fraction
    for param in parameters:
        if param in data.columns:
            # Apply CA-CFAR suppression (coerce to numeric and drop unparsable entries)
            column_data = pd.to_numeric(data[param], errors='coerce').dropna()
            suppression_results[param] = cacfar(column_data.values, guard_cells, training_cells, threshold_factor, censoring_factor)
    return suppression_results

# Print the suppression results and return them in JSON-friendly form
def print_suppression_results(suppression_results):
    print("虚警抑制结果:")
    results = {}
    for param, detected in suppression_results.items():
        detected_percentage = np.mean(detected) * 100  # share of cells declared as targets
        result_str = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
        print(result_str)
        results[param] = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
    return results
# False-alarm suppression: mean-based CFAR detection
@app.route('/MeanCFAR', methods=['POST'])
def process_data14():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({
                'status': 400,
                'msg': 'No URL provided in the request body.'
            })
        # Fetch the CSV data
        csv_data = read_csv_from_url(url)
        if csv_data is None:
            return jsonify({
                'status': 500,
                'msg': 'Failed to fetch or read the CSV file.'
            })
        # Compute the false-alarm suppression results
        suppression_results = calculate_all_suppression1(csv_data)
        # Format the results
        results = print_suppression_results1(suppression_results)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'msg': '虚警抑制完成',
            'results': results
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# Mean-CFAR false-alarm suppression
def mean_cfar(data, guard_cells, training_cells, threshold_factor=1.5):
    """
    False-alarm suppression with a cell-averaging (mean) CFAR.
    Parameters:
        data: input signal (1-D array)
        guard_cells: number of guard cells
        training_cells: number of training cells
        threshold_factor: threshold multiplier controlling the false-alarm rate
    Returns:
        detected: binary detections (1-D array; 1 = target present, 0 = no target)
    """
    n = len(data)
    half_guard_cells = guard_cells // 2
    half_training_cells = training_cells // 2
    detected = np.zeros(n)
    # Slide the window across the data
    for i in range(half_guard_cells + half_training_cells, n - half_guard_cells - half_training_cells):
        # Take half the training cells on each side, just outside the guard region
        training_data = np.concatenate([
            data[i - half_guard_cells - half_training_cells:i - half_guard_cells],
            data[i + half_guard_cells + 1:i + half_guard_cells + 1 + half_training_cells]
        ])
        # Background noise estimate: mean of the training cells
        noise_estimate = np.mean(training_data)
        # Detection threshold
        threshold = threshold_factor * noise_estimate
        # Declare a target where the cell under test exceeds the threshold
        if data[i] > threshold:
            detected[i] = 1
    return detected
# Compute false-alarm suppression for every monitored parameter
def calculate_all_suppression1(data):
    suppression_results = {}
    parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
    # Mean-CFAR parameters
    guard_cells = 4
    training_cells = 16
    threshold_factor = 1.5  # threshold multiplier
    for param in parameters:
        if param in data.columns:
            # Apply Mean-CFAR suppression
            column_data = pd.to_numeric(data[param], errors='coerce').dropna()
            suppression_results[param] = mean_cfar(column_data.values, guard_cells, training_cells, threshold_factor)
    return suppression_results

# Print the suppression results and return them in JSON-friendly form
def print_suppression_results1(suppression_results):
    print("虚警抑制结果:")
    results = {}
    for param, detected in suppression_results.items():
        detected_percentage = np.mean(detected) * 100  # share of cells declared as targets
        result_str = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
        print(result_str)
        results[param] = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
    return results
# False-alarm suppression: variable-exponent CFAR detection
@app.route('/V_CACFAR', methods=['POST'])
def process_data15():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({
                'status': 400,
                'msg': 'No URL provided in the request body.'
            })
        # Fetch the CSV data
        csv_data = read_csv_from_url(url)
        if csv_data is None:
            return jsonify({
                'status': 500,
                'msg': 'Failed to fetch or read the CSV file.'
            })
        # Compute the false-alarm suppression results
        suppression_results = calculate_all_suppression2(csv_data)
        # Format the results
        results = print_suppression_results2(suppression_results)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'msg': '虚警抑制完成',
            'results': results
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# V-CA-CFAR false-alarm suppression (exponentially weighted cell averaging)
def v_ca_cfar(data, guard_cells, training_cells, alpha=2, threshold_factor=1.5):
    """
    False-alarm suppression with a variable-exponent CFAR (V-CA-CFAR).
    Parameters:
        data: input signal (1-D array)
        guard_cells: number of guard cells
        training_cells: number of training cells
        alpha: exponential weighting coefficient (controls how fast the weights
               decay; large values make only the first few training cells matter)
        threshold_factor: threshold multiplier controlling the false-alarm rate
    Returns:
        detected: binary detections (1-D array; 1 = target present, 0 = no target)
    """
    n = len(data)
    half_guard_cells = guard_cells // 2
    half_training_cells = training_cells // 2
    detected = np.zeros(n)
    # Slide the window across the data
    for i in range(half_guard_cells + half_training_cells, n - half_guard_cells - half_training_cells):
        # Take half the training cells on each side, just outside the guard region
        training_data = np.concatenate([
            data[i - half_guard_cells - half_training_cells:i - half_guard_cells],
            data[i + half_guard_cells + 1:i + half_guard_cells + 1 + half_training_cells]
        ])
        # Exponentially weighted noise estimate over the training cells
        weights = np.exp(-alpha * np.arange(len(training_data)))
        weighted_noise_estimate = np.sum(training_data * weights) / np.sum(weights)
        # Detection threshold
        threshold = threshold_factor * weighted_noise_estimate
        # Declare a target where the cell under test exceeds the threshold
        if data[i] > threshold:
            detected[i] = 1
    return detected
# Compute false-alarm suppression for every monitored parameter
def calculate_all_suppression2(data):
    suppression_results = {}
    parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
    # V-CA-CFAR parameters
    guard_cells = 4
    training_cells = 16
    alpha = 2  # exponential weighting coefficient
    threshold_factor = 1.5  # threshold multiplier
    for param in parameters:
        if param in data.columns:
            # Apply V-CA-CFAR suppression
            column_data = pd.to_numeric(data[param], errors='coerce').dropna()
            suppression_results[param] = v_ca_cfar(column_data.values, guard_cells, training_cells, alpha, threshold_factor)
    return suppression_results

# Print the suppression results and return them in JSON-friendly form
def print_suppression_results2(suppression_results):
    print("虚警抑制结果:")
    results = {}
    for param, detected in suppression_results.items():
        detected_percentage = np.mean(detected) * 100  # share of cells declared as targets
        result_str = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
        print(result_str)
        results[param] = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
    return results
# False-alarm detection: OS-CFAR algorithm
@app.route('/OSCFAR', methods=['POST'])
def process_data16():
    try:
        # Read the URL from the request
        data = request.json
        url = data.get('url', '')
        # Validate the URL
        if not url:
            return jsonify({
                'status': 400,
                'msg': 'No URL provided in the request body.'
            })
        # Fetch the CSV data
        csv_data = read_csv_from_url(url)
        if csv_data is None:
            return jsonify({
                'status': 500,
                'msg': 'Failed to fetch or read the CSV file.'
            })
        # Compute the false-alarm suppression results
        suppression_results = calculate_all_suppression3(csv_data)
        # Format the results
        results = print_suppression_results3(suppression_results)
        # Return the result as JSON
        return jsonify({
            'status': 200,
            'msg': '虚警抑制完成',
            'results': results
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# OS-CFAR false-alarm suppression
def os_cfar(data, guard_cells, training_cells, threshold_factor=1.5):
    """
    False-alarm suppression with an ordered-statistics CFAR (OS-CFAR).
    Parameters:
        data: input signal (1-D array)
        guard_cells: number of guard cells
        training_cells: number of training cells
        threshold_factor: threshold multiplier controlling the false-alarm rate
    Returns:
        detected: binary detections (1-D array; 1 = target present, 0 = no target)
    """
    n = len(data)
    half_guard_cells = guard_cells // 2
    half_training_cells = training_cells // 2
    detected = np.zeros(n)
    # Slide the window across the data
    for i in range(half_guard_cells + half_training_cells, n - half_guard_cells - half_training_cells):
        # Take half the training cells on each side, just outside the guard region
        training_data = np.concatenate([
            data[i - half_guard_cells - half_training_cells:i - half_guard_cells],
            data[i + half_guard_cells + 1:i + half_guard_cells + 1 + half_training_cells]
        ])
        # Sort the training cells and take the k-th order statistic (here the
        # median) as the background noise estimate
        sorted_training_data = np.sort(training_data)
        noise_estimate = sorted_training_data[len(sorted_training_data) // 2]
        # Detection threshold
        threshold = threshold_factor * noise_estimate
        # Declare a target where the cell under test exceeds the threshold
        if data[i] > threshold:
            detected[i] = 1
    return detected
# Compute false-alarm suppression for every monitored parameter
def calculate_all_suppression3(data):
    suppression_results = {}
    parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
    # OS-CFAR parameters
    guard_cells = 4
    training_cells = 16
    threshold_factor = 1.5  # threshold multiplier
    for param in parameters:
        if param in data.columns:
            # Apply OS-CFAR suppression
            column_data = pd.to_numeric(data[param], errors='coerce').dropna()
            suppression_results[param] = os_cfar(column_data.values, guard_cells, training_cells, threshold_factor)
    return suppression_results

# Print the suppression results and return them in JSON-friendly form
def print_suppression_results3(suppression_results):
    print("虚警抑制结果:")
    results = {}
    for param, detected in suppression_results.items():
        detected_percentage = np.mean(detected) * 100  # share of cells declared as targets
        result_str = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
        print(result_str)
        results[param] = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
    return results
'''Imputation 1: mean filling'''
@app.route('/ave', methods=['POST'])
def process_data1():
    try:
        data = request.json
        # Read the URL and download the file
        url = data.get('url', '')
        response = requests.get(url)
        # Check that the download succeeded
        if response.status_code != 200:
            error_message = {'status': 500, 'message': 'Failed to download CSV data'}
            return jsonify(error_message)
        # Parse the CSV data into a pandas DataFrame
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        df = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
        if "时间" in df.columns:
            time_column = df["时间"].values  # keep the time column aside
            df = df.drop(columns="时间")     # process only the value columns
        else:
            time_column = None
        # Numeric columns: fill missing values with the column mean
        numerical_cols = df.select_dtypes(include=[np.number]).columns
        for col in numerical_cols:
            df[col] = df[col].fillna(df[col].mean())
        # Categorical columns: fill missing values with the column mode
        categorical_cols = df.select_dtypes(include=['object']).columns
        for col in categorical_cols:
            df[col] = df[col].fillna(df[col].mode()[0])
        # Re-attach the time column if there was one
        if time_column is not None:
            df.insert(0, "时间", time_column)
        # Return the imputed records
        return jsonify({
            'status': 200,
            'msg': '补全完成',
            'data': df.to_dict(orient='records')
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
'''Imputation 2: median filling'''
@app.route('/upper', methods=['POST'])
def process_data_upper():
    try:
        data = request.json
        # Read the URL and download the file
        url = data.get('url', '')
        response = requests.get(url)
        # Check that the download succeeded
        if response.status_code != 200:
            error_message = {'status': 500, 'message': 'Failed to download CSV data'}
            return jsonify(error_message)
        # Parse the CSV data into a pandas DataFrame
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        df = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
        if "时间" in df.columns:
            time_column = df["时间"].values  # keep the time column aside
            df = df.drop(columns="时间")     # process only the value columns
        else:
            time_column = None
        # Numeric columns: fill missing values with the column median
        numerical_cols = df.select_dtypes(include=[np.number]).columns
        for col in numerical_cols:
            df[col] = df[col].fillna(df[col].median())
        # Categorical columns: fill missing values with the column mode
        categorical_cols = df.select_dtypes(include=['object']).columns
        for col in categorical_cols:
            df[col] = df[col].fillna(df[col].mode()[0])
        # Re-attach the time column if there was one (the mean-filling route does
        # this, and omitting it here would silently drop the column)
        if time_column is not None:
            df.insert(0, "时间", time_column)
        # Return the imputed records
        return jsonify({
            'status': 200,
            'msg': '补全完成',
            'data': df.to_dict(orient='records')
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
'''Imputation 3: linear interpolation'''
def linear_interpolate(signal):
    """ Perform linear interpolation to fill missing data (NaN values). """
    return signal.interpolate(method='linear', limit_direction='both')
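
# Illustrative sketch (an assumption, not in the original API): gaps in a short
# series are filled along the straight line between their neighbours.
def _demo_linear_interpolate():
    s = pd.Series([1.0, np.nan, 3.0, np.nan, np.nan, 6.0])
    print(linear_interpolate(s).tolist())  # [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]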
@app.route('/line', methods=['POST'])
def process_data_line():
    try:
        data = request.json
        # Read the URL and download the file
        url = data.get('url', '')
        response = requests.get(url)
        # Check that the download succeeded
        if response.status_code != 200:
            error_message = {'status': 500, 'message': 'Failed to download CSV data'}
            return jsonify(error_message)
        # Parse the CSV data into a pandas DataFrame
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        noisy_signal = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
        # Normalise obvious missing-value markers (empty strings, None) to NaN
        noisy_signal = noisy_signal.replace([None, '', 'NaN'], np.nan)
        # If a "时间" column exists, keep it aside
        if "时间" in noisy_signal.columns:
            time_column = noisy_signal["时间"].values
            other_columns = noisy_signal.drop(columns="时间")  # keep only value columns
        else:
            time_column = None
            other_columns = noisy_signal
        # DataFrame for the interpolated signals
        interpolated_signals = pd.DataFrame()
        # Interpolate every value column
        for column in other_columns.columns:
            try:
                # Coerce the column to numeric before interpolating
                column_data = pd.to_numeric(other_columns[column], errors='coerce')
                interpolated_signals[column] = linear_interpolate(column_data)
            except Exception as e:
                print(f"处理列 {column} 时出错: {e}")
                interpolated_signals[column] = other_columns[column]  # keep the original data on failure
        # Re-attach the time column if there was one
        if time_column is not None:
            interpolated_signals.insert(0, "时间", time_column)
        # Return the interpolated records
        return jsonify({
            'status': 200,
            'msg': '补全完成',
            'data': interpolated_signals.to_dict(orient='records')
        })
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# Moving-average denoising
def moving_average(signal, window_size):
    # 'same' mode keeps the output the same length as the input (edges are partially averaged)
    return np.convolve(signal, np.ones(window_size) / window_size, mode='same')
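
# Illustrative sketch (an assumption, not in the original API): a window of 3
# replaces each sample with the average of itself and its neighbours.
def _demo_moving_average():
    print(moving_average(np.array([1.0, 1.0, 10.0, 1.0, 1.0]), 3))
    # -> [0.67, 4.0, 4.0, 4.0, 0.67]: the spike is spread across the window,
    #    and the edge values taper because 'same' mode zero-pads the borders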
@app.route('/movave', methods=['POST'])
def set_file_address1():
    try:
        data = request.json
        # Read the URL and download the file
        url = data.get('url', '')
        response = requests.get(url)
        response.raise_for_status()  # check that the request succeeded
        # Parse the CSV data
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        noisy_signal = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
        # If a "时间" column exists, keep it aside
        if "时间" in noisy_signal.columns:
            time_column = noisy_signal["时间"].values
            other_columns = noisy_signal.drop(columns="时间")  # keep only value columns
        else:
            time_column = None
            other_columns = noisy_signal
        # Apply the denoising method
        window_size = 10  # window width
        denoised_signals = pd.DataFrame()
        for column in other_columns.columns:
            # Coerce the column to numeric so the convolution cannot fail on strings
            column_data = pd.to_numeric(other_columns[column], errors='coerce')
            denoised_signals[column] = moving_average(column_data.values, window_size)
        # Re-attach the time column if there was one
        if time_column is not None:
            denoised_signals.insert(0, '时间', time_column)
        return jsonify({'status': 200,
                        'msg': '去噪完成',
                        'data': denoised_signals.to_dict(orient='records')})
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# Fourier-transform denoising: zero out frequency components above the cutoff
def fft_denoise(signal, cutoff_freq, fs):
    signal_fft = fft(signal)
    frequencies = np.fft.fftfreq(len(signal), 1 / fs)
    signal_fft[np.abs(frequencies) > cutoff_freq] = 0  # low-pass filter in the frequency domain
    return ifft(signal_fft).real
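
# Illustrative sketch (an assumption, not in the original API): a 5 Hz sine with
# a 120 Hz interferer, low-passed at 10 Hz, should come back close to the sine.
def _demo_fft_denoise():
    fs = 1000
    t = np.arange(0, 1, 1 / fs)
    clean = np.sin(2 * np.pi * 5 * t)
    noisy = clean + 0.5 * np.sin(2 * np.pi * 120 * t)
    denoised = fft_denoise(noisy, cutoff_freq=10, fs=fs)
    print(np.max(np.abs(denoised - clean)))  # tiny residual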
@app.route('/fft', methods=['POST'])
def process_data_fft():
    try:
        data = request.json
        # Read the URL and download the file
        url = data.get('url', '')
        response = requests.get(url)
        response.raise_for_status()  # check that the request succeeded
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        noisy_signal = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
        fs = 1000         # sampling frequency
        cutoff_freq = 10  # cutoff frequency
        # If a "时间" column exists, keep it aside
        if "时间" in noisy_signal.columns:
            time_column = noisy_signal["时间"].values
            other_columns = noisy_signal.drop(columns="时间")  # keep only value columns
        else:
            time_column = None
            other_columns = noisy_signal
        # DataFrame for the denoised signals
        denoised_signals = pd.DataFrame()
        # Denoise every value column
        for column in other_columns.columns:
            try:
                # Coerce the column to numeric before transforming
                column_data = pd.to_numeric(other_columns[column], errors='coerce')
                denoised_signals[column] = fft_denoise(column_data.values, cutoff_freq, fs)
            except Exception as e:
                print(f"处理列 {column} 时出错: {e}")
                denoised_signals[column] = other_columns[column]  # keep the original data on failure
        # Re-attach the time column if there was one
        if time_column is not None:
            denoised_signals.insert(0, "时间", time_column)
        return jsonify({'status': 200,
                        'msg': '去噪完成',
                        'data': denoised_signals.to_dict(orient='records')})
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
def augment_data1(data, sample_factor=2):
    """
    Augment the data by randomly sampling rows and adding them back.
    sample_factor determines how many times the data should be sampled.
    """
    augmented_data = []
    # Calculate the number of samples to create
    num_samples = len(data) * sample_factor
    # Randomly sample from the original data
    for _ in range(num_samples):
        sampled_row = random.choice(data)
        augmented_data.append(sampled_row)
    return augmented_data
@app.route('/sjcy', methods=['POST'])
def process_data_augmentation1():
    try:
        data = request.json
        # Read the URL and download the file
        url = data.get('url', '')
        response = requests.get(url)
        response.raise_for_status()  # check that the request succeeded
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        signal_data = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
        # If the "时间" column exists, separate it
        if "时间" in signal_data.columns:
            time_column = signal_data["时间"].values  # extract the time column
            signal_data = signal_data.drop(columns="时间")  # remove it for processing
        else:
            time_column = None
        # Apply random sampling-based data augmentation
        augmented_signals = augment_data1(signal_data.to_dict(orient='records'))
        # Convert augmented data back to a DataFrame
        augmented_df = pd.DataFrame(augmented_signals)
        # Limit the number of rows in the response
        max_rows = 1000  # adjust as needed
        if len(augmented_df) > max_rows:
            augmented_df = augmented_df.head(max_rows)
        # Make the time column length match the augmented data length
        if time_column is not None:
            if len(time_column) != len(augmented_df):
                time_column_aug = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
            else:
                time_column_aug = time_column
            augmented_df.insert(0, "时间", time_column_aug)
            # Restore the time column on the original rows as well, so both
            # frames share the same schema before concatenation
            signal_data.insert(0, "时间", time_column)
        # Append the augmented rows below the original data
        combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
        # Prepare response
        result = {
            'status': 200,
            'msg': '数据扩充完成',
            'data': combined_df.to_dict(orient='records')
        }
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# 2. Data perturbation algorithm
# Apply small random changes to the values in the dataset to generate new data
# points and increase the diversity of the dataset.
def augment_data2(data, noise_factor=0.01, perturb_factor=0.05):
    """
    Augment the data by applying small perturbations or noise.
    noise_factor controls the magnitude of random noise added to the data.
    perturb_factor controls how much random perturbation is applied.
    """
    augmented_data = []
    for row in data:
        augmented_row = {}
        for column, value in row.items():
            # Convert to numeric if it's not already (ignores non-numeric columns like "时间")
            try:
                value = float(value)
            except ValueError:
                augmented_row[column] = value  # keep non-numeric columns (e.g. the time column) unchanged
                continue
            # Apply random noise within a perturbation range
            perturbation = random.uniform(1 - perturb_factor, 1 + perturb_factor)  # random scaling factor
            noisy_value = value * perturbation  # apply the perturbation
            # Optionally, add some Gaussian noise on top of the perturbation
            noise = random.gauss(0, noise_factor)  # Gaussian noise
            augmented_row[column] = noisy_value + noise
        augmented_data.append(augmented_row)
    return augmented_data
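
# Illustrative sketch (an assumption, not in the original API): each numeric
# value is scaled by a factor in [0.95, 1.05] plus a little Gaussian jitter.
def _demo_augment_data2():
    rows = [{'时间': '2024-01-01 00:00:00', '燃油流量': 120.0}]
    print(augment_data2(rows))  # 燃油流量 lands near 120, 时间 is untouched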
@app.route('/sjrd', methods=['POST'])
def process_data_augmentation2():
    try:
        data = request.json
        # Read the URL and download the file
        url = data.get('url', '')
        response = requests.get(url)
        response.raise_for_status()  # check that the request succeeded
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        signal_data = pd.read_csv(csv_data, header=0, encoding='utf-8')
        # If the "时间" column exists, separate it
        if "时间" in signal_data.columns:
            time_column = signal_data["时间"].values  # extract the time column
            signal_data = signal_data.drop(columns="时间")  # remove it for processing
        else:
            time_column = None
        # Apply perturbation-based data augmentation
        augmented_signals = augment_data2(signal_data.to_dict(orient='records'))
        # Convert augmented data back to a DataFrame
        augmented_df = pd.DataFrame(augmented_signals)
        # Limit the number of rows in the response
        max_rows = 1000  # adjust as needed
        if len(augmented_df) > max_rows:
            augmented_df = augmented_df.head(max_rows)
        # Make the time column length match the augmented data length
        if time_column is not None:
            if len(time_column) != len(augmented_df):
                time_column_aug = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
            else:
                time_column_aug = time_column
            augmented_df.insert(0, "时间", time_column_aug)
            # Restore the time column on the original rows so both frames share a schema
            signal_data.insert(0, "时间", time_column)
        # Append the augmented rows below the original data
        combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
        # Prepare response
        result = {
            'status': 200,
            'msg': '数据扩充完成',
            'data': combined_df.to_dict(orient='records')
        }
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# 3. Wavelet transform algorithm
# Multi-resolution analysis of a signal with the wavelet transform, usable for
# data compression or denoising.
def augment_data_with_wavelet(data, wavelet='db1', level=2, noise_factor=0.01):
    """
    Augment the data by applying wavelet transform, perturbing the coefficients, and performing inverse transform.
    :param data: The original data.
    :param wavelet: The type of wavelet to use (e.g., 'db1', 'haar', etc.).
    :param level: The level of decomposition in the wavelet transform.
    :param noise_factor: The standard deviation of noise to add to the high-frequency coefficients.
    :return: The augmented data after wavelet-based perturbation.
    """
    augmented_data = []
    for row in data:
        augmented_row = {}
        for column, value in row.items():
            try:
                value = float(value)
            except ValueError:
                augmented_row[column] = value  # keep non-numeric columns unchanged
                continue
            # Perform wavelet decomposition on the value. Note: decomposing a
            # single scalar is degenerate (pywt warns and the result reduces to
            # Gaussian jitter); a column-wise variant is sketched below.
            coeffs = pywt.wavedec([value], wavelet, level=level)
            # Perturb the high-frequency (detail) coefficients with noise
            for i in range(1, len(coeffs)):
                coeffs[i] += np.random.normal(0, noise_factor, size=coeffs[i].shape)
            # Reconstruct the signal after perturbation
            perturbed_value = pywt.waverec(coeffs, wavelet)[0]
            augmented_row[column] = perturbed_value
        augmented_data.append(augmented_row)
    return augmented_data
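
# Column-wise alternative (a sketch under assumptions, not the original method):
# decomposing an entire column at once gives the wavelet perturbation real
# multi-resolution structure instead of per-scalar jitter. The reconstruction is
# truncated to the original length because waverec may return one extra sample.
def augment_column_with_wavelet(series, wavelet='db1', level=2, noise_factor=0.01):
    values = pd.to_numeric(series, errors='coerce').dropna().values
    coeffs = pywt.wavedec(values, wavelet, level=level)
    for i in range(1, len(coeffs)):  # perturb only the detail coefficients
        coeffs[i] = coeffs[i] + np.random.normal(0, noise_factor, size=coeffs[i].shape)
    return pywt.waverec(coeffs, wavelet)[:len(values)]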
@app.route('/wal', methods=['POST'])
def process_data_augmentation3():
    try:
        data = request.json
        # Read the URL and download the file
        url = data.get('url', '')
        response = requests.get(url)
        response.raise_for_status()  # check that the request succeeded
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        signal_data = pd.read_csv(csv_data, header=0, encoding='utf-8')
        # If the "时间" column exists, separate it
        if "时间" in signal_data.columns:
            time_column = signal_data["时间"].values  # extract the time column
            signal_data = signal_data.drop(columns="时间")  # remove it for processing
        else:
            time_column = None
        # Apply wavelet-based data augmentation
        augmented_signals = augment_data_with_wavelet(signal_data.to_dict(orient='records'))
        # Convert augmented data back to a DataFrame
        augmented_df = pd.DataFrame(augmented_signals)
        # Limit the number of rows in the response
        max_rows = 1000  # adjust as needed
        if len(augmented_df) > max_rows:
            augmented_df = augmented_df.head(max_rows)
        # Make the time column length match the augmented data length
        if time_column is not None:
            if len(time_column) != len(augmented_df):
                time_column_aug = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
            else:
                time_column_aug = time_column
            augmented_df.insert(0, "时间", time_column_aug)
            # Restore the time column on the original rows so both frames share a schema
            signal_data.insert(0, "时间", time_column)
        # Append the augmented rows below the original data
        combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
        # Prepare response
        result = {
            'status': 200,
            'msg': '数据扩充完成',
            'data': combined_df.to_dict(orient='records')
        }
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# 4. Wavelet-coefficient perturbation algorithm
# Building on the wavelet transform, perturb the wavelet coefficients to
# generate new data representations for augmentation.
def augment_data_with_wavelet_coeffs(data, wavelet='db1', level=2, noise_factor=0.01, scale_factor=0.05):
    """
    Augment the data by perturbing the wavelet coefficients.
    :param data: The original data.
    :param wavelet: The type of wavelet to use (e.g., 'db1', 'haar', etc.).
    :param level: The level of decomposition in the wavelet transform.
    :param noise_factor: The standard deviation of noise to add to the high-frequency coefficients.
    :param scale_factor: The factor to scale the wavelet coefficients.
    :return: The augmented data after perturbing the wavelet coefficients.
    """
    augmented_data = []
    for row in data:
        augmented_row = {}
        for column, value in row.items():
            try:
                value = float(value)
            except ValueError:
                augmented_row[column] = value  # keep non-numeric columns unchanged
                continue
            # Perform wavelet decomposition on the value (degenerate for a single
            # scalar; see the column-wise sketch above)
            coeffs = pywt.wavedec([value], wavelet, level=level)
            # Perturb the coefficients (add noise and/or scale)
            for i in range(1, len(coeffs)):
                # Add noise to the high-frequency coefficients (i > 0)
                coeffs[i] += np.random.normal(0, noise_factor, size=coeffs[i].shape)
                # Optionally, scale the coefficients to create variation
                coeffs[i] *= (1 + np.random.uniform(-scale_factor, scale_factor))
            # Reconstruct the signal after perturbing the coefficients
            perturbed_value = pywt.waverec(coeffs, wavelet)[0]
            augmented_row[column] = perturbed_value
        augmented_data.append(augmented_row)
    return augmented_data
@app.route('/xbrd', methods=['POST'])
def process_data_augmentation4():
    try:
        data = request.json
        # Fetch the CSV file from the given URL
        url = data.get('url', '')
        response = requests.get(url)
        response.raise_for_status()  # Make sure the download succeeded
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        signal_data = pd.read_csv(csv_data, header=0, encoding='utf-8')
        # If the "时间" (time) column exists, separate it out
        if "时间" in signal_data.columns:
            time_column = signal_data["时间"].values        # Extract the time column
            signal_data = signal_data.drop(columns="时间")  # Drop it for numeric processing
        else:
            time_column = None
        # Apply the wavelet-coefficient perturbation augmentation
        augmented_signals = augment_data_with_wavelet_coeffs(signal_data.to_dict(orient='records'))
        # Convert the augmented data back to a DataFrame
        augmented_df = pd.DataFrame(augmented_signals)
        # Cap the number of augmented rows in the response
        max_rows = 1000  # Adjust as needed
        if len(augmented_df) > max_rows:
            augmented_df = augmented_df.head(max_rows)
        # Reattach the time column; restore it on the original rows too so the
        # concat below does not leave NaNs in their "时间" field
        if time_column is not None:
            signal_data.insert(0, "时间", time_column[:len(signal_data)])
            if len(time_column) != len(augmented_df):
                time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
            augmented_df.insert(0, "时间", time_column)
        # Append the augmented data below the original data
        combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
        result = {
            'status': 200,
            'msg': '数据扩充完成',  # "data augmentation complete"
            'data': combined_df.to_dict(orient='records')
        }
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
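# Example request (sketch; host/port follow the app.run() call at the bottom of
# this file, and the CSV url is a placeholder). The other augmentation endpoints
# (/xbcz, /zs, ...) accept the same payload:
#   curl -X POST http://localhost:8888/xbrd \
#        -H "Content-Type: application/json" \
#        -d '{"url": "http://example.com/signal.csv"}'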
# 5. Wavelet linear interpolation
# Combines the wavelet transform with linear interpolation to interpolate and
# smooth the data; usable for data augmentation.
def wavelet_linear_interpolation(data, wavelet='db1', level=2):
    """
    Apply wavelet-based linear interpolation to augment the data.
    :param data: The original data (list of row dicts).
    :param wavelet: The type of wavelet to use (e.g., 'db1', 'haar', etc.).
    :param level: The decomposition level of the wavelet transform.
    :return: The augmented data after wavelet-based interpolation.
    """
    augmented_data = []
    for row in data:
        augmented_row = {}
        for column, value in row.items():
            try:
                value = float(value)
            except ValueError:
                augmented_row[column] = value  # Keep non-numeric columns unchanged
                continue
            # Decompose the single value (a length-1 signal)
            coeffs = pywt.wavedec([value], wavelet, level=level)
            low_freq = coeffs[0]     # Approximation (low-frequency) coefficients
            high_freqs = coeffs[1:]  # Detail (high-frequency) coefficients
            # Interpolation needs at least two points
            if len(low_freq) < 2:
                augmented_row[column] = value  # Too short: keep the original value
                continue
            # Linearly interpolate the approximation coefficients, doubling their count
            interp_func = interp1d(np.arange(len(low_freq)), low_freq, kind='linear', fill_value='extrapolate')
            interpolated_low_freq = interp_func(np.linspace(0, len(low_freq) - 1, num=len(low_freq) * 2))
            # Likewise for each detail band
            interpolated_high_freqs = []
            for freq in high_freqs:
                if len(freq) < 2:  # Skip bands with fewer than two points
                    interpolated_high_freqs.append(freq)
                    continue
                interp_func = interp1d(np.arange(len(freq)), freq, kind='linear', fill_value='extrapolate')
                interpolated_high_freqs.append(interp_func(np.linspace(0, len(freq) - 1, num=len(freq) * 2)))
            # Recombine and reconstruct the signal from the interpolated coefficients
            new_coeffs = [interpolated_low_freq] + interpolated_high_freqs
            perturbed_value = pywt.waverec(new_coeffs, wavelet)[0]
            augmented_row[column] = perturbed_value
        augmented_data.append(augmented_row)
    return augmented_data
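# Quick sanity check of the point-doubling interpolation used above
# (illustrative values only):
#   >>> f = interp1d(np.arange(3), [1.0, 2.0, 3.0], kind='linear', fill_value='extrapolate')
#   >>> f(np.linspace(0, 2, num=6))
#   array([1. , 1.4, 1.8, 2.2, 2.6, 3. ])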
@app.route('/xbcz', methods=['POST'])
def process_data_augmentation5():
    try:
        data = request.json
        # Fetch the CSV file from the given URL
        url = data.get('url', '')
        response = requests.get(url)
        response.raise_for_status()  # Make sure the download succeeded
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        signal_data = pd.read_csv(csv_data, header=0, encoding='utf-8')
        # If the "时间" (time) column exists, separate it out
        if "时间" in signal_data.columns:
            time_column = signal_data["时间"].values        # Extract the time column
            signal_data = signal_data.drop(columns="时间")  # Drop it for numeric processing
        else:
            time_column = None
        # Apply the wavelet linear-interpolation augmentation
        augmented_signals = wavelet_linear_interpolation(signal_data.to_dict(orient='records'))
        # Convert the augmented data back to a DataFrame
        augmented_df = pd.DataFrame(augmented_signals)
        # Cap the number of augmented rows in the response
        max_rows = 1000  # Adjust as needed
        if len(augmented_df) > max_rows:
            augmented_df = augmented_df.head(max_rows)
        # Reattach the time column; restore it on the original rows too so the
        # concat below does not leave NaNs in their "时间" field
        if time_column is not None:
            signal_data.insert(0, "时间", time_column[:len(signal_data)])
            if len(time_column) != len(augmented_df):
                time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
            augmented_df.insert(0, "时间", time_column)
        # Append the augmented data below the original data
        combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
        result = {
            'status': 200,
            'msg': '数据扩充完成',  # "data augmentation complete"
            'data': combined_df.to_dict(orient='records')
        }
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
# 6. Additive noise
def augment_data(data, noise_factor=0.01):
    augmented_data = []
    # Walk over each row and add noise to every numeric value
    for row in data:
        augmented_row = {}
        for column, value in row.items():
            # Convert to numeric where possible (skips non-numeric columns such as "时间")
            try:
                value = float(value)
            except ValueError:
                augmented_row[column] = value  # Keep non-numeric columns (e.g. the time column) unchanged
                continue
            # Add zero-mean Gaussian noise
            noise = random.gauss(0, noise_factor)
            augmented_row[column] = value + noise
        augmented_data.append(augmented_row)
    return augmented_data
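# A vectorised equivalent that works on the DataFrame directly (sketch; the
# helper name is hypothetical). It perturbs every numeric column in one shot
# instead of looping over row dicts:
def _augment_dataframe_noise(df, noise_factor=0.01):
    noisy = df.copy()
    # Only numeric columns receive noise; object columns (e.g. "时间") pass through
    num_cols = noisy.select_dtypes(include=[np.number]).columns
    noisy[num_cols] += np.random.normal(0, noise_factor, size=(len(noisy), len(num_cols)))
    return noisy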
@app.route('/zs', methods=['POST'])
def process_data_augmentation6():
    try:
        data = request.json
        # Fetch the CSV file from the given URL
        url = data.get('url', '')
        response = requests.get(url)
        response.raise_for_status()  # Make sure the download succeeded
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        signal_data = pd.read_csv(csv_data, header=0, encoding='utf-8')
        # If the "时间" (time) column exists, separate it out
        if "时间" in signal_data.columns:
            time_column = signal_data["时间"].values        # Extract the time column
            signal_data = signal_data.drop(columns="时间")  # Drop it for numeric processing
        else:
            time_column = None
        # Apply the noise-only augmentation
        augmented_signals = augment_data(signal_data.to_dict(orient='records'))
        # Convert the augmented data back to a DataFrame
        augmented_df = pd.DataFrame(augmented_signals)
        # Reattach the time column; restore it on the original rows too so the
        # concat below does not leave NaNs in their "时间" field
        if time_column is not None:
            signal_data.insert(0, "时间", time_column[:len(signal_data)])
            if len(time_column) != len(augmented_df):
                time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
            augmented_df.insert(0, "时间", time_column)
        # Append the augmented data below the original data
        combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
        result = {
            'status': 200,
            'msg': '数据扩充完成',  # "data augmentation complete"
            'data': combined_df.to_dict(orient='records')
        }
        return jsonify(result)
    except Exception as e:
        return jsonify({
            'status': 500,
            'msg': str(e)
        })
def process_paiwen(df):
    # Detect exhaust-gas temperature (paiwen) swing during steady-throttle (youmen) segments
    df = np.array(df)
    youmen = df[1:, 0]  # throttle position
    paiwen = df[1:, 1]  # exhaust-gas temperature
    fs = 1000           # nominal sampling rate (not used further)
    num_fft = len(youmen) * 4  # zero-padded FFT length
    index = 1
    plot_index = 0
    str_res1 = '不存在摆动数据'  # "no swing data found"
    str_res2 = '摆动位置为:'    # "swing located at:"
    pos_res = []
    plt.figure()  # Fresh figure so plots do not accumulate across requests
    for i in range(len(youmen)):
        if index == 1:
            # Start a new steady-throttle segment
            paiwen_res = [paiwen[i]]
            youmen_res = [youmen[i]]
            index += 1
        else:
            if abs(youmen[i] - youmen[i - 1]) < 1 and youmen[i] > 1:
                # Throttle still steady: extend the current segment
                paiwen_res.append(paiwen[i])
                youmen_res.append(youmen[i])
                index += 1
            else:
                if index > 300:
                    # Segment long enough to analyse; drop the transient head
                    pos_res.append(i)
                    pos_res.append(index)
                    paiwen_res = paiwen_res[100:]
                    youmen_res = youmen_res[100:]
                    # Standardise both series
                    mean = np.mean(paiwen_res)
                    std = np.std(paiwen_res)
                    data = (paiwen_res - mean) / std
                    mean2 = np.mean(youmen_res)
                    std2 = np.std(youmen_res)
                    data2 = (youmen_res - mean2) / std2  # computed but not used further
                    # Zero-padded FFT of the standardised temperature segment
                    Y = fft(data, num_fft)
                    Y = np.abs(Y)
                    res = 20 * np.log10(Y[:num_fft // 2])  # log-magnitude spectrum (kept for inspection)
                    # For each threshold m in 0..59, count how often the spectrum
                    # crosses that level over the first ~1000 bins; many crossings
                    # indicate an oscillatory component (assumes num_fft >= 1000)
                    index2 = 0
                    for m in range(60):
                        index_j = 0
                        for j in range(998):
                            if (Y[j + 1] - m) * (Y[j + 2] - m) < 0:
                                index_j += 1
                        if index_j > index2:
                            index2 = index_j
                    if index2 > 10:
                        str_res2 = str_res2 + str(i - index + 101) + '-' + str(i) + ';'
                        k = np.arange(i - index + 101, i, 1)
                        if plot_index == 0:
                            plt.plot(k, paiwen_res, 'r', zorder=2, label='摆动数据')  # "swing data"
                            plot_index = 1
                        elif plot_index == 1:
                            # elif avoids plotting the labelled first segment twice
                            plt.plot(k, paiwen_res, 'r', zorder=2)
                index = 1
    plt.plot(paiwen, 'b', zorder=1, label='正常数据')  # "normal data"
    plt.legend()
    plt.savefig('baidong.jpg', dpi=600)
    str_res = str_res2 if plot_index == 1 else str_res1
    return str_res
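# The oscillation test above is a level-crossing count over the FFT magnitude.
# The same check in isolation (hypothetical helper; `spectrum` is a 1-D
# magnitude array of length >= width + 2, matching the hard-coded 998 above):
def _max_level_crossings(spectrum, max_level=60, width=998):
    best = 0
    for m in range(max_level):
        # Sign changes of (spectrum - m) between consecutive bins = crossings of level m
        crossings = np.sum((spectrum[1:width + 1] - m) * (spectrum[2:width + 2] - m) < 0)
        best = max(best, int(crossings))
    return best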
# Exhaust-gas temperature swing detection endpoint
@app.route('/pwbd', methods=['POST'])
def process_data_pqbd():
    try:
        data = request.json
        url = data.get('url', '')
        # Fetch the CSV data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})  # "failed to fetch data"
        str_res = process_paiwen(df)
        # Upload the rendered plot to the OSS service
        file_path = 'baidong.jpg'
        with open(file_path, 'rb') as f:
            response = requests.post('http://localhost:9090/als/resource/oss/upload', files={'file': f})
        res = json.loads(response.text)
        print(res)
        if res["code"] == 200:
            data1 = {"result": str_res, "ossId": res.get('data', {}).get('ossId', None)}
            return jsonify({'status': 200, 'data': data1})
        # Report the upstream failure instead of implicitly returning None
        return jsonify({'status': 500, 'data': None, 'msg': res.get('msg', '上传失败')})  # "upload failed"
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def process_maoci(df):
    data = np.array(df)
    data = data[1:, :]  # Drop the header row
    # Rows where the status flag (column 1) equals 1
    data0 = data[np.where(data[:, 1] == 1)]
    maoci = data0[:, 0]
    mean = np.mean(maoci)
    str_res1 = '不存在毛刺数据'    # "no glitch data found"
    str_res2 = '毛刺数据位置为:'  # "glitches located at:"
    # Flag samples that deviate from the mean by more than 1 while the flag is set
    time_list = np.where((data[:, 1] == 1) & (abs(data[:, 0] - mean) > 1))
    time_list = np.array(time_list)[0, :]
    plt.figure()
    # Draw the glitch markers above the normal trace (zorder swapped from the
    # original, which hid the markers underneath the line)
    plt.scatter(time_list, data[time_list, 0], c='r', marker='o', zorder=2, label='毛刺数据')  # "glitch data"
    plt.plot(data[:, 0], c='b', zorder=1, label='正常数据')  # "normal data"
    plt.legend()
    plt.savefig('maoci.jpg', dpi=600)
    if time_list.shape[0] > 0:
        for i in time_list:
            str_res2 = str_res2 + str(i) + ';'
        str_res = str_res2
    else:
        str_res = str_res1
    return str_res
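# A scale-aware variant of the hard-coded ±1 threshold (sketch, not used by the
# route): flag samples more than k standard deviations from the mean instead of
# a fixed offset, so the test adapts to the signal's spread.
#   std = np.std(maoci)
#   time_list = np.where((data[:, 1] == 1) & (np.abs(data[:, 0] - mean) > 3 * std))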
# Glitch detection endpoint
@app.route('/maoci', methods=['POST'])
def process_data_maoci():
    try:
        data = request.json
        url = data.get('url', '')
        # Fetch the CSV data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})  # "failed to fetch data"
        str_res = process_maoci(df)
        # Upload the rendered plot to the OSS service
        file_path = 'maoci.jpg'
        with open(file_path, 'rb') as f:
            response = requests.post('http://localhost:9090/als/resource/oss/upload', files={'file': f})
        res = json.loads(response.text)
        print(res)
        if res["code"] == 200:
            data1 = {"result": str_res, "ossId": res.get('data', {}).get('ossId', None)}
            return jsonify({'status': 200, 'data': data1})
        # Report the upstream failure instead of implicitly returning None
        return jsonify({'status': 500, 'data': None, 'msg': res.get('msg', '上传失败')})  # "upload failed"
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def process_tiaobian(df):
    youliang = np.array(df)  # fuel quantity
    youliang = youliang[1:]  # Drop the header row
    index = 0
    plot_index = 0
    str_res1 = '不存在跳变数据'  # "no jump data found"
    str_res2 = '跳变位置为:'    # "jumps located at:"
    plt.figure()  # Fresh figure so plots do not accumulate across requests
    for i in np.arange(50, len(youliang) - 50):
        # Grow the run while the sample (index + 1) ahead is >50% above the current one
        if (youliang[i + index + 1] - youliang[i]) / youliang[i] > 0.5:
            index += 1
        if index > 0 and (youliang[i + index + 1] - youliang[i]) / youliang[i] < 0.5:
            # The jump has ended: record and plot the segment
            str_res2 = str_res2 + str(i - index - 1) + '-' + str(i) + ';'
            k = np.arange(i - index - 1, i + 1, 1)
            if plot_index == 0:
                plt.plot(k, youliang[i - index - 1: i + 1], 'r', zorder=2, label='跳变数据')  # "jump data"
                plot_index = 1
            elif plot_index == 1:
                # elif avoids plotting the labelled first segment twice
                plt.plot(k, youliang[i - index - 1: i + 1], 'r', zorder=2)
            index = 0
    plt.plot(youliang, 'b', zorder=1, label='正常数据')  # "normal data"
    plt.legend()
    plt.savefig('tiaobian.jpg', dpi=600)
    return str_res2 if plot_index == 1 else str_res1
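# A single-step vectorised variant of the run-tracking loop above (sketch; the
# helper name is hypothetical). It returns the indices where the relative
# increase between consecutive samples exceeds the threshold:
def _find_jumps(series, rel_threshold=0.5):
    series = np.asarray(series, dtype=float).ravel()
    # Relative change of each sample with respect to the previous one
    rel_change = np.diff(series) / series[:-1]
    return np.where(rel_change > rel_threshold)[0]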
# Data jump detection endpoint
@app.route('/tiaobian', methods=['POST'])
def process_data_tiaobian():
    try:
        data = request.json
        url = data.get('url', '')
        # Fetch the CSV data
        df = read_csv_from_url(url)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})  # "failed to fetch data"
        str_res = process_tiaobian(df)
        # Upload the rendered plot to the OSS service
        file_path = 'tiaobian.jpg'
        with open(file_path, 'rb') as f:
            response = requests.post('http://localhost:9090/als/resource/oss/upload', files={'file': f})
        res = json.loads(response.text)
        print(res)
        if res["code"] == 200:
            data1 = {"result": str_res, "ossId": res.get('data', {}).get('ossId', None)}
            return jsonify({'status': 200, 'data': data1})
        # Report the upstream failure instead of implicitly returning None
        return jsonify({'status': 500, 'data': None, 'msg': res.get('msg', '上传失败')})  # "upload failed"
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
# Oxygen concentrator degradation (the original comment said "data jump", a copy-paste slip)
def process_tuihua_YNQ(df):
    data = np.array(df)  # convert first: numpy-style data[:, 7] indexing fails on a raw DataFrame
    # Keep rows whose column 7 falls inside the (1800, 2150) operating window
    a_4 = data[np.where((data[:, 7] > 1800) & (data[:, 7] < 2150))]
    plt.figure()  # Fresh figure so plots do not accumulate across requests
    plt.plot(a_4[:, 2], label='氧气浓缩器退化数据')  # "oxygen concentrator degradation data"
    plt.legend()  # Without this, the label above was never shown
    plt.savefig('tuihua_YNQ.jpg', dpi=600)
@app.route('/tuihua_YNQ', methods=['POST'])
def process_data_tuihua_YNQ():
    try:
        data = request.json
        urls = data.get('urls', '')
        # Fetch and merge the CSV data from multiple urls
        df = read_csv_from_urls(urls)
        if df is None:
            return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})  # "failed to fetch data"
        process_tuihua_YNQ(df)
        # Upload the rendered plot to the OSS service
        file_path = 'tuihua_YNQ.jpg'
        with open(file_path, 'rb') as f:
            response = requests.post('http://localhost:9090/als/resource/oss/upload', files={'file': f})
        res = json.loads(response.text)
        if res["code"] == 200:
            data1 = {"ossId": res.get('data', {}).get('ossId', None)}
            return jsonify({'status': 200, 'data': data1})
        # Report the upstream failure instead of implicitly returning None
        return jsonify({'status': 500, 'data': None, 'msg': res.get('msg', '上传失败')})  # "upload failed"
    except Exception as e:
        return jsonify({'status': 500, 'data': None, 'msg': str(e)})
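# Example request (sketch; urls are placeholders). Unlike the single-url
# endpoints above, this one takes a list of CSV urls:
#   curl -X POST http://localhost:8888/tuihua_YNQ \
#        -H "Content-Type: application/json" \
#        -d '{"urls": ["http://example.com/a.csv", "http://example.com/b.csv"]}'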
# Main entry point
if __name__ == "__main__":
    app.run(debug=True, port=8888)
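# To run the service and exercise an endpoint locally (sketch; the script file
# name is an assumption, the port follows app.run() above):
#   python app.py
#   curl -X POST http://localhost:8888/zs -H "Content-Type: application/json" \
#        -d '{"url": "http://example.com/signal.csv"}'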