all-api-v1.py 83 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492
  1. from sklearn.cluster import KMeans
  2. import json
  3. from sklearn.decomposition import PCA
  4. from scipy.stats import linregress
  5. import matplotlib
  6. import matplotlib.pyplot as plt
  7. from sklearn.ensemble import IsolationForest
  8. matplotlib.rcParams['font.family']='SimHei'
  9. from scipy.spatial.distance import euclidean
  10. from io import BytesIO,StringIO
  11. from scipy.fft import fft, ifft
  12. import pandas as pd
  13. from flask import Flask, request, jsonify
  14. from io import BytesIO
  15. import numpy as np
  16. import random
  17. import requests
  18. import pywt
  19. from scipy.interpolate import interp1d
  20. app = Flask(__name__)
  21. @app.route('/hdck', methods=['POST'])
  22. def process_data():
  23. try:
  24. # 获取请求数据
  25. data = request.json
  26. url = data.get('url', '')
  27. # 读取 CSV 数据
  28. df = read_csv_from_url(url)
  29. if df is None:
  30. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  31. # 故障检测结果(调用滑动窗口方法)
  32. detection_status = sliding_window_fault_detection(df)
  33. # 返回 JSON 结果
  34. return jsonify({
  35. 'status': 200,
  36. 'data': detection_status,
  37. 'msg': '故障检测完成'
  38. })
  39. except Exception as e:
  40. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  41. #循环读取
  42. def read_csv_from_urls(urls):
  43. try:
  44. index = 0
  45. res = np.array([])
  46. for url in urls:
  47. response = requests.get(url)
  48. response.raise_for_status()
  49. csv_file = BytesIO(response.content)
  50. data1 = pd.read_csv(csv_file, encoding='utf-8')
  51. data1 = np.array(data1)[1:, :]
  52. if index == 0:
  53. res = data1
  54. index = 1
  55. else:
  56. res = np.append(res, data1, axis=0)
  57. data = res
  58. print("CSV 文件读取成功")
  59. return data
  60. except requests.exceptions.RequestException as e:
  61. print(f"获取文件时发生错误: {e}")
  62. return None
  63. except Exception as e:
  64. print(f"读取 CSV 文件时发生错误: {e}")
  65. return None
  66. # 读取 CSV 数据
  67. def read_csv_from_url(url):
  68. try:
  69. response = requests.get(url)
  70. response.raise_for_status()
  71. print("文件获取成功")
  72. csv_file = BytesIO(response.content)
  73. data = pd.read_csv(csv_file, encoding='utf-8')
  74. print("CSV 文件读取成功")
  75. return data
  76. except requests.exceptions.RequestException as e:
  77. print(f"获取文件时发生错误: {e}")
  78. return None
  79. except Exception as e:
  80. print(f"读取 CSV 文件时发生错误: {e}")
  81. return None
  82. # 基于滑动窗口的故障检测(统一检测)
  83. def sliding_window_fault_detection(data, window_size=5, threshold=2):
  84. """ 故障检测,基于滑动窗口均值和标准差检测 """
  85. # 定义统一检测的阈值
  86. thresholds = {
  87. '发动机排气温度': 1000,
  88. '燃油流量': 200,
  89. '高压压气机转子转数': 8000,
  90. '风扇转子转速': 3000,
  91. '氧气输出流量': 100,
  92. '氧气浓度': 15,
  93. '供氧压力': 1.5,
  94. '氧分压值': (20, 30)
  95. }
  96. # 遍历所有指标
  97. for param, limit in thresholds.items():
  98. if param in data.columns:
  99. rolling_mean = data[param].rolling(window=window_size).mean()
  100. rolling_std = data[param].rolling(window=window_size).std()
  101. # 判断是否异常
  102. if isinstance(limit, tuple): # 如果是区间阈值
  103. anomalies = (data[param] < limit[0]) | (data[param] > limit[1])
  104. else:
  105. anomalies = (data[param] - rolling_mean).abs() > threshold * rolling_std
  106. if anomalies.any():
  107. return "状态异常" # 如果任一指标异常,直接返回状态异常
  108. return "无故障" # 如果所有指标均正常,返回状态正常
  109. # 基于k-Means聚类的故障检测(统一检测)
  110. @app.route('/kmeans', methods=['POST'])
  111. def process_data_kmeans():
  112. try:
  113. # 获取请求数据
  114. data = request.json
  115. url = data.get('url', '')
  116. # 读取 CSV 数据
  117. df = read_csv_from_url(url)
  118. if df is None:
  119. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  120. # 故障检测结果(调用k-Means方法)
  121. detection_status = kmeans_fault_detection(df)
  122. # 返回 JSON 结果
  123. return jsonify({
  124. 'status': 200,
  125. 'data': detection_status,
  126. 'msg': '故障检测完成'
  127. })
  128. except Exception as e:
  129. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  130. def kmeans_fault_detection(data):
  131. """ 基于 k-Means 的故障检测 """
  132. features = ['发动机排气温度', '燃油流量', '高压压气机转子转数', '风扇转子转速',
  133. '氧气输出流量', '氧气浓度', '供氧压力', '氧分压值']
  134. valid_features = [col for col in features if col in data.columns]
  135. if not valid_features:
  136. return "状态正常" # 如果没有有效的特征列,返回正常状态
  137. X = data[valid_features].dropna()
  138. kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
  139. cluster_centers = kmeans.cluster_centers_
  140. # 计算距离矩阵
  141. distances = np.linalg.norm(X.values[:, None, :] - cluster_centers, axis=2)
  142. # 判断是否存在异常
  143. if (distances > np.percentile(distances, 90)).any():
  144. return "状态异常" # 如果距离异常,返回状态异常
  145. return "无故障" # 如果无异常,返回状态正常
  146. # 基于阈值判断的故障诊断算法
  147. @app.route('/yzpd', methods=['POST'])
  148. def process_threshold():
  149. try:
  150. data = request.json
  151. url = data.get('url', '')
  152. df = read_csv_from_url(url)
  153. if df is None:
  154. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  155. # 执行故障诊断
  156. diagnosis = threshold_based_diagnosis(df)
  157. return jsonify({
  158. 'status': 200,
  159. 'data': diagnosis,
  160. 'msg': '故障诊断完成'
  161. })
  162. except Exception as e:
  163. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  164. def threshold_based_diagnosis(data):
  165. """ 基于阈值的故障诊断 """
  166. thresholds = {
  167. '发动机排气温度': 1000,
  168. '燃油流量': 200,
  169. '高压压气机转子转数': 8000,
  170. '风扇转子转速': 3000,
  171. '氧气输出流量': 100,
  172. '氧气浓度': 15,
  173. '供氧压力': 1.5,
  174. '氧分压值': (20, 30)
  175. }
  176. abnormal_params = []
  177. for param, limit in thresholds.items():
  178. if param in data.columns:
  179. values = data[param]
  180. if isinstance(limit, tuple): # 区间阈值
  181. if (values < limit[0]).any() or (values > limit[1]).any():
  182. abnormal_params.append(f"{param}有误")
  183. else: # 单一阈值
  184. if (values > limit).any():
  185. abnormal_params.append(f"{param}有误")
  186. return abnormal_params if abnormal_params else ["正常"]
  187. ### 方法二:基于马氏距离的故障诊断
  188. @app.route('/msjl', methods=['POST'])
  189. def process_mahalanobis():
  190. try:
  191. data = request.json
  192. url = data.get('url', '')
  193. df = read_csv_from_url(url)
  194. if df is None:
  195. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  196. # 执行故障诊断
  197. diagnosis = mahalanobis_based_diagnosis(df)
  198. return jsonify({
  199. 'status': 200,
  200. 'data': diagnosis,
  201. 'msg': '故障诊断完成'
  202. })
  203. except Exception as e:
  204. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  205. def mahalanobis_based_diagnosis(data):
  206. """ 基于马氏距离的故障诊断 """
  207. features = ['发动机排气温度', '燃油流量', '高压压气机转子转数', '风扇转子转速',
  208. '氧气输出流量', '氧气浓度', '供氧压力', '氧分压值']
  209. valid_features = [col for col in features if col in data.columns]
  210. if not valid_features:
  211. return ["正常"]
  212. # 检查数据是否足够
  213. values = data[valid_features].dropna()
  214. if values.empty or len(values) < 2: # 至少需要两个样本计算协方差
  215. return ["数据不足,无法计算马氏距离"]
  216. # 计算均值和协方差矩阵
  217. try:
  218. mean = values.mean().values
  219. cov_matrix = np.cov(values.T)
  220. # 检查协方差矩阵是否是可逆的
  221. if np.linalg.det(cov_matrix) == 0:
  222. return ["协方差矩阵不可逆,无法计算马氏距离"]
  223. cov_inv = np.linalg.inv(cov_matrix)
  224. except Exception as e:
  225. return [f"协方差计算错误: {e}"]
  226. abnormal_params = []
  227. for _, row in values.iterrows():
  228. x = row.values.reshape(-1, 1) # 转为列向量
  229. diff = x - mean.reshape(-1, 1)
  230. try:
  231. distance = np.sqrt(diff.T @ cov_inv @ diff)[0][0] # 计算马氏距离
  232. threshold = np.percentile(distance, 95) # 设定一个异常距离阈值
  233. if distance > threshold:
  234. abnormal_params.append(f"{row.name}有误") # 使用行索引标注异常
  235. except Exception as e:
  236. abnormal_params.append(f"马氏距离计算错误: {e}")
  237. return abnormal_params if abnormal_params else ["正常"]
  238. ### 方法三:基于PCA的故障诊断
  239. @app.route('/pca', methods=['POST'])
  240. def process_pca():
  241. try:
  242. data = request.json
  243. url = data.get('url', '')
  244. df = read_csv_from_url(url)
  245. if df is None:
  246. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  247. # 执行故障诊断
  248. diagnosis = pca_based_diagnosis(df)
  249. return jsonify({
  250. 'status': 200,
  251. 'data': diagnosis,
  252. 'msg': '故障诊断完成'
  253. })
  254. except Exception as e:
  255. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  256. def pca_based_diagnosis(data):
  257. """ 基于PCA的故障诊断 """
  258. features = ['发动机排气温度', '燃油流量', '高压压气机转子转数', '风扇转子转速',
  259. '氧气输出流量', '氧气浓度', '供氧压力', '氧分压值']
  260. valid_features = [col for col in features if col in data.columns]
  261. if not valid_features:
  262. return ["正常"]
  263. values = data[valid_features].dropna()
  264. pca = PCA(n_components=0.9) # 保留90%的方差
  265. pca.fit(values)
  266. # 计算重建误差
  267. data_reconstructed = pca.inverse_transform(pca.transform(values))
  268. reconstruction_error = np.mean((values - data_reconstructed) ** 2, axis=1)
  269. abnormal_params = []
  270. for feature in valid_features:
  271. if np.any(reconstruction_error > np.percentile(reconstruction_error, 95)):
  272. abnormal_params.append(f"{feature}有误")
  273. return abnormal_params if abnormal_params else ["正常"]
  274. # 基于线性归一算法的退化评估
  275. @app.route('/xxgy', methods=['POST'])
  276. def process_data4():
  277. try:
  278. data = request.json
  279. # 检查请求体中是否包含 url
  280. url = data.get('url', '')
  281. # 读取数据
  282. df = read_csv_from_url(url)
  283. if df is None:
  284. print("数据获取失败,程序终止。")
  285. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  286. # 退化评估
  287. degradation_results = calculate_all_degradation(df)
  288. results = format_degradation_results(degradation_results)
  289. return jsonify({
  290. 'status': 200,
  291. 'data': results,
  292. 'msg': '退化评估完成'
  293. })
  294. except Exception as e:
  295. return jsonify({
  296. 'status': 500,
  297. 'data': None,
  298. 'msg': str(e)
  299. })
  300. # 退化计算函数,假设通过简单的标准化(归一化)计算退化程度
  301. def calculate_degradation(parameter_data):
  302. # 假设退化度 = (当前值 - 最小值) / (最大值 - 最小值) * 100
  303. min_val = parameter_data.min()
  304. max_val = parameter_data.max()
  305. degradation = (parameter_data - min_val) / (max_val - min_val) * 100
  306. return degradation
  307. # 计算各项退化程度
  308. def calculate_all_degradation(data):
  309. # 对数据中的每一列进行退化计算
  310. degradation_results = {}
  311. # 假设我们选择几个重要的列进行退化计算
  312. parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
  313. for param in parameters:
  314. if param in data.columns:
  315. degradation_results[param] = calculate_degradation(data[param])
  316. return degradation_results
  317. # 格式化退化结果
  318. def format_degradation_results(degradation_results):
  319. print("退化评估结果:")
  320. results = {}
  321. for param, degradation in degradation_results.items():
  322. avg_degradation = np.mean(degradation) # 退化的平均值
  323. print(f"{param} 退化程度: {avg_degradation:.2f}%")
  324. results[param] = f"退化程度:{avg_degradation:.2f}%"
  325. return results
  326. # 基于趋势分析法的退化评估
  327. @app.route('/qsfx', methods=['POST'])
  328. def process_data5():
  329. try:
  330. # 获取请求中的 URL
  331. data = request.json
  332. url = data.get('url', '')
  333. # 检查 URL 是否有效
  334. if not url:
  335. return jsonify({
  336. 'status': 400,
  337. 'data': None,
  338. 'msg': '请求体中未提供 URL'
  339. })
  340. # 读取 CSV 数据
  341. csv_data = read_csv_from_url(url)
  342. if csv_data is None:
  343. return jsonify({
  344. 'status': 500,
  345. 'data': None,
  346. 'msg': '无法获取或读取 CSV 文件'
  347. })
  348. # 趋势分析法退化评估
  349. trend_results = calculate_trend_degradation(csv_data)
  350. # 返回 JSON 格式的结果
  351. return jsonify({
  352. 'status': 200,
  353. 'data': trend_results,
  354. 'msg': '退化评估完成'
  355. })
  356. except Exception as e:
  357. return jsonify({
  358. 'status': 500,
  359. 'data': None,
  360. 'msg': str(e)
  361. })
  362. # 基于趋势分析法算法
  363. def calculate_trend_degradation(data):
  364. results = {}
  365. parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
  366. for param in parameters:
  367. if param in data.columns:
  368. y = pd.to_numeric(data[param], errors='coerce').dropna()
  369. if len(y) < 2:
  370. results[param] = {
  371. '退化趋势': '数据不足,无法计算',
  372. '退化百分比': 'N/A'
  373. }
  374. continue
  375. x = pd.Series(range(len(y))) # 用数据索引作为 x 轴
  376. initial_value = y.iloc[0] # 假设初始值为第一点数据
  377. # 执行线性回归
  378. slope, intercept, r_value, p_value, std_err = linregress(x, y)
  379. # 计算退化百分比并取绝对值
  380. final_change = slope * len(y) # 总的变化量
  381. degradation_percentage = abs((final_change / initial_value) * 100) if initial_value != 0 else 0
  382. # 判断退化趋势
  383. if degradation_percentage > 20:
  384. trend = "显著退化"
  385. elif 10 < degradation_percentage <= 20:
  386. trend = "中度退化"
  387. elif 0 < degradation_percentage <= 10:
  388. trend = "轻微退化"
  389. else:
  390. trend = "无明显退化"
  391. results[param] = {
  392. '退化趋势': trend,
  393. '退化百分比': f"{degradation_percentage:.2f}%" # 保留两位小数
  394. }
  395. return results
  396. # 基于统计方法的退化评估
  397. @app.route('/tjthpg', methods=['POST'])
  398. def process_data6():
  399. try:
  400. # 获取请求中的 URL
  401. data = request.json
  402. url = data.get('url', '')
  403. # 检查 URL 是否有效
  404. if not url:
  405. return jsonify({
  406. 'status': 400,
  407. 'data': None,
  408. 'msg': '请求体中未提供 URL'
  409. })
  410. # 读取 CSV 数据
  411. csv_data = read_csv_from_url(url)
  412. if csv_data is None:
  413. return jsonify({
  414. 'status': 500,
  415. 'data': None,
  416. 'msg': '无法获取或读取 CSV 文件'
  417. })
  418. # 统计方法退化评估
  419. statistics_results = calculate_statistics_degradation(csv_data)
  420. # 返回 JSON 格式的结果
  421. return jsonify({
  422. 'status': 200,
  423. 'data': statistics_results,
  424. 'msg': '退化评估完成'
  425. })
  426. except Exception as e:
  427. return jsonify({
  428. 'status': 500,
  429. 'data': None,
  430. 'msg': str(e)
  431. })
  432. # 基于统计的方法算法
  433. def calculate_statistics_degradation(data):
  434. results = {}
  435. parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
  436. for param in parameters:
  437. if param in data.columns:
  438. mean_val = data[param].mean()
  439. var_val = data[param].var()
  440. max_val = data[param].max()
  441. min_val = data[param].min()
  442. baseline_mean = data[param].iloc[0] # 假设第一行数据是基准值
  443. # 计算退化指标
  444. mean_shift = mean_val - baseline_mean
  445. degradation_percentage = abs(mean_shift / baseline_mean) * 100 # 退化程度百分比
  446. degradation_level = (
  447. "显著退化" if degradation_percentage > 10 else
  448. "轻微退化" if degradation_percentage > 0 else
  449. "未退化"
  450. )
  451. results[param] = {
  452. '均值': f"{mean_val:.2f}",
  453. '方差': f"{var_val:.2f}",
  454. '最大值': f"{max_val:.2f}",
  455. '最小值': f"{min_val:.2f}",
  456. '均值偏移': f"{mean_shift:.2f}",
  457. '退化百分比': f"{degradation_percentage:.2f}%",
  458. '退化程度': degradation_level
  459. }
  460. return results
  461. # 基于阈值比较的退化评估
  462. @app.route('/yzthpg', methods=['POST'])
  463. def process_data7():
  464. try:
  465. global thresholds
  466. # 获取请求中的 URL
  467. data = request.json
  468. url = data.get('url', '')
  469. # 检查 URL 是否有效
  470. if not url:
  471. return jsonify({
  472. 'status': 400,
  473. 'data': None,
  474. 'msg': '请求体中未提供 URL'
  475. })
  476. # 读取 CSV 数据
  477. csv_data = read_csv_from_url(url)
  478. if csv_data is None:
  479. return jsonify({
  480. 'status': 500,
  481. 'data': None,
  482. 'msg': '无法获取或读取 CSV 文件'
  483. })
  484. # 阈值设定
  485. thresholds = {
  486. '发动机排气温度': (300, 1000), # 示例温度范围
  487. '燃油流量': (10, 50), # 示例流量范围
  488. '氧气输出流量': (5, 100),
  489. '氧气浓度': (0.1, 0.3), # 假设浓度范围
  490. '供氧压力': (0.5, 1.5) # 假设压力范围
  491. }
  492. # 调用退化评估方法
  493. threshold_results = calculate_threshold_degradation(csv_data)
  494. # 返回 JSON 格式的结果
  495. return jsonify({
  496. 'status': 200,
  497. 'data': threshold_results,
  498. 'msg': '退化评估完成'
  499. })
  500. except Exception as e:
  501. return jsonify({
  502. 'status': 500,
  503. 'data': None,
  504. 'msg': str(e)
  505. })
  506. # 基于阈值比较的退化评估算法
  507. def calculate_threshold_degradation(data):
  508. results = {}
  509. for param in ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']:
  510. if param in data.columns:
  511. values = pd.to_numeric(data[param], errors='coerce').dropna()
  512. initial_value = values.iloc[0] # 假设第一点为初始值
  513. # 判断退化百分比
  514. current_mean = values.mean()
  515. if initial_value == 0:
  516. degradation_percentage = 0
  517. else:
  518. degradation_percentage = abs((current_mean - initial_value) / initial_value) * 100
  519. # 分类退化趋势
  520. if degradation_percentage > 20:
  521. trend = "显著退化"
  522. elif 10 < degradation_percentage <= 20:
  523. trend = "中度退化"
  524. elif 0 < degradation_percentage <= 10:
  525. trend = "轻微退化"
  526. else:
  527. trend = "无明显退化"
  528. # 保存结果
  529. results[param] = {
  530. '当前均值': f"{current_mean:.2f}",
  531. '初始值': f"{initial_value:.2f}",
  532. '退化百分比': f"{degradation_percentage:.2f}%",
  533. '退化趋势': trend
  534. }
  535. return results
  536. # 基于简单阈值和趋势分析的故障预测
  537. @app.route('/jdyzqsfx', methods=['POST'])
  538. def process_trend():
  539. try:
  540. data = request.json
  541. url = data.get('url', '')
  542. # 读取数据
  543. df = read_csv_from_url(url)
  544. if df is None:
  545. return jsonify({'status': 500, 'data': None, 'msg': '无法获取或读取 CSV 文件'})
  546. # 执行故障预测
  547. prediction = trend_analysis_prediction(df)
  548. # 返回结果
  549. return jsonify({
  550. 'status': 200,
  551. 'data': {'故障概率': f"{prediction:.2f}%"},
  552. 'msg': '故障预测完成'
  553. })
  554. except Exception as e:
  555. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  556. def trend_analysis_prediction(data):
  557. """ 使用趋势分析预测故障概率 """
  558. features = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度']
  559. slopes = []
  560. for feature in features:
  561. if feature in data.columns:
  562. series = pd.to_numeric(data[feature], errors='coerce').dropna()
  563. if len(series) > 1:
  564. x = range(len(series))
  565. slope, _, _, _, _ = linregress(x, series)
  566. slopes.append(slope)
  567. # 计算故障概率(假设正斜率为异常)
  568. fault_probability = sum(1 for slope in slopes if slope > 0) / len(features) * 100
  569. return fault_probability
  570. # ARIMA 模型故障预测
  571. # Isolation Forest 异常检测故障预测
  572. @app.route('/ycjc', methods=['POST'])
  573. def process_isolation():
  574. try:
  575. data = request.json
  576. url = data.get('url', '')
  577. # 读取数据
  578. df = read_csv_from_url(url)
  579. if df is None:
  580. return jsonify({'status': 500, 'data': None, 'msg': '无法获取或读取 CSV 文件'})
  581. # 执行故障预测
  582. prediction = isolation_forest_prediction(df)
  583. # 返回结果
  584. return jsonify({
  585. 'status': 200,
  586. 'data': {'故障概率': f"{prediction:.2f}%"},
  587. 'msg': '故障预测完成'
  588. })
  589. except Exception as e:
  590. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  591. def isolation_forest_prediction(data):
  592. features = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度']
  593. total_points = 0
  594. fault_points = 0
  595. for feature in features:
  596. if feature in data.columns:
  597. series = pd.to_numeric(data[feature], errors='coerce').dropna().values.reshape(-1, 1)
  598. if len(series) > 10: # 数据点必须足够多
  599. isolation_forest = IsolationForest(contamination=0.1, random_state=42)
  600. isolation_forest.fit(series)
  601. predictions = isolation_forest.predict(series)
  602. fault_points += np.sum(predictions == -1) # 异常点数量
  603. total_points += len(series)
  604. fault_probability = (fault_points / total_points) * 100 if total_points > 0 else 0
  605. return fault_probability
  606. # 基于线性回归的寿命预测
  607. @app.route('/xxhg', methods=['POST'])
  608. def process_data11():
  609. try:
  610. # 获取请求中的 URL
  611. data = request.json
  612. url = data.get('url', '')
  613. # 检查 URL 是否有效
  614. if not url:
  615. return jsonify({'status': 400, 'data': None, 'msg': '请求体中未提供 URL'})
  616. # 读取 CSV 数据
  617. df = read_csv_from_url(url)
  618. if df is None:
  619. return jsonify({'status': 500, 'data': None, 'msg': '无法获取或读取 CSV 文件'})
  620. # 预测发动机和氧浓缩器寿命
  621. engine_life = linear_regression_predict_life1(df, "发动机排气温度", 800)
  622. # 返回 JSON 结果
  623. return jsonify({
  624. 'status': 200,
  625. 'data': {
  626. '剩余寿命': f"{engine_life:.2f} 小时" if engine_life is not None else "数据缺失,无法预测",
  627. },
  628. 'msg': '寿命预测完成'
  629. })
  630. except Exception as e:
  631. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  632. # 线性回归预测寿命
  633. def linear_regression_predict_life1(data, column_name, failure_threshold, max_life=50000, min_life=50):
  634. if column_name not in data.columns:
  635. return "数据缺失,无法预测"
  636. # 数据平滑处理,减少噪声干扰
  637. y = pd.to_numeric(data[column_name], errors='coerce').dropna()
  638. if len(y) < 2:
  639. return "数据不足,无法预测"
  640. y_smoothed = y.rolling(window=5, min_periods=1).mean() # 滑动平均平滑数据
  641. x = np.arange(len(y_smoothed))
  642. # 执行线性回归
  643. slope, intercept, r_value, p_value, std_err = linregress(x, y_smoothed)
  644. # 确保斜率为负(下降趋势),如果斜率接近零,给出最低预测寿命
  645. if slope >= -0.01: # 设置最低斜率阈值,防止结果过小
  646. slope = -0.01
  647. # 计算剩余寿命
  648. current_value = y_smoothed.iloc[-1]
  649. time_to_failure = (failure_threshold - current_value) / abs(slope)
  650. time_to_failure = max(min_life, min(max_life, time_to_failure)) # 约束寿命范围
  651. return round(time_to_failure, 2) # 保留两位小数
  652. # 指数衰减寿命预测
  653. @app.route('/exp', methods=['POST'])
  654. def process_data_exp_life():
  655. try:
  656. # 获取请求中的 URL
  657. data = request.json
  658. url = data.get('url', '')
  659. # 检查 URL 是否有效
  660. if not url:
  661. return jsonify({'status': 400, 'data': None, 'msg': '请求体中未提供 URL'})
  662. # 读取 CSV 数据
  663. df = read_csv_from_url(url)
  664. if df is None:
  665. return jsonify({'status': 500, 'data': None, 'msg': '无法获取或读取 CSV 文件'})
  666. # 使用指数衰减模型预测寿命
  667. engine_life = exponential_decay_predict_life(df, "发动机排气温度", 800)
  668. # 返回 JSON 结果
  669. return jsonify({
  670. 'status': 200,
  671. 'data': {
  672. '剩余寿命': engine_life,
  673. },
  674. 'msg': '寿命预测完成'
  675. })
  676. except Exception as e:
  677. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  678. # 基于指数衰减模型的寿命预测
  679. def exponential_decay_predict_life(data, column_name, failure_threshold, max_life=50000, min_life=50):
  680. if column_name not in data.columns:
  681. return "数据缺失,无法预测"
  682. # 提取时间序列
  683. y = pd.to_numeric(data[column_name], errors='coerce').dropna()
  684. if len(y) < 2:
  685. return "数据不足,无法预测"
  686. # 数据平滑处理
  687. y_smoothed = y.rolling(window=5, min_periods=1).mean() # 滑动平均平滑数据
  688. x = np.arange(len(y_smoothed))
  689. # 对数据取对数,拟合指数模型
  690. log_y = np.log(y_smoothed)
  691. slope, intercept, r_value, p_value, std_err = linregress(x, log_y)
  692. # 计算剩余寿命
  693. try:
  694. time_to_failure = (np.log(failure_threshold) - intercept) / slope
  695. time_to_failure = max(min_life, min(max_life, time_to_failure)) # 约束寿命范围
  696. return f"{round(time_to_failure, 2)} 小时" # 保留两位小数
  697. except ValueError:
  698. return "预测失败,可能是数据或阈值无效"
  699. # 虚警检测-CACFAR算法
  700. @app.route('/CACFAR', methods=['POST'])
  701. def process_data13():
  702. try:
  703. # 获取请求中的 URL
  704. data = request.json
  705. url = data.get('url', '')
  706. # 检查 URL 是否有效
  707. if not url:
  708. return jsonify({
  709. 'status': 400,
  710. 'msg': 'No URL provided in the request body.'
  711. })
  712. # 读取 CSV 数据
  713. csv_data = read_csv_from_url(url)
  714. if csv_data is None:
  715. return jsonify({
  716. 'status': 500,
  717. 'msg': 'Failed to fetch or read the CSV file.'
  718. })
  719. # 计算虚警抑制结果
  720. suppression_results = calculate_all_suppression(csv_data)
  721. # 获取虚警抑制结果
  722. results = print_suppression_results(suppression_results)
  723. # 返回 JSON 格式的结果
  724. return jsonify({
  725. 'status': 200,
  726. 'msg': '虚警抑制完成',
  727. 'results': results
  728. })
  729. except Exception as e:
  730. return jsonify({
  731. 'status': 500,
  732. 'msg': str(e)
  733. })
  734. # CACFAR 虚警抑制函数
  735. def cacfar(data, guard_cells, training_cells, threshold_factor=1.5, censoring_factor=0.1):
  736. """
  737. 使用 CACFAR 算法进行虚警抑制。
  738. 参数:
  739. data: 输入的雷达信号数据 (1D 数组)
  740. guard_cells: 护卫单元的数量
  741. training_cells: 训练单元的数量
  742. threshold_factor: 阈值因子,决定虚警率
  743. censoring_factor: 截断因子,去除训练单元中最大和最小的异常值
  744. 返回:
  745. detected: 二值化的检测结果 (1D 数组,1 表示检测到目标,0 表示无目标)
  746. """
  747. n = len(data)
  748. half_guard_cells = guard_cells // 2
  749. half_training_cells = training_cells // 2
  750. detected = np.zeros(n)
  751. # 遍历整个数据集
  752. for i in range(half_guard_cells + half_training_cells, n - half_guard_cells - half_training_cells):
  753. # 提取训练单元区域(不包括护卫单元)
  754. training_data = np.concatenate([
  755. data[i - half_training_cells - guard_cells:i - half_guard_cells],
  756. data[i + half_guard_cells + 1:i + half_guard_cells + training_cells]
  757. ])
  758. # 排序训练单元数据,并去除最小和最大的 censoring_factor 比例的数据
  759. sorted_training_data = np.sort(training_data)
  760. censoring_count = int(len(sorted_training_data) * censoring_factor)
  761. censored_training_data = sorted_training_data[censoring_count:-censoring_count]
  762. # 计算背景噪声估计:使用截断后的数据的中位数
  763. noise_estimate = np.median(censored_training_data)
  764. # 计算检测阈值
  765. threshold = threshold_factor * noise_estimate
  766. # 比较数据值与阈值,进行目标检测
  767. if data[i] > threshold:
  768. detected[i] = 1 # 检测到目标
  769. return detected
  770. # 计算各项虚警抑制结果
  771. def calculate_all_suppression(data):
  772. suppression_results = {}
  773. parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
  774. # 设置 CACFAR 参数
  775. guard_cells = 4
  776. training_cells = 16
  777. threshold_factor = 1.5 # 阈值因子
  778. censoring_factor = 0.1 # 截断因子
  779. for param in parameters:
  780. if param in data.columns:
  781. # 应用虚警抑制 (CACFAR)
  782. column_data = pd.to_numeric(data[param], errors='coerce')
  783. suppression_results[param] = cacfar(column_data.values, guard_cells, training_cells, threshold_factor, censoring_factor)
  784. return suppression_results
  785. # 输出虚警抑制结果,并返回 JSON 格式结果
  786. def print_suppression_results(suppression_results):
  787. print("虚警抑制结果:")
  788. results = {}
  789. for param, detected in suppression_results.items():
  790. detected_percentage = np.mean(detected) * 100 # 计算检测到目标的百分比
  791. result_str = f"虚警抑制结果: {detected_percentage:.2f}%"
  792. print(result_str)
  793. results[param] = f"虚警抑制结果: {detected_percentage:.2f}%"
  794. return results
  795. # 虚警抑制-均值类CFAR检测算法
  796. @app.route('/MeanCFAR', methods=['POST'])
  797. def process_data14():
  798. try:
  799. # 获取请求中的 URL
  800. data = request.json
  801. url = data.get('url', '')
  802. # 检查 URL 是否有效
  803. if not url:
  804. return jsonify({
  805. 'status': 400,
  806. 'msg': 'No URL provided in the request body.'
  807. })
  808. # 读取 CSV 数据
  809. csv_data = read_csv_from_url(url)
  810. if csv_data is None:
  811. return jsonify({
  812. 'status': 500,
  813. 'msg': 'Failed to fetch or read the CSV file.'
  814. })
  815. # 计算虚警抑制结果
  816. suppression_results = calculate_all_suppression1(csv_data)
  817. # 获取虚警抑制结果
  818. results = print_suppression_results1(suppression_results)
  819. # 返回 JSON 格式的结果
  820. return jsonify({
  821. 'status': 200,
  822. 'msg': '虚警抑制完成',
  823. 'results': results
  824. })
  825. except Exception as e:
  826. return jsonify({
  827. 'status': 500,
  828. 'msg': str(e)
  829. })
  830. # 均值类 CFAR 虚警抑制函数 (Mean-CFAR)
  831. def mean_cfar(data, guard_cells, training_cells, threshold_factor=1.5):
  832. """
  833. 使用均值类 CFAR (Mean-CFAR) 算法进行虚警抑制。
  834. 参数:
  835. data: 输入的雷达信号数据 (1D 数组)
  836. guard_cells: 护卫单元的数量
  837. training_cells: 训练单元的数量
  838. threshold_factor: 阈值因子,决定虚警率
  839. 返回:
  840. detected: 二值化的检测结果 (1D 数组,1 表示检测到目标,0 表示无目标)
  841. """
  842. n = len(data)
  843. half_guard_cells = guard_cells // 2
  844. half_training_cells = training_cells // 2
  845. detected = np.zeros(n)
  846. # 遍历整个数据集
  847. for i in range(half_guard_cells + half_training_cells, n - half_guard_cells - half_training_cells):
  848. # 提取训练单元区域(不包括护卫单元)
  849. training_data = np.concatenate([
  850. data[i - half_training_cells - guard_cells:i - half_guard_cells],
  851. data[i + half_guard_cells + 1:i + half_guard_cells + training_cells]
  852. ])
  853. # 计算训练单元的均值
  854. noise_estimate = np.mean(training_data)
  855. # 计算检测阈值
  856. threshold = threshold_factor * noise_estimate
  857. # 比较数据值与阈值,进行目标检测
  858. if data[i] > threshold:
  859. detected[i] = 1 # 检测到目标
  860. return detected
  861. # 计算各项虚警抑制结果
  862. def calculate_all_suppression1(data):
  863. suppression_results = {}
  864. parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
  865. # 设置 Mean-CFAR 参数
  866. guard_cells = 4
  867. training_cells = 16
  868. threshold_factor = 1.5 # 阈值因子
  869. for param in parameters:
  870. if param in data.columns:
  871. # 应用虚警抑制 (Mean-CFAR)
  872. column_data = pd.to_numeric(data[param], errors='coerce')
  873. suppression_results[param] = mean_cfar(column_data.values, guard_cells, training_cells, threshold_factor)
  874. return suppression_results
  875. # 输出虚警抑制结果,并返回 JSON 格式结果
  876. def print_suppression_results1(suppression_results):
  877. print("虚警抑制结果:")
  878. results = {}
  879. for param, detected in suppression_results.items():
  880. detected_percentage = np.mean(detected) * 100 # 计算检测到目标的百分比
  881. result_str = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
  882. print(result_str)
  883. results[param] = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
  884. return results
  885. # 虚警抑制-可变指数CFAR检测算法
  886. @app.route('/V_CACFAR', methods=['POST'])
  887. def process_data15():
  888. try:
  889. # 获取请求中的 URL
  890. data = request.json
  891. url = data.get('url', '')
  892. # 检查 URL 是否有效
  893. if not url:
  894. return jsonify({
  895. 'status': 400,
  896. 'msg': 'No URL provided in the request body.'
  897. })
  898. # 读取 CSV 数据
  899. csv_data = read_csv_from_url(url)
  900. if csv_data is None:
  901. return jsonify({
  902. 'status': 500,
  903. 'msg': 'Failed to fetch or read the CSV file.'
  904. })
  905. # 计算虚警抑制结果
  906. suppression_results = calculate_all_suppression2(csv_data)
  907. # 获取虚警抑制结果
  908. results = print_suppression_results2(suppression_results)
  909. # 返回 JSON 格式的结果
  910. return jsonify({
  911. 'status': 200,
  912. 'msg': '虚警抑制完成',
  913. 'results': results
  914. })
  915. except Exception as e:
  916. return jsonify({
  917. 'status': 500,
  918. 'msg': str(e)
  919. })
  920. # 可变指数 CFAR 虚警抑制函数 (V-CA-CFAR)
  921. def v_ca_cfar(data, guard_cells, training_cells, alpha=2, threshold_factor=1.5):
  922. """
  923. 使用可变指数 CFAR (V-CA-CFAR) 算法进行虚警抑制。
  924. 参数:
  925. data: 输入的雷达信号数据 (1D 数组)
  926. guard_cells: 护卫单元的数量
  927. training_cells: 训练单元的数量
  928. alpha: 指数加权系数(控制加权的“速度”)
  929. threshold_factor: 阈值因子,决定虚警率
  930. 返回:
  931. detected: 二值化的检测结果 (1D 数组,1 表示检测到目标,0 表示无目标)
  932. """
  933. n = len(data)
  934. half_guard_cells = guard_cells // 2
  935. half_training_cells = training_cells // 2
  936. detected = np.zeros(n)
  937. # 遍历整个数据集
  938. for i in range(half_guard_cells + half_training_cells, n - half_guard_cells - half_training_cells):
  939. # 提取训练单元区域(不包括护卫单元)
  940. training_data = np.concatenate([
  941. data[i - half_training_cells - guard_cells:i - half_guard_cells],
  942. data[i + half_guard_cells + 1:i + half_guard_cells + training_cells]
  943. ])
  944. # 计算训练数据的加权指数
  945. weighted_noise_estimate = np.sum(training_data * np.exp(-alpha * np.arange(len(training_data)))) / np.sum(np.exp(-alpha * np.arange(len(training_data))))
  946. # 计算检测阈值
  947. threshold = threshold_factor * weighted_noise_estimate
  948. # 比较数据值与阈值,进行目标检测
  949. if data[i] > threshold:
  950. detected[i] = 1 # 检测到目标
  951. return detected
  952. # 计算各项虚警抑制结果
  953. def calculate_all_suppression2(data):
  954. suppression_results = {}
  955. parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
  956. # 设置 V-CA-CFAR 参数
  957. guard_cells = 4
  958. training_cells = 16
  959. alpha = 2 # 指数加权系数
  960. threshold_factor = 1.5 # 阈值因子
  961. for param in parameters:
  962. if param in data.columns:
  963. # 应用虚警抑制 (V-CA-CFAR)
  964. column_data = pd.to_numeric(data[param], errors='coerce')
  965. suppression_results[param] = v_ca_cfar(column_data.values, guard_cells, training_cells, alpha, threshold_factor)
  966. return suppression_results
  967. # 输出虚警抑制结果,并返回 JSON 格式结果
  968. def print_suppression_results2(suppression_results):
  969. print("虚警抑制结果:")
  970. results = {}
  971. for param, detected in suppression_results.items():
  972. detected_percentage = np.mean(detected) * 100 # 计算检测到目标的百分比
  973. result_str = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
  974. print(result_str)
  975. results[param] = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
  976. return results
  977. # 虚警检测-OSCFAR算法
  978. @app.route('/OSCFAR', methods=['POST'])
  979. def process_data16():
  980. try:
  981. # 获取请求中的 URL
  982. data = request.json
  983. url = data.get('url', '')
  984. # 检查 URL 是否有效
  985. if not url:
  986. return jsonify({
  987. 'status': 400,
  988. 'msg': 'No URL provided in the request body.'
  989. })
  990. # 读取 CSV 数据
  991. csv_data = read_csv_from_url(url)
  992. if csv_data is None:
  993. return jsonify({
  994. 'status': 500,
  995. 'msg': 'Failed to fetch or read the CSV file.'
  996. })
  997. # 计算虚警抑制结果
  998. suppression_results = calculate_all_suppression3(csv_data)
  999. # 获取虚警抑制结果
  1000. results = print_suppression_results3(suppression_results)
  1001. # 返回 JSON 格式的结果
  1002. return jsonify({
  1003. 'status': 200,
  1004. 'msg': '虚警抑制完成',
  1005. 'results': results
  1006. })
  1007. except Exception as e:
  1008. return jsonify({
  1009. 'status': 500,
  1010. 'msg': str(e)
  1011. })
  1012. # OSCFAR 虚警抑制函数
  1013. def os_cfar(data, guard_cells, training_cells, threshold_factor=1.5):
  1014. """
  1015. 使用 OS-CFAR 算法进行虚警抑制。
  1016. 参数:
  1017. data: 输入的雷达信号数据 (1D 数组)
  1018. guard_cells: 护卫单元的数量
  1019. training_cells: 训练单元的数量
  1020. threshold_factor: 阈值因子,决定虚警率
  1021. 返回:
  1022. detected: 二值化的检测结果 (1D 数组,1 表示检测到目标,0 表示无目标)
  1023. """
  1024. n = len(data)
  1025. half_guard_cells = guard_cells // 2
  1026. half_training_cells = training_cells // 2
  1027. detected = np.zeros(n)
  1028. # 遍历整个数据集
  1029. for i in range(half_guard_cells + half_training_cells, n - half_guard_cells - half_training_cells):
  1030. # 提取训练单元区域(不包括护卫单元)
  1031. training_data = np.concatenate([
  1032. data[i - half_training_cells - guard_cells:i - half_guard_cells],
  1033. data[i + half_guard_cells + 1:i + half_guard_cells + training_cells]
  1034. ])
  1035. # 排序训练单元数据,找到第 K 个最小值作为背景噪声估计
  1036. sorted_training_data = np.sort(training_data)
  1037. noise_estimate = sorted_training_data[training_cells // 2] # 中位数估计
  1038. # 计算检测阈值
  1039. threshold = threshold_factor * noise_estimate
  1040. # 比较数据值与阈值,进行目标检测
  1041. if data[i] > threshold:
  1042. detected[i] = 1 # 检测到目标
  1043. return detected
  1044. # 计算各项虚警抑制结果
  1045. def calculate_all_suppression3(data):
  1046. suppression_results = {}
  1047. parameters = ['发动机排气温度', '燃油流量', '氧气输出流量', '氧气浓度', '供氧压力']
  1048. # 设置 OSCFAR 参数
  1049. guard_cells = 4
  1050. training_cells = 16
  1051. threshold_factor = 1.5 # 阈值因子
  1052. for param in parameters:
  1053. if param in data.columns:
  1054. # 应用虚警抑制 (OSCFAR)
  1055. column_data = pd.to_numeric(data[param], errors='coerce')
  1056. suppression_results[param] = os_cfar(column_data.values, guard_cells, training_cells, threshold_factor)
  1057. return suppression_results
  1058. # 输出虚警抑制结果,并返回 JSON 格式结果
  1059. def print_suppression_results3(suppression_results):
  1060. print("虚警抑制结果:")
  1061. results = {}
  1062. for param, detected in suppression_results.items():
  1063. detected_percentage = np.mean(detected) * 100 # 计算检测到目标的百分比
  1064. result_str = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
  1065. print(result_str)
  1066. results[param] = f"{param} 虚警抑制结果: {detected_percentage:.2f}%"
  1067. return results
  1068. '''1补全:1平均值填充'''
  1069. @app.route('/ave', methods=['POST'])
  1070. def process_data1():
  1071. try:
  1072. data = request.json
  1073. # 获取URL并下载文件
  1074. url = data.get('url', '')
  1075. response = requests.get(url)
  1076. # 检查下载是否成功
  1077. if response.status_code != 200:
  1078. error_message = {'status': 500, 'message': 'Failed to download CSV data'}
  1079. return jsonify(error_message)
  1080. # 将 CSV 数据转换为 Pandas DataFrame
  1081. response.encoding = 'UTF-8'
  1082. csv_data = StringIO(response.text)
  1083. df = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
  1084. if "时间" in df.columns:
  1085. time_column = df["时间"].values # 获取时间列
  1086. # 过滤掉“时间”列,只保留数值列
  1087. df = df.drop(columns="时间")
  1088. else:
  1089. time_column = None
  1090. df = df
  1091. # 分离数值型数据
  1092. numerical_cols = df.select_dtypes(include=[np.number]).columns
  1093. # 填充数值型数据的缺失值,使用均值填充
  1094. for col in numerical_cols:
  1095. df[col].fillna(df[col].mean(), inplace=True)
  1096. # 分离分类型数据
  1097. categorical_cols = df.select_dtypes(include=['object']).columns
  1098. # 填充分类型数据的缺失值,使用众数填充
  1099. for col in categorical_cols:
  1100. df[col].fillna(df[col].mode()[0], inplace=True)
  1101. # 如果时间列存在,将其添加回去
  1102. if time_column is not None:
  1103. df.insert(0, "时间", time_column)
  1104. # 构建返回数据,只返回文件路径
  1105. return jsonify({
  1106. 'status': 200,
  1107. 'msg': '补全完成',
  1108. 'data': df.to_dict(orient='records')
  1109. })
  1110. except Exception as e:
  1111. return jsonify({
  1112. 'status': 500,
  1113. 'msg': str(e)
  1114. })
  1115. '''1补全:2中位数填充'''
  1116. @app.route('/upper', methods=['POST'])
  1117. def process_data_upper():
  1118. try:
  1119. data = request.json
  1120. # 获取URL并下载文件
  1121. url = data.get('url', '')
  1122. result = data.get('result', '')
  1123. response = requests.get(url)
  1124. # 检查下载是否成功
  1125. if response.status_code != 200:
  1126. error_message = {'status': 500, 'message': 'Failed to download CSV data'}
  1127. return jsonify(error_message)
  1128. # 将 CSV 数据转换为 Pandas DataFrame
  1129. response.encoding = 'UTF-8'
  1130. csv_data = StringIO(response.text)
  1131. df = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
  1132. if "时间" in df.columns:
  1133. time_column = df["时间"].values # 获取时间列
  1134. # 过滤掉“时间”列,只保留数值列
  1135. df = df.drop(columns="时间")
  1136. else:
  1137. time_column = None
  1138. other_columns = df
  1139. # 分离数值型数据
  1140. numerical_cols = df.select_dtypes(include=[np.number]).columns
  1141. # 填充数值型数据的缺失值,使用中位数填充
  1142. for col in numerical_cols:
  1143. df[col].fillna(df[col].median(), inplace=True)
  1144. # 分离分类型数据
  1145. categorical_cols = df.select_dtypes(include=['object']).columns
  1146. # 填充分类型数据的缺失值,使用众数填充
  1147. for col in categorical_cols:
  1148. df[col].fillna(df[col].mode()[0], inplace=True)
  1149. # 构建返回数据,只返回文件路径
  1150. return jsonify({
  1151. 'status': 200,
  1152. 'msg': '补全完成',
  1153. 'data': df.to_dict(orient='records')
  1154. })
  1155. except Exception as e:
  1156. return jsonify({
  1157. 'status': 500,
  1158. 'msg': str(e)
  1159. })
  1160. '''1补全:3线性插值'''
  1161. def linear_interpolate(signal):
  1162. """ Perform linear interpolation to fill missing data (NaN values). """
  1163. return signal.interpolate(method='linear', limit_direction='both')
  1164. @app.route('/line', methods=['POST'])
  1165. def process_data_line():
  1166. try:
  1167. data = request.json
  1168. # 获取URL并下载文件
  1169. url = data.get('url', '')
  1170. response = requests.get(url)
  1171. # 检查下载是否成功
  1172. if response.status_code != 200:
  1173. error_message = {'status': 500, 'message': 'Failed to download CSV data'}
  1174. return jsonify(error_message)
  1175. # 将 CSV 数据转换为 Pandas DataFrame
  1176. response.encoding = 'UTF-8'
  1177. csv_data = StringIO(response.text)
  1178. noisy_signal = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
  1179. # 处理异常值(如空字符串、None)
  1180. noisy_signal = noisy_signal.replace([None, '', 'NaN'], np.nan)
  1181. # 如果“时间”列存在,分离时间列
  1182. if "时间" in noisy_signal.columns:
  1183. time_column = noisy_signal["时间"].values # 获取时间列
  1184. # 过滤掉“时间”列,只保留数值列
  1185. other_columns = noisy_signal.drop(columns="时间")
  1186. else:
  1187. time_column = None
  1188. other_columns = noisy_signal
  1189. # 创建插值后的 DataFrame
  1190. interpolated_signals = pd.DataFrame()
  1191. # 遍历所有数值列并应用插值
  1192. for column in other_columns.columns:
  1193. try:
  1194. # 确保列数据是数值类型
  1195. column_data = pd.to_numeric(other_columns[column], errors='coerce')
  1196. interpolated_signals[column] = linear_interpolate(column_data)
  1197. except Exception as e:
  1198. print(f"处理列 {column} 时出错: {e}")
  1199. interpolated_signals[column] = other_columns[column] # 如果出错,保留原数据
  1200. # 如果时间列存在,将其添加回去
  1201. if time_column is not None:
  1202. interpolated_signals.insert(0, "时间", time_column)
  1203. # 返回插值后的数据
  1204. return jsonify({
  1205. 'status': 200,
  1206. 'msg': '补全完成',
  1207. 'data': interpolated_signals.to_dict(orient='records')
  1208. })
  1209. except Exception as e:
  1210. return jsonify({
  1211. 'status': 500,
  1212. 'msg': str(e)
  1213. })
  1214. # 移动平均法
  1215. def moving_average(signal, window_size):
  1216. return np.convolve(signal, np.ones(window_size) / window_size, mode='same')
  1217. @app.route('/movave', methods=['POST'])
  1218. def set_file_address1():
  1219. try:
  1220. data = request.json
  1221. # 获取URL并下载文件
  1222. url = data.get('url', '')
  1223. result = data.get('result', '')
  1224. response = requests.get(url)
  1225. response.raise_for_status() # 检查请求是否成功
  1226. # 使用BytesIO读取文件内容
  1227. response.encoding = 'UTF-8'
  1228. csv_data = StringIO(response.text)
  1229. noisy_signal = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
  1230. # 如果“时间”列存在,分离时间列
  1231. if "时间" in noisy_signal.columns:
  1232. time_column = noisy_signal["时间"].values # 获取时间列
  1233. # 过滤掉“时间”列,只保留数值列
  1234. other_columns = noisy_signal.drop(columns="时间")
  1235. else:
  1236. time_column = None
  1237. other_columns = noisy_signal
  1238. # 应用去噪方法
  1239. window_size = 10 # 窗口宽度
  1240. denoised_signals = pd.DataFrame()
  1241. for column in other_columns.columns:
  1242. denoised_signals[column] = moving_average(other_columns[column].values, window_size)
  1243. # 如果有时间列,将其添加回去
  1244. if time_column is not None:
  1245. denoised_signals.insert(0, '时间', time_column)
  1246. return jsonify({'status': 200,
  1247. 'msg': '去噪完成',
  1248. 'data': denoised_signals.to_dict(orient='records')})
  1249. except Exception as e:
  1250. return jsonify({
  1251. 'status': 500,
  1252. 'msg': str(e)
  1253. })
  1254. # 傅里叶变换去噪
  1255. def fft_denoise(signal, cutoff_freq, fs):
  1256. signal_fft = fft(signal)
  1257. frequencies = np.fft.fftfreq(len(signal), 1/fs)
  1258. signal_fft[np.abs(frequencies) > cutoff_freq] = 0
  1259. return ifft(signal_fft).real
  1260. @app.route('/fft', methods=['POST'])
  1261. def process_data_fft():
  1262. try:
  1263. data = request.json
  1264. # 获取URL并下载文件
  1265. url = data.get('url', '')
  1266. result=data.get('result','')
  1267. response = requests.get(url)
  1268. response.raise_for_status() # 检查请求是否成功
  1269. response.encoding = 'UTF-8'
  1270. csv_data = StringIO(response.text)
  1271. noisy_signal = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
  1272. fs = 1000 # 采样频率
  1273. cutoff_freq = 10 # 截止频率
  1274. # 如果“时间”列存在,分离时间列
  1275. if "时间" in noisy_signal.columns:
  1276. time_column = noisy_signal["时间"].values # 获取时间列
  1277. # 过滤掉“时间”列,只保留数值列
  1278. other_columns = noisy_signal.drop(columns="时间")
  1279. else:
  1280. time_column = None
  1281. other_columns = noisy_signal
  1282. # 创建去噪后的 DataFrame
  1283. denoised_signals = pd.DataFrame()
  1284. # 遍历所有数值列并应用去噪
  1285. for column in other_columns.columns:
  1286. try:
  1287. # 确保列数据是数值类型
  1288. column_data = pd.to_numeric(other_columns[column], errors='coerce')
  1289. denoised_signals[column] = fft_denoise(column_data.values, cutoff_freq, fs)
  1290. except Exception as e:
  1291. print(f"处理列 {column} 时出错: {e}")
  1292. denoised_signals[column] = other_columns[column] # 如果出错,保留原数据
  1293. # 如果时间列存在,将其添加回去
  1294. if time_column is not None:
  1295. denoised_signals.insert(0, "时间", time_column)
  1296. return jsonify({ 'status': 200,
  1297. 'msg': '去噪完成',
  1298. 'data':denoised_signals.to_dict(orient='records')})
  1299. except Exception as e:
  1300. return jsonify({
  1301. 'status': 500,
  1302. 'msg': str(e)
  1303. })
  1304. def augment_data1(data, sample_factor=2):
  1305. """
  1306. Augment the data by randomly sampling rows and adding them back.
  1307. sample_factor determines how many times the data should be sampled.
  1308. """
  1309. augmented_data = []
  1310. # Calculate the number of samples to create
  1311. num_samples = len(data) * sample_factor
  1312. # Randomly sample from the original data
  1313. for _ in range(num_samples):
  1314. sampled_row = random.choice(data)
  1315. augmented_data.append(sampled_row)
  1316. return augmented_data
  1317. @app.route('/sjcy', methods=['POST'])
  1318. def process_data_augmentation1():
  1319. try:
  1320. data = request.json
  1321. # 获取URL并下载文件
  1322. url = data.get('url', '')
  1323. response = requests.get(url)
  1324. response.raise_for_status() # 检查请求是否成功
  1325. response.encoding = 'UTF-8'
  1326. csv_data = StringIO(response.text)
  1327. signal_data = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
  1328. # If the "时间" column exists, separate it
  1329. if "时间" in signal_data.columns:
  1330. time_column = signal_data["时间"].values # Extract time column
  1331. signal_data = signal_data.drop(columns="时间") # Remove time column for processing
  1332. else:
  1333. time_column = None
  1334. # Apply random sampling-based data augmentation
  1335. augmented_signals = augment_data1(signal_data.to_dict(orient='records'))
  1336. # Convert augmented data back to a DataFrame
  1337. augmented_df = pd.DataFrame(augmented_signals)
  1338. # Apply a limit to the number of rows in the response
  1339. max_rows = 1000 # Adjust this as needed
  1340. if len(augmented_df) > max_rows:
  1341. augmented_df = augmented_df.head(max_rows)
  1342. # Ensure that the time column length matches the augmented data length
  1343. if time_column is not None:
  1344. if len(time_column) != len(augmented_df):
  1345. time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
  1346. augmented_df.insert(0, "时间", time_column)
  1347. # Combine the original and augmented data (append the augmented data below the original data)
  1348. combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
  1349. # Prepare response
  1350. result = {
  1351. 'status': 200,
  1352. 'msg': '数据扩充完成',
  1353. 'data': combined_df.to_dict(orient='records')
  1354. }
  1355. return jsonify(result)
  1356. except Exception as e:
  1357. return jsonify({
  1358. 'status': 500,
  1359. 'msg': str(e)
  1360. })
  1361. # 2、数据扰动算法
  1362. # 对数据集中的值进行小幅度的随机变化,以生成新的数据点,增加数据集的多样性。
  1363. def augment_data2(data, noise_factor=0.01, perturb_factor=0.05):
  1364. """
  1365. Augment the data by applying small perturbations or noise.
  1366. noise_factor controls the magnitude of random noise added to the data.
  1367. perturb_factor controls how much random perturbation is applied.
  1368. """
  1369. augmented_data = []
  1370. for row in data:
  1371. augmented_row = {}
  1372. for column, value in row.items():
  1373. # Convert to numeric if it's not already (ignores non-numeric columns like "时间")
  1374. try:
  1375. value = float(value)
  1376. except ValueError:
  1377. augmented_row[column] = value # Keep non-numeric columns (e.g., time column) unchanged
  1378. continue
  1379. # Apply random noise within a perturbation range
  1380. perturbation = random.uniform(1 - perturb_factor, 1 + perturb_factor) # Random scaling factor
  1381. noisy_value = value * perturbation # Apply the perturbation
  1382. # Optionally, add some Gaussian noise on top of the perturbation
  1383. noise = random.gauss(0, noise_factor) # Gaussian noise
  1384. augmented_row[column] = noisy_value + noise
  1385. augmented_data.append(augmented_row)
  1386. return augmented_data
  1387. @app.route('/sjrd', methods=['POST'])
  1388. def process_data_augmentation2():
  1389. try:
  1390. data = request.json
  1391. # 获取URL并下载文件
  1392. url = data.get('url', '')
  1393. response = requests.get(url)
  1394. response.raise_for_status() # 检查请求是否成功
  1395. response.encoding = 'UTF-8'
  1396. csv_data = StringIO(response.text)
  1397. signal_data = pd.read_csv(csv_data, header=0,encoding='utf-8')
  1398. # If the "时间" column exists, separate it
  1399. if "时间" in signal_data.columns:
  1400. time_column = signal_data["时间"].values # Extract time column
  1401. signal_data = signal_data.drop(columns="时间") # Remove time column for processing
  1402. else:
  1403. time_column = None
  1404. # Apply data perturbation-based augmentation
  1405. augmented_signals = augment_data2(signal_data.to_dict(orient='records'))
  1406. # Convert augmented data back to a DataFrame
  1407. augmented_df = pd.DataFrame(augmented_signals)
  1408. # Apply a limit to the number of rows in the response
  1409. max_rows = 1000 # Adjust this as needed
  1410. if len(augmented_df) > max_rows:
  1411. augmented_df = augmented_df.head(max_rows)
  1412. # Ensure that the time column length matches the augmented data length
  1413. if time_column is not None:
  1414. if len(time_column) != len(augmented_df):
  1415. time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
  1416. augmented_df.insert(0, "时间", time_column)
  1417. # Combine the original and augmented data (append the augmented data below the original data)
  1418. combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
  1419. # Prepare response
  1420. result = {
  1421. 'status': 200,
  1422. 'msg': '数据扩充完成',
  1423. 'data': combined_df.to_dict(orient='records')
  1424. }
  1425. return jsonify(result)
  1426. except Exception as e:
  1427. return jsonify({
  1428. 'status': 500,
  1429. 'msg': str(e)
  1430. })
  1431. # 3、Wavelet变换算法
  1432. # 使用小波变换对信号或图像进行多分辨率分析,可以用于数据压缩或去噪。
  1433. def augment_data_with_wavelet(data, wavelet='db1', level=2, noise_factor=0.01):
  1434. """
  1435. Augment the data by applying wavelet transform, perturbing the coefficients, and performing inverse transform.
  1436. :param data: The original data.
  1437. :param wavelet: The type of wavelet to use (e.g., 'db1', 'haar', etc.).
  1438. :param level: The level of decomposition in the wavelet transform.
  1439. :param noise_factor: The standard deviation of noise to add to the high-frequency coefficients.
  1440. :return: The augmented data after wavelet-based perturbation.
  1441. """
  1442. augmented_data = []
  1443. for row in data:
  1444. augmented_row = {}
  1445. for column, value in row.items():
  1446. try:
  1447. value = float(value)
  1448. except ValueError:
  1449. augmented_row[column] = value # Keep non-numeric columns unchanged
  1450. continue
  1451. # Perform wavelet decomposition on the value (reshape to 1D array if necessary)
  1452. coeffs = pywt.wavedec([value], wavelet, level=level)
  1453. # Perturb the high-frequency coefficients (detail coefficients) with noise
  1454. for i in range(1, len(coeffs)):
  1455. coeffs[i] += np.random.normal(0, noise_factor, size=coeffs[i].shape)
  1456. # Reconstruct the signal after perturbation
  1457. perturbed_value = pywt.waverec(coeffs, wavelet)[0]
  1458. augmented_row[column] = perturbed_value
  1459. augmented_data.append(augmented_row)
  1460. return augmented_data
  1461. @app.route('/wal', methods=['POST'])
  1462. def process_data_augmentation3():
  1463. try:
  1464. data = request.json
  1465. # 获取URL并下载文件
  1466. url = data.get('url', '')
  1467. response = requests.get(url)
  1468. response.raise_for_status() # 检查请求是否成功
  1469. response.encoding = 'UTF-8'
  1470. csv_data = StringIO(response.text)
  1471. signal_data = pd.read_csv(csv_data, header=0, encoding='utf-8')
  1472. # If the "时间" column exists, separate it
  1473. if "时间" in signal_data.columns:
  1474. time_column = signal_data["时间"].values # Extract time column
  1475. signal_data = signal_data.drop(columns="时间") # Remove time column for processing
  1476. else:
  1477. time_column = None
  1478. # Apply wavelet-based data augmentation
  1479. augmented_signals = augment_data_with_wavelet(signal_data.to_dict(orient='records'))
  1480. # Convert augmented data back to a DataFrame
  1481. augmented_df = pd.DataFrame(augmented_signals)
  1482. # Apply a limit to the number of rows in the response
  1483. max_rows = 1000 # Adjust this as needed
  1484. if len(augmented_df) > max_rows:
  1485. augmented_df = augmented_df.head(max_rows)
  1486. # Ensure that the time column length matches the augmented data length
  1487. if time_column is not None:
  1488. if len(time_column) != len(augmented_df):
  1489. time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
  1490. augmented_df.insert(0, "时间", time_column)
  1491. # Combine the original and augmented data (append the augmented data below the original data)
  1492. combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
  1493. # Prepare response
  1494. result = {
  1495. 'status': 200,
  1496. 'msg': '数据扩充完成',
  1497. 'data': combined_df.to_dict(orient='records')
  1498. }
  1499. return jsonify(result)
  1500. except Exception as e:
  1501. return jsonify({
  1502. 'status': 500,
  1503. 'msg': str(e)
  1504. })
  1505. # 4、小波系数扰动算法
  1506. # 在小波变换的基础上,对小波系数进行扰动,以生成新的数据表示,用于数据增强。
  1507. def augment_data_with_wavelet_coeffs(data, wavelet='db1', level=2, noise_factor=0.01, scale_factor=0.05):
  1508. """
  1509. Augment the data by perturbing the wavelet coefficients.
  1510. :param data: The original data.
  1511. :param wavelet: The type of wavelet to use (e.g., 'db1', 'haar', etc.).
  1512. :param level: The level of decomposition in the wavelet transform.
  1513. :param noise_factor: The standard deviation of noise to add to the high-frequency coefficients.
  1514. :param scale_factor: The factor to scale the wavelet coefficients.
  1515. :return: The augmented data after perturbing the wavelet coefficients.
  1516. """
  1517. augmented_data = []
  1518. for row in data:
  1519. augmented_row = {}
  1520. for column, value in row.items():
  1521. try:
  1522. value = float(value)
  1523. except ValueError:
  1524. augmented_row[column] = value # Keep non-numeric columns unchanged
  1525. continue
  1526. # Perform wavelet decomposition on the value (reshape to 1D array if necessary)
  1527. coeffs = pywt.wavedec([value], wavelet, level=level)
  1528. # Perturb the coefficients (add noise and/or scale)
  1529. for i in range(1, len(coeffs)):
  1530. # Add noise to the high-frequency coefficients (i > 0)
  1531. coeffs[i] += np.random.normal(0, noise_factor, size=coeffs[i].shape)
  1532. # Optionally, scale the coefficients to create variation
  1533. coeffs[i] *= (1 + np.random.uniform(-scale_factor, scale_factor))
  1534. # Reconstruct the signal after perturbing the coefficients
  1535. perturbed_value = pywt.waverec(coeffs, wavelet)[0]
  1536. augmented_row[column] = perturbed_value
  1537. augmented_data.append(augmented_row)
  1538. return augmented_data
  1539. @app.route('/xbrd', methods=['POST'])
  1540. def process_data_augmentation4():
  1541. try:
  1542. data = request.json
  1543. # 获取URL并下载文件
  1544. url = data.get('url', '')
  1545. response = requests.get(url)
  1546. response.raise_for_status() # 检查请求是否成功
  1547. response.encoding = 'UTF-8'
  1548. csv_data = StringIO(response.text)
  1549. signal_data = pd.read_csv(csv_data, header=0, encoding='utf-8')
  1550. # If the "时间" column exists, separate it
  1551. if "时间" in signal_data.columns:
  1552. time_column = signal_data["时间"].values # Extract time column
  1553. signal_data = signal_data.drop(columns="时间") # Remove time column for processing
  1554. else:
  1555. time_column = None
  1556. # Apply wavelet-based data augmentation
  1557. augmented_signals = augment_data_with_wavelet_coeffs(signal_data.to_dict(orient='records'))
  1558. # Convert augmented data back to a DataFrame
  1559. augmented_df = pd.DataFrame(augmented_signals)
  1560. # Apply a limit to the number of rows in the response
  1561. max_rows = 1000 # Adjust this as needed
  1562. if len(augmented_df) > max_rows:
  1563. augmented_df = augmented_df.head(max_rows)
  1564. # Ensure that the time column length matches the augmented data length
  1565. if time_column is not None:
  1566. if len(time_column) != len(augmented_df):
  1567. time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
  1568. augmented_df.insert(0, "时间", time_column)
  1569. # Combine the original and augmented data (append the augmented data below the original data)
  1570. combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
  1571. # Prepare response
  1572. result = {
  1573. 'status': 200,
  1574. 'msg': '数据扩充完成',
  1575. 'data': combined_df.to_dict(orient='records')
  1576. }
  1577. return jsonify(result)
  1578. except Exception as e:
  1579. return jsonify({
  1580. 'status': 500,
  1581. 'msg': str(e)
  1582. })
  1583. # 5、小波线性插值算法
  1584. #结合小波变换和线性插值技术,用于数据的插值和平滑处理,可以用于数据扩充。
  1585. def wavelet_linear_interpolation(data, wavelet='db1', level=2):
  1586. """
  1587. Apply wavelet-based linear interpolation on the data to augment it.
  1588. :param data: The original data.
  1589. :param wavelet: The type of wavelet to use (e.g., 'db1', 'haar', etc.).
  1590. :param level: The level of decomposition in the wavelet transform.
  1591. :return: The augmented data after wavelet-based interpolation.
  1592. """
  1593. augmented_data = []
  1594. for row in data:
  1595. augmented_row = {}
  1596. for column, value in row.items():
  1597. try:
  1598. value = float(value)
  1599. except ValueError:
  1600. augmented_row[column] = value # Keep non-numeric columns unchanged
  1601. continue
  1602. # Perform wavelet decomposition on the value (reshape to 1D array if necessary)
  1603. coeffs = pywt.wavedec([value], wavelet, level=level)
  1604. # Perform linear interpolation on the low-frequency (approximated) coefficients
  1605. low_freq = coeffs[0] # Low-frequency component (approximated signal)
  1606. high_freqs = coeffs[1:] # High-frequency components (details)
  1607. # Check the length of low_freq before interpolation
  1608. if len(low_freq) < 2:
  1609. augmented_row[column] = value # If too short, return original value
  1610. continue
  1611. # Linear interpolation on the low-frequency coefficients
  1612. interp_func = interp1d(np.arange(len(low_freq)), low_freq, kind='linear', fill_value='extrapolate')
  1613. interpolated_low_freq = interp_func(np.linspace(0, len(low_freq)-1, num=len(low_freq) * 2)) # Insert points
  1614. # Linear interpolation on high-frequency coefficients (details)
  1615. interpolated_high_freqs = []
  1616. for freq in high_freqs:
  1617. if len(freq) < 2: # Skip if the frequency component has less than 2 points
  1618. interpolated_high_freqs.append(freq)
  1619. continue
  1620. interp_func = interp1d(np.arange(len(freq)), freq, kind='linear', fill_value='extrapolate')
  1621. interpolated_high_freq = interp_func(np.linspace(0, len(freq)-1, num=len(freq) * 2)) # Insert points
  1622. interpolated_high_freqs.append(interpolated_high_freq)
  1623. # Combine the interpolated low-frequency and high-frequency coefficients
  1624. new_coeffs = [interpolated_low_freq] + interpolated_high_freqs
  1625. # Reconstruct the signal after interpolation
  1626. perturbed_value = pywt.waverec(new_coeffs, wavelet)[0]
  1627. augmented_row[column] = perturbed_value
  1628. augmented_data.append(augmented_row)
  1629. return augmented_data
  1630. @app.route('/xbcz', methods=['POST'])
  1631. def process_data_augmentation5():
  1632. try:
  1633. data = request.json
  1634. # 获取URL并下载文件
  1635. url = data.get('url', '')
  1636. response = requests.get(url)
  1637. response.raise_for_status() # 检查请求是否成功
  1638. response.encoding = 'UTF-8'
  1639. csv_data = StringIO(response.text)
  1640. signal_data = pd.read_csv(csv_data, header=0,encoding='utf-8')
  1641. # If the "时间" column exists, separate it
  1642. if "时间" in signal_data.columns:
  1643. time_column = signal_data["时间"].values # Extract time column
  1644. signal_data = signal_data.drop(columns="时间") # Remove time column for processing
  1645. else:
  1646. time_column = None
  1647. # Apply wavelet-based linear interpolation data augmentation
  1648. augmented_signals = wavelet_linear_interpolation(signal_data.to_dict(orient='records'))
  1649. # Convert augmented data back to a DataFrame
  1650. augmented_df = pd.DataFrame(augmented_signals)
  1651. # Apply a limit to the number of rows in the response
  1652. max_rows = 1000 # Adjust this as needed
  1653. if len(augmented_df) > max_rows:
  1654. augmented_df = augmented_df.head(max_rows)
  1655. # Ensure that the time column length matches the augmented data length
  1656. if time_column is not None:
  1657. if len(time_column) != len(augmented_df):
  1658. time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
  1659. augmented_df.insert(0, "时间", time_column)
  1660. # Combine the original and augmented data (append the augmented data below the original data)
  1661. combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
  1662. # Prepare response
  1663. result = {
  1664. 'status': 200,
  1665. 'msg': '数据扩充完成',
  1666. 'data': combined_df.to_dict(orient='records')
  1667. }
  1668. return jsonify(result)
  1669. except Exception as e:
  1670. return jsonify({
  1671. 'status': 500,
  1672. 'msg': str(e)
  1673. })
  1674. #6添加噪声
  1675. def augment_data(data, noise_factor=0.01):
  1676. augmented_data = []
  1677. # Iterate over each row of the data and apply noise
  1678. for row in data:
  1679. augmented_row = {}
  1680. for column, value in row.items():
  1681. # Convert to numeric if it's not already (ignores non-numeric columns like "时间")
  1682. try:
  1683. value = float(value)
  1684. except ValueError:
  1685. augmented_row[column] = value # Keep non-numeric columns (e.g., time column) unchanged
  1686. continue
  1687. # Apply Gaussian noise
  1688. noise = random.gauss(0, noise_factor) # Add Gaussian noise
  1689. noisy_value = value + noise
  1690. augmented_row[column] = noisy_value
  1691. augmented_data.append(augmented_row)
  1692. return augmented_data
  1693. @app.route('/zs', methods=['POST'])
  1694. def process_data_augmentation6():
  1695. try:
  1696. data = request.json
  1697. # 获取URL并下载文件
  1698. url = data.get('url', '')
  1699. response = requests.get(url)
  1700. response.raise_for_status() # 检查请求是否成功
  1701. response.encoding = 'UTF-8'
  1702. csv_data = StringIO(response.text)
  1703. signal_data = pd.read_csv(csv_data, header=0,encoding='utf-8')
  1704. # If the "时间" column exists, separate it
  1705. if "时间" in signal_data.columns:
  1706. time_column = signal_data["时间"].values # Extract time column
  1707. signal_data = signal_data.drop(columns="时间") # Remove time column for processing
  1708. else:
  1709. time_column = None
  1710. # Apply data augmentation (only noise)
  1711. augmented_signals = augment_data(signal_data.to_dict(orient='records'))
  1712. # Convert augmented data back to a DataFrame
  1713. augmented_df = pd.DataFrame(augmented_signals)
  1714. # Ensure that the time column length matches the augmented data length
  1715. if time_column is not None:
  1716. # If time_column exists, we can duplicate it to match the augmented data length
  1717. if len(time_column) != len(augmented_df):
  1718. time_column = np.tile(time_column, len(augmented_df) // len(time_column) + 1)[:len(augmented_df)]
  1719. augmented_df.insert(0, "时间", time_column)
  1720. # Combine the original and augmented data (append the augmented data below the original data)
  1721. combined_df = pd.concat([signal_data, augmented_df], ignore_index=True)
  1722. # Prepare response
  1723. result = {
  1724. 'status': 200,
  1725. 'msg': '数据扩充完成',
  1726. 'data': combined_df.to_dict(orient='records')
  1727. }
  1728. return jsonify(result)
  1729. except Exception as e:
  1730. return jsonify({
  1731. 'status': 500,
  1732. 'msg': str(e)
  1733. })
def process_paiwen(df):
    """Detect oscillation ("摆动") segments in the exhaust-temperature channel.

    Scans column 0 (throttle, "油门") for stable runs — consecutive samples
    whose step is < 1 while the value stays > 1.  When a stable run longer
    than 300 samples ends, the matching exhaust-temperature ("排温", column 1)
    segment is z-score normalised and FFT-analysed; the segment counts as
    oscillating when the spectrum crosses some amplitude level often enough.
    Detected segments are drawn in red over the full series and the figure is
    saved as 'baidong.jpg' (drawn on the current global pyplot figure).

    :param df: tabular data; row 0 is treated as a header and dropped,
               column 0 = throttle, column 1 = exhaust temperature.
    :return: Chinese description of the oscillation positions, or the
             "no oscillation" message.
    """
    # Convert the incoming table to an array and drop the header row.
    df = np.array(df)
    youmen = df[1:, 0]
    paiwen = df[1:, 1]
    fs = 1000  # NOTE(review): sampling rate; assigned but never used below.
    # FFT size: 4x the signal length (zero-padded transform).
    num_fft = len(youmen) * 4
    index = 1        # length of the current stable run (1-based counter)
    plot_index = 0   # becomes 1 once any oscillation segment has been plotted
    str_res1 = '不存在摆动数据'
    str_res2 = '摆动位置为:'
    pos_res = []  # NOTE(review): collected below but never read or returned.
    for i in range(len(youmen)):
        if index == 1:
            # Start a new candidate run at sample i.
            paiwen_res = []
            youmen_res = []
            paiwen_res.append(paiwen[i])
            youmen_res.append(youmen[i])
            index += 1
        else:
            if abs(youmen[i] - youmen[i - 1]) < 1 and youmen[i] > 1:
                # Throttle still stable: extend the current run.
                paiwen_res.append(paiwen[i])
                youmen_res.append(youmen[i])
                index += 1
            else:
                # Run ended; analyse it only if it lasted more than 300 samples.
                if index > 300:
                    pos_res.append(i)
                    pos_res.append(index)
                    # Discard the first 100 samples of the run (transient).
                    paiwen_res = paiwen_res[100:]
                    youmen_res = youmen_res[100:]
                    # z-score normalisation of both channels.
                    mean = np.mean(paiwen_res)
                    std = np.std(paiwen_res)
                    data = (paiwen_res - mean) / std
                    mean2 = np.mean(youmen_res)
                    std2 = np.std(youmen_res)
                    data2 = (youmen_res - mean2) / std2
                    # b, a = signal.butter(data, [1, 5], 'low', analog=True)
                    # w, h = signal.freqs(b, a)
                    # plt.semilogx(w, 20 * np.log10(abs(h)))
                    Y = fft(data, num_fft)
                    # np.savetxt('I:\\灵巧\\'+str(i)+'Y.csv', Y, delimiter=',')
                    Y = np.abs(Y)
                    # Log-magnitude spectrum of the positive-frequency half.
                    # NOTE(review): `res` is computed but never used — the
                    # crossing count below works on the raw magnitude `Y`.
                    res = 20 * np.log10(Y[:num_fft // 2])
                    # np.savetxt('I:\\灵巧\\'+str(i)+'res.csv', res, delimiter=',')
                    # ax = plt.subplot(512)
                    # ax.set_title('fft transform')
                    # For amplitude levels m = 0..59, count how often the
                    # spectrum crosses level m; keep the maximum count.
                    # NOTE(review): only the first ~1000 spectrum bins are
                    # inspected (j up to 997) regardless of num_fft — confirm.
                    index2 = 0
                    for m in range(60):
                        index_j = 0
                        for j in range(998):
                            if (Y[j + 1] - m) * (Y[j + 2] - m) < 0:
                                index_j += 1
                        if index_j > index2:
                            index2 = index_j
                    if index2 > 10:
                        # Oscillation detected: record and plot the segment.
                        str_res2 = str_res2 + str(i - index + 101) + '-' + str(i) + ';'
                        k = np.arange(i - index + 101, i, 1)
                        if plot_index == 0:
                            plt.plot(k, paiwen_res, 'r', zorder=2, label='摆动数据')
                            plot_index = 1
                        if plot_index == 1:
                            plt.plot(k, paiwen_res, 'r', zorder=2)
                        # plt.plot(youmen_res)
                        # plt.show()
                # Reset to start tracking the next run.
                index = 1
    plt.plot(paiwen, 'b', zorder=1, label='正常数据')
    plt.legend()
    plt.savefig('baidong.jpg', dpi=600)
    if plot_index == 1:
        str_res = str_res2
    if plot_index == 0:
        str_res = str_res1
    return str_res
  1808. # 排气温度摆动
  1809. @app.route('/pwbd', methods=['POST'])
  1810. def process_data_pqbd():
  1811. try:
  1812. # 获取请求数据
  1813. data = request.json
  1814. url = data.get('url', '')
  1815. # 读取 CSV 数据
  1816. df = read_csv_from_url(url)
  1817. if df is None:
  1818. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  1819. str_res=process_paiwen(df)
  1820. file_path = 'baidong.jpg'
  1821. files = {'file': open(file_path, 'rb')}
  1822. response = requests.post('http://localhost:9090/als/resource/oss/upload', files=files)
  1823. files['file'].close()
  1824. res = json.loads(response.text)
  1825. print(res)
  1826. if res["code"] == 200:
  1827. data1 = {"result": str_res, "ossId": res.get('data', {}).get('ossId', None)}
  1828. result = {'status': 200, 'data': data1} # 最终返回结果
  1829. return jsonify(result)
  1830. except Exception as e:
  1831. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  1832. def process_maoci(df):
  1833. data=np.array(df)
  1834. data=data[1:,:]
  1835. data0= data[
  1836. np.where(
  1837. (data[:, 1] == 1)
  1838. )
  1839. ]
  1840. maoci=data0[:,0]
  1841. mean=np.mean(maoci)
  1842. str_res1 = '不存在毛刺数据'
  1843. str_res2 = '毛刺数据位置为:'
  1844. time_list=np.where((data[:, 1] == 1) & (abs(data[:, 0]- mean)>1))
  1845. time_list=np.array(time_list)[0,:]
  1846. plt.figure()
  1847. plt.scatter(time_list,data[time_list,0],c='r', marker='o', zorder=1,label='毛刺数据')
  1848. plt.plot(data[:, 0], c='b', zorder=2, label='正常数据')
  1849. plt.legend()
  1850. plt.savefig('maoci.jpg', dpi=600)
  1851. if time_list.shape[0] > 0:
  1852. for i in time_list:
  1853. str_res2 = str_res2 + str(i) + ';'
  1854. str_res = str_res2
  1855. else:
  1856. str_res = str_res1
  1857. return str_res
  1858. # 毛刺检测
  1859. @app.route('/maoci', methods=['POST'])
  1860. def process_data_maoci():
  1861. try:
  1862. # 获取请求数据
  1863. data = request.json
  1864. url = data.get('url', '')
  1865. # 读取 CSV 数据
  1866. df = read_csv_from_url(url)
  1867. if df is None:
  1868. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  1869. str_res=process_maoci(df)
  1870. file_path = 'maoci.jpg'
  1871. files = {'file': open(file_path, 'rb')}
  1872. response = requests.post('http://localhost:9090/als/resource/oss/upload', files=files)
  1873. files['file'].close()
  1874. res = json.loads(response.text)
  1875. print(res)
  1876. if res["code"] == 200:
  1877. data1 = {"result": str_res, "ossId": res.get('data', {}).get('ossId', None)}
  1878. result = {'status': 200, 'data': data1} # 最终返回结果
  1879. return jsonify(result)
  1880. except Exception as e:
  1881. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
def process_tiaobian(df):
    """Detect jump ("跳变") segments in a 1-D series and plot them.

    A jump run is a stretch where each look-ahead value exceeds the current
    one by more than 50% (relative change > 0.5).  Detected runs are drawn in
    red over the full series and the figure is saved as 'tiaobian.jpg'
    (drawn on the current global pyplot figure).

    :param df: tabular data; row 0 is treated as a header and dropped.
               NOTE(review): the scalar comparisons below assume a single
               column — a 2-D array would make the ``>`` test ambiguous;
               confirm the shape produced by the caller.
    :return: Chinese description of the jump positions, or the "no jumps"
             message.
    """
    youliang = np.array(df)
    youliang = youliang[1:]  # drop the header row
    index = 0        # length of the jump run currently being tracked
    plot_index = 0   # becomes 1 once any jump segment has been plotted
    str_res1 = '不存在跳变数据'
    str_res2 = '跳变位置为:'
    # Skip 50 samples at each end of the series.
    # NOTE(review): `i + index + 1` can run past the end of the array when a
    # long jump run occurs near the tail, and the division misbehaves
    # (ZeroDivisionError or inf, depending on dtype) when youliang[i] == 0 —
    # confirm the caller's input guarantees.
    for i in np.arange(50, len(youliang) - 50):
        if (youliang[i + index + 1] - youliang[i]) / youliang[i] > 0.5:
            index += 1
        if index > 0 and (youliang[i + index + 1] - youliang[i]) / youliang[i] < 0.5:
            # Run ended: record the segment [i - index - 1, i].
            str_res2 = str_res2 + str(i - index - 1) + '-' + str(i) + ';'
            k = np.arange(i - index - 1, i + 1, 1)
            if plot_index == 0:
                plt.plot(k, youliang[i - index - 1: i + 1], 'r', zorder=2, label='跳变数据')
                plot_index = 1
            # NOTE(review): on the first detection both branches run, so that
            # segment is plotted twice (visually harmless).
            if plot_index == 1:
                plt.plot(k, youliang[i - index - 1: i + 1], 'r', zorder=2)
            index = 0
    plt.plot(youliang, 'b', zorder=1, label='正常数据')
    plt.legend()
    plt.savefig('tiaobian.jpg', dpi=600)
    if plot_index == 1:
        str_res = str_res2
    if plot_index == 0:
        str_res = str_res1
    return str_res
  1909. # 数据跳变
  1910. @app.route('/tiaobian', methods=['POST'])
  1911. def process_data_tiaobian():
  1912. try:
  1913. # 获取请求数据
  1914. data = request.json
  1915. url = data.get('url', '')
  1916. # 读取 CSV 数据
  1917. df = read_csv_from_url(url)
  1918. if df is None:
  1919. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  1920. str_res=process_tiaobian(df)
  1921. file_path = 'tiaobian.jpg'
  1922. files = {'file': open(file_path, 'rb')}
  1923. response = requests.post('http://localhost:9090/als/resource/oss/upload', files=files)
  1924. files['file'].close()
  1925. res = json.loads(response.text)
  1926. print(res)
  1927. if res["code"] == 200:
  1928. data1 = {"result": str_res, "ossId": res.get('data', {}).get('ossId', None)}
  1929. result = {'status': 200, 'data': data1} # 最终返回结果
  1930. return jsonify(result)
  1931. except Exception as e:
  1932. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
  1933. # 数据跳变
  1934. def process_tuihua_YNQ(df):
  1935. data=df
  1936. a_4 = data[
  1937. np.where(
  1938. (data[:, 7] > 1800)&(data[:, 7] < 2150)
  1939. )
  1940. ]
  1941. plt.plot(a_4[:,2], label='氧气浓缩器退化数据')
  1942. plt.savefig('tuihua_YNQ.jpg', dpi=600)
  1943. @app.route('/tuihua_YNQ', methods=['POST'])
  1944. def process_data_tuihua_YNQ():
  1945. try:
  1946. # 获取请求数据
  1947. data = request.json
  1948. urls = data.get('urls', '')
  1949. # 读取 CSV 数据
  1950. df = read_csv_from_urls(urls)
  1951. if df is None:
  1952. return jsonify({'status': 500, 'data': None, 'msg': '数据获取失败'})
  1953. process_tuihua_YNQ(df)
  1954. file_path = 'tuihua_YNQ.jpg'
  1955. files = {'file': open(file_path, 'rb')}
  1956. response = requests.post('http://localhost:9090/als/resource/oss/upload', files=files)
  1957. files['file'].close()
  1958. res = json.loads(response.text)
  1959. if res["code"] == 200:
  1960. data1 = { "ossId": res.get('data', {}).get('ossId', None)}
  1961. result = {'status': 200, 'data': data1} # 最终返回结果
  1962. return jsonify(result)
  1963. except Exception as e:
  1964. return jsonify({'status': 500, 'data': None, 'msg': str(e)})
# Program entry point
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger and auto-reload;
    # it must not be used in a production deployment (remote code execution
    # risk via the interactive debugger).
    app.run(debug=True, port=8888)