# actions.py
  1. from pathlib import Path
  2. import json
  3. import pandas as pd
  4. import requests
  5. from io import StringIO
  6. from flask import Flask, jsonify, request
# Flask application object.
# NOTE(review): no route handlers or app.run() are visible in this file;
# the app appears unused here — confirm before removing.
app = Flask(__name__)
# Build the config path: <parent of this file's directory>/config/config.json.
current_dir = Path(__file__).parent
json_path = current_dir.parent / "config" / "config.json"
# Load the JSON config once at import time.
with open(json_path, "r", encoding="utf-8") as f:
    config = json.load(f)
# NOTE(review): this module-level `headers` is shadowed by local
# reassignments in both functions below and is never read at module
# level — looks dead; confirm before removing.
headers = config['mutationFailures']
# Fault score formula: 60 - 60 * A, where A is a coefficient.
  16. # 逻辑判断函数
  17. def check_abnormal_headers(dataframe):
  18. abnormal_headers = []
  19. headers = config['logicalFailures']
  20. # 遍历所有列(表头)
  21. for header in headers:
  22. # 检查该列是否存在至少一个1
  23. if (dataframe[header] == 1).any():
  24. abnormal_headers.append(header)
  25. return abnormal_headers
  26. # 突发故障判断逻辑
  27. def analyze_excel_data(dataframe):
  28. # df = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
  29. results = []
  30. faults = [] # 故障
  31. degradations = [] # 退化
  32. headers = config['mutationFailures']
  33. for column_rule in headers:
  34. # 提取配置参数
  35. column_name = column_rule["column"]
  36. normal_range = column_rule["normalRange"]
  37. degradation_range = column_rule["degradationInterval"]
  38. normal_lower = normal_range[0] # 正常区间下限
  39. normal_upper = normal_range[1] # 正常区间上限
  40. degradation_lower = degradation_range[0] # 退化区间上限
  41. degradation_upper = degradation_range[1]
  42. # 数据预处理:转换为数值并去除非数值/空值
  43. col_data = pd.to_numeric(dataframe[column_name], errors="coerce").dropna()
  44. # 跳过无有效数据的列
  45. if len(col_data) == 0:
  46. continue
  47. # 逻辑判断(按优先级顺序)
  48. # 突发故障:超出退化区间上下限
  49. if ((col_data > degradation_upper) | (col_data < degradation_lower)).any():
  50. # 打印飞参数据中故障结果和退化分析结果
  51. results.append(f"{column_name}突发故障")
  52. # 只保留故障结果,不保留退化分析结果
  53. faults.append(column_name)
  54. continue
  55. # 退化偏大:在正常上限和退化上限之间
  56. if ((col_data > normal_upper) & (col_data <= degradation_upper)).any():
  57. results.append(f"{column_name}偏大")
  58. degradations.append(column_name)
  59. continue
  60. # 偏小:在退化下限和正常下限之间
  61. if ((col_data >= degradation_lower) & (col_data < normal_lower)).any():
  62. results.append(f"{column_name}偏小")
  63. degradations.append(f"{column_name}偏小")
  64. fault_result = {
  65. "results": results,
  66. "fault": faults,
  67. "degradation": degradations
  68. }
  69. return fault_result
if __name__ == '__main__':
    # Ad-hoc manual run: fetch a sample CSV (exhaust raw data) from a
    # hard-coded LAN upload URL instead of serving requests via Flask.
    data_url = "http://169.254.117.72:9090/profile/upload/2025/05/16/排气原始数据_20250516140130A009.csv"
    response = requests.get(data_url)
    response.encoding = 'UTF-8'
    csv_data = StringIO(response.text)
    dataframe = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
    # Logical-failure columns (flag value 1 present anywhere).
    result = check_abnormal_headers(dataframe)
    # Interval-based analysis: sudden faults and degradations.
    resa = analyze_excel_data(dataframe)
    # Merge sudden-fault columns into the logical-failure list.
    fault = resa["fault"]
    for i in fault:
        result.append(i)
    # API-response-shaped payload.
    # NOTE(review): "score" is hard-coded to 58 here; the comment near the
    # top of the file suggests it should be 60 - 60 * A — confirm.
    results = {
        "code": 200,
        "msg": "操作成功",
        "data": {
            "score": 58,
            "fault": result,
            "degradation": resa["degradation"]
        }
    }
    print(results)