actions.py

from pathlib import Path
import json
import pandas as pd
import requests
from io import StringIO
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

app = FastAPI(title="飞参判读API")  # "Flight Parameter Interpretation API"
# Resolve ../config/config.json relative to this file
current_dir = Path(__file__).parent
json_path = current_dir.parent / "config" / "config.json"

# Load the JSON config
with open(json_path, "r", encoding="utf-8") as f:
    config = json.load(f)

# Fault score formula: 60 - 60 * A, where A is a coefficient
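
# The config lookups in this file imply a config.json of roughly this shape.
# Key names ("logicalFailures", "mutationFailures", "column", "normalRange",
# "degradationInterval", "variable") come from this file; the column names and
# numbers below are illustrative only:
#
# {
#   "logicalFailures": ["fault_flag_1", "fault_flag_2"],
#   "mutationFailures": [
#     {
#       "column": "exhaust_temperature",
#       "normalRange": [300, 600],
#       "degradationInterval": [250, 650],
#       "variable": 20
#     }
#   ]
# }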
# Logical-failure check: report every configured column that contains a 1
def check_abnormal_headers(dataframe):
    abnormal_headers = []
    headers = config['logicalFailures']
    # Walk the configured columns (headers)
    for header in headers:
        # Flag the column if it contains at least one 1
        if (dataframe[header] == 1).any():
            abnormal_headers.append(header)
    return abnormal_headers
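
# A quick illustration (assuming "fault_flag_1" is listed under
# config["logicalFailures"]):
#   check_abnormal_headers(pd.DataFrame({"fault_flag_1": [0, 1, 0]}))
#   -> ["fault_flag_1"]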
# Sudden-fault ("mutation failure") analysis
def analyze_excel_data(dataframe):
    results = []
    faults = []        # faults
    degradations = []  # degradations
    headers = config['mutationFailures']
    for column_rule in headers:
        column_name = column_rule["column"]
        normal_range = column_rule["normalRange"]
        degradation_range = column_rule["degradationInterval"]
        variable = column_rule["variable"]
        normal_lower, normal_upper = normal_range[0], normal_range[1]
        degradation_lower, degradation_upper = degradation_range[0], degradation_range[1]
        # Preprocess: coerce to numeric, drop non-numeric values
        col_data = pd.to_numeric(dataframe[column_name], errors="coerce").dropna()
        if len(col_data) == 0:
            continue
        # Sudden fault: any value falls outside the degradation interval
        if ((col_data > degradation_upper) | (col_data < degradation_lower)).any():
            results.append(f"{column_name}突发故障")  # "sudden fault"
            faults.append(f"{column_name}突发故障")
            continue
        # Adjacent-value delta check --------------------------------------
        is_fault_detected = False
        # Walk adjacent data points (needs at least 2 values)
        for i in range(len(col_data) - 1):
            current_val = col_data.iloc[i]
            next_val = col_data.iloc[i + 1]
            # Both values must lie within the degradation interval
            if (current_val >= degradation_lower) and (current_val <= degradation_upper) \
                    and (next_val >= degradation_lower) and (next_val <= degradation_upper):
                # Absolute change between the two neighbours
                delta = abs(next_val - current_val)
                if delta > variable:
                    results.append(f"{column_name}突变故障")  # "abrupt-change fault"
                    faults.append(f"{column_name}突变故障")
                    is_fault_detected = True
                    break  # stop scanning once a fault is found
        if is_fault_detected:
            continue  # skip the degradation checks for this column
        # Degradation checks
        if ((col_data > normal_upper) & (col_data <= degradation_upper)).any():
            results.append(f"{column_name}偏大")  # "too high"
            degradations.append(f"{column_name}偏大")
            continue
        if ((col_data >= degradation_lower) & (col_data < normal_lower)).any():
            results.append(f"{column_name}偏小")  # "too low"
            degradations.append(f"{column_name}偏小")
    return {"results": results, "fault": faults, "degradation": degradations}
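
# ---------------------------------------------------------------------------
# The FastAPI app above is instantiated but this file defines no routes. A
# minimal sketch of an endpoint wiring the two checks together follows; the
# route path "/analyze", the request model, and the `data_url` field are
# assumptions, not part of the original design. The CSV fetch mirrors the
# commented-out `requests` example in the __main__ block below.
# ---------------------------------------------------------------------------
class AnalyzeRequest(BaseModel):
    data_url: str  # URL of the CSV file to analyze (assumed field name)

@app.post("/analyze")
def analyze(req: AnalyzeRequest):
    try:
        response = requests.get(req.data_url)
        response.encoding = 'UTF-8'
        dataframe = pd.read_csv(StringIO(response.text), index_col=False)
    except Exception as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    result = check_abnormal_headers(dataframe)
    resa = analyze_excel_data(dataframe)
    result.extend(resa["fault"])
    return {
        "code": 200,
        "msg": "操作成功",  # "operation successful"
        "data": {"fault": result, "degradation": resa["degradation"]},
    }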
if __name__ == '__main__':
    # data_url = "http://169.254.117.72:9090/profile/upload/2025/05/16/排气原始数据_20250516140130A009.csv"
    # response = requests.get(data_url)
    # response.encoding = 'UTF-8'
    # csv_data = StringIO(response.text)
    csv_data = r"D:\project\daima\huang\Hang_KG\data\排气原始数据.csv"  # raw string avoids backslash-escape issues in the Windows path
    dataframe = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
    result = check_abnormal_headers(dataframe)
    resa = analyze_excel_data(dataframe)
    result.extend(resa["fault"])
    results = {
        "code": 200,
        "msg": "操作成功",  # "operation successful"
        "data": {
            "score": 58,  # hard-coded sample score; see the 60 - 60 * A formula note above
            "fault": result,
            "degradation": resa["degradation"]
        }
    }
    print(results)
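
# Illustrative console output (actual contents depend on the CSV and config):
# {'code': 200, 'msg': '操作成功', 'data': {'score': 58, 'fault': [...], 'degradation': [...]}}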