from pathlib import Path
import json
import pandas as pd
import requests
from io import StringIO
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

app = FastAPI(title="飞参判读API")

# Locate the rule file relative to this module:
# <parent-of-this-file's-dir>/config/config.json
current_dir = Path(__file__).parent
json_path = current_dir.parent / "config" / "config.json"

# Load the rule configuration once at import time.
with open(json_path, "r", encoding="utf-8") as f:
    config = json.load(f)

# NOTE(review): this module-level name is shadowed by same-named locals in
# both functions below and appears otherwise unused; kept so any external
# importer of `headers` keeps working.
headers = config['mutationFailures']


# Fault score formula: 60 - 60 * A, where A is a coefficient.
def check_abnormal_headers(dataframe):
    """Return the logical-failure columns that contain at least one 1.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Flight-parameter data; must contain every column listed in
        ``config['logicalFailures']``.

    Returns
    -------
    list[str]
        Names of the columns flagged as abnormal.
    """
    logical_columns = config['logicalFailures']
    # A column is abnormal when any row holds the value 1.
    return [col for col in logical_columns if (dataframe[col] == 1).any()]


def _adjacent_jump_exceeds(col_data, lower, upper, max_delta):
    """Return True if two consecutive samples both lie inside [lower, upper]
    and the absolute change between them exceeds ``max_delta``.

    This is the "mutation fault" criterion: a sudden jump between adjacent
    readings that are individually still within the degradation interval.
    """
    for prev_val, next_val in zip(col_data.iloc[:-1], col_data.iloc[1:]):
        both_inside = (lower <= prev_val <= upper) and (lower <= next_val <= upper)
        if both_inside and abs(next_val - prev_val) > max_delta:
            return True
    return False


def analyze_excel_data(dataframe):
    """Classify each configured column as burst fault, mutation fault,
    degradation (too high / too low), or normal.

    Rules come from ``config['mutationFailures']``; each rule gives the
    column name, its normal range, its degradation interval, and the max
    allowed jump ("variable") between adjacent samples.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Raw flight-parameter data containing the configured columns.

    Returns
    -------
    dict
        ``{"results": [...], "fault": [...], "degradation": [...]}`` —
        human-readable findings, fault labels, and degradation labels.
    """
    results = []        # all findings, in column order
    faults = []         # burst / mutation fault labels
    degradations = []   # degradation labels

    for column_rule in config['mutationFailures']:
        column_name = column_rule["column"]
        normal_lower, normal_upper = column_rule["normalRange"][0], column_rule["normalRange"][1]
        degradation_lower, degradation_upper = (
            column_rule["degradationInterval"][0],
            column_rule["degradationInterval"][1],
        )
        variable = column_rule["variable"]  # max allowed adjacent-sample jump

        # Coerce to numeric and drop unparseable entries.
        col_data = pd.to_numeric(dataframe[column_name], errors="coerce").dropna()
        if col_data.empty:
            continue

        # Burst fault: any value outside the degradation interval entirely.
        if ((col_data > degradation_upper) | (col_data < degradation_lower)).any():
            results.append(f"{column_name}突发故障")
            # NOTE(review): label differs from the results entry above
            # (突变 vs 突发) — preserved as-is; confirm which is intended.
            faults.append(f"{column_name}突变故障")
            continue

        # Mutation fault: big jump between adjacent in-interval samples.
        if _adjacent_jump_exceeds(col_data, degradation_lower, degradation_upper, variable):
            results.append(f"{column_name}突变故障")
            faults.append(f"{column_name}突变故障")
            continue

        # Degradation: values beyond the normal range but still within
        # the degradation interval.
        if ((col_data > normal_upper) & (col_data <= degradation_upper)).any():
            results.append(f"{column_name}偏大")
            # Fix: append the suffixed label for consistency with the 偏小
            # branch below (original appended the bare column name).
            degradations.append(f"{column_name}偏大")
            continue
        if ((col_data >= degradation_lower) & (col_data < normal_lower)).any():
            results.append(f"{column_name}偏小")
            degradations.append(f"{column_name}偏小")

    return {"results": results, "fault": faults, "degradation": degradations}


if __name__ == '__main__':
    # Remote-fetch variant kept for reference:
    # data_url = "http://169.254.117.72:9090/profile/upload/2025/05/16/排气原始数据_20250516140130A009.csv"
    # response = requests.get(data_url)
    # response.encoding = 'UTF-8'
    # csv_data = StringIO(response.text)

    # Fix: raw string — the original literal relied on invalid escape
    # sequences (\p, \d, \h), which raise SyntaxWarning on modern Python
    # and are slated to become errors.
    csv_data = r"D:\project\daima\huang\Hang_KG\data\排气原始数据.csv"
    dataframe = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')

    result = check_abnormal_headers(dataframe)
    resa = analyze_excel_data(dataframe)
    # Merge detected faults into the abnormal-header list.
    result.extend(resa["fault"])

    results = {
        "code": 200,
        "msg": "操作成功",
        "data": {
            "score": 58,  # TODO confirm: score is hard-coded; formula above says 60 - 60 * A
            "fault": result,
            "degradation": resa["degradation"],
        },
    }
    print(results)