from pathlib import Path
import json
import pandas as pd
import requests
from io import StringIO
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from models import AnalysisRequest
import uvicorn
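
# AnalysisRequest lives in models.py (not shown here). Judging from its use in the
# endpoint below, it is assumed to be roughly:
#     class AnalysisRequest(BaseModel):
#         url: str  # URL pointing at the CSV flight-parameter data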

app = FastAPI(title="Flight Parameter Interpretation API")

# Resolve config/config.json relative to this file's grandparent directory
current_dir = Path(__file__).parent
json_path = current_dir.parent / "config" / "config.json"

# Load the JSON configuration
with open(json_path, "r", encoding="utf-8") as f:
    config = json.load(f)

headers = config['mutationFailures']
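
# The code below expects config.json to contain three rule lists. An assumed,
# illustrative shape (field names are taken from how they are read below; the
# example columns and values are placeholders, not the real configuration):
# {
#   "logicalFailures":   [{"column": "fire_warning_flag"}],
#   "mutationFailures":  [{"column": "exhaust_gas_temperature", "variable": 50}],
#   "startupConditions": [{"column": "hydraulic_pump_1_pressure", "state": 25}]
# }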

# Fault score formula: 60 - 60 * A, where A is a coefficient

# Logical-failure check
def check_abnormal_headers(dataframe):
    abnormal_headers = []
    headers = config['logicalFailures']
    # Walk every configured column (header)
    for column_rule in headers:
        column_name = column_rule["column"]
        # Guard against rules whose column is missing from the data
        if column_name not in dataframe.columns:
            continue
        # Any value of 1 in the column marks a logical failure
        if (dataframe[column_name] == 1).any():
            abnormal_headers.append(column_name)
    return abnormal_headers

# Sudden-change (mutation) fault detection
def analyze_excel_data(dataframe):
    results = []
    faults = []        # faults
    degradations = []  # degradations (not populated by this check)
    headers = config['mutationFailures']
    for column_rule in headers:
        column_name = column_rule["column"]
        variable = column_rule["variable"]
        # Guard against rules whose column is missing from the data
        if column_name not in dataframe.columns:
            continue
        # Preprocess: coerce to numeric and drop values that cannot be parsed
        col_data = pd.to_numeric(dataframe[column_name], errors="coerce").dropna()
        if len(col_data) == 0:
            continue
        # Adjacent-value delta check ----------------------------------------
        # Walk all adjacent data points (needs at least 2 values)
        for i in range(len(col_data) - 1):
            current_val = col_data.iloc[i]
            next_val = col_data.iloc[i + 1]
            # Flag a sudden-change fault when the jump between neighbours exceeds the threshold
            delta = abs(next_val - current_val)
            if delta > variable:
                results.append(f"{column_name} sudden-change fault")
                faults.append(f"{column_name} sudden-change fault")
                break  # stop scanning this column once a fault is found
    return {"results": results, "fault": faults, "degradation": degradations}

@app.post("/process/faultDiagnosis", summary="Analyze engine health state")
async def analyze_engine_health(request: AnalysisRequest):
    """
    Analyze the engine state from a CSV data URL and return:
    - a health score
    - a fault list
    - performance degradation indicators
    """
    try:
        # Pull the data URL from the request
        data_url = request.url
        # Fetch and parse the data
        response = requests.get(data_url)
        response.raise_for_status()  # surface HTTP errors instead of parsing an error page
        response.encoding = 'UTF-8'
        csv_data = StringIO(response.text)
        dataframe = pd.read_csv(csv_data, index_col=False)
        # Hydraulic pump pressure columns (adjust to the actual column names)
        # hydraulic_pumps = ["液压泵1压力", "液压泵2压力", "液压泵3压力", "液压泵4压力"]
        # Extract the column names and required state values
        conditions = config["startupConditions"]
        required_columns = [cond["column"] for cond in conditions]
        thresholds = {cond["column"]: cond["state"] for cond in conditions}
        # Initialize the response payload
        final_result = {
            "code": 200,
            "msg": "Operation successful",
            "data": {
                "score": 100,  # full score by default
                "fault": [],
                "degradation": []
            }
        }
        try:
            # Make sure all required columns are present
            missing_cols = [col for col in required_columns if col not in dataframe]
            if missing_cols:
                raise ValueError(f"Missing pressure columns: {', '.join(missing_cols)}")
            # Condition mask: rows where every column meets its configured threshold
            condition_mask = pd.DataFrame({
                col: (dataframe[col] >= thresholds[col]) for col in required_columns
            }).all(axis=1)
            if condition_mask.any():
                # Index of the first row that satisfies the startup condition
                start_idx = condition_mask.idxmax()
                # Keep only the data from that row onwards
                analysis_data = dataframe.loc[start_idx:]
                # Run the analysis logic
                logical_errors = check_abnormal_headers(analysis_data)
                mutation_res = analyze_excel_data(analysis_data)
                # Merge the results
                final_result["data"]["fault"] = logical_errors + mutation_res["fault"]
                final_result["data"]["degradation"] = mutation_res["degradation"]
                # Compute the score (example formula, adjust as needed)
                error_count = len(final_result["data"]["fault"])
                final_result["data"]["score"] = max(0, 60 - error_count * 6)
            else:
                final_result["msg"] = "🚀 Engine did not start successfully; no data to analyze"
        except Exception as e:
            final_result = {
                "code": 400,
                "msg": f"Error: {str(e)}",
                "data": None
            }
        return final_result
    except requests.RequestException as e:
        raise HTTPException(status_code=400, detail=f"Failed to fetch data: {str(e)}")
    except pd.errors.ParserError:
        raise HTTPException(status_code=422, detail="Failed to parse CSV data")
    except KeyError as e:
        raise HTTPException(status_code=500, detail=f"Missing configuration field: {str(e)}")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8848)
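
# Example call once the service is running (the CSV URL below is a placeholder):
#   curl -X POST http://localhost:8848/process/faultDiagnosis \
#        -H "Content-Type: application/json" \
#        -d '{"url": "http://example.com/flight_data.csv"}'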