31039 1 сар өмнө
parent
commit
aa33c9a5b0
5 өөрчлөгдсөн 197 нэмэгдсэн , 31 устгасан
  1. 40 31
      api/actions.py
  2. 1 0
      api/api.py
  3. 6 0
      api/models.py
  4. 148 0
      api/test2.py
  5. 2 0
      config/config.json

+ 40 - 31
api/actions.py

@@ -3,8 +3,10 @@ import json
 import pandas as pd
 import requests
 from io import StringIO
-from flask import Flask, jsonify, request
-app = Flask(__name__)
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+
+app = FastAPI(title="飞参判读API")
 
 # 获取当前文件的父目录的父目录
 current_dir = Path(__file__).parent
@@ -35,64 +37,71 @@ def check_abnormal_headers(dataframe):
 
 # 突发故障判断逻辑
 def analyze_excel_data(dataframe):
-    # df = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
     results = []
-    faults = []              # 故障
-    degradations = []         # 退化
+    faults = []  # 故障
+    degradations = []  # 退化
     headers = config['mutationFailures']
 
     for column_rule in headers:
-        # 提取配置参数
         column_name = column_rule["column"]
         normal_range = column_rule["normalRange"]
         degradation_range = column_rule["degradationInterval"]
+        variable = column_rule["variable"]
 
-        normal_lower = normal_range[0]  # 正常区间下限
-        normal_upper = normal_range[1]  # 正常区间上限
-        degradation_lower = degradation_range[0]  # 退化区间上限
-        degradation_upper = degradation_range[1]
+        normal_lower, normal_upper = normal_range[0], normal_range[1]
+        degradation_lower, degradation_upper = degradation_range[0], degradation_range[1]
 
-        # 数据预处理:转换为数值并去除非数值/空值
+        # 数据预处理
         col_data = pd.to_numeric(dataframe[column_name], errors="coerce").dropna()
-
-        # 跳过无有效数据的列
         if len(col_data) == 0:
             continue
 
-        # 逻辑判断(按优先级顺序)
-        # 突发故障:超出退化区间上下限
+        # 突发故障判断
         if ((col_data > degradation_upper) | (col_data < degradation_lower)).any():
-            # 打印飞参数据中故障结果和退化分析结果
             results.append(f"{column_name}突发故障")
-            # 只保留故障结果,不保留退化分析结果
-            faults.append(column_name)
+            faults.append(f"{column_name}突变故障")
             continue
 
-        # 退化偏大:在正常上限和退化上限之间
+        # 相邻值变化量检测 ----------------------------------------
+        is_fault_detected = False
+        # 遍历所有相邻数据点(至少需要2个数据)
+        for i in range(len(col_data) - 1):
+            current_val = col_data.iloc[i]
+            next_val = col_data.iloc[i + 1]
+
+            # 判断两个值是否都处于退化区间内
+            if (current_val >= degradation_lower) and (current_val <= degradation_upper) \
+                    and (next_val >= degradation_lower) and (next_val <= degradation_upper):
+                # 计算变化量绝对值
+                delta = abs(next_val - current_val)
+                if delta > variable:
+                    results.append(f"{column_name}突变故障")
+                    faults.append(f"{column_name}突变故障")
+                    is_fault_detected = True
+                    break  # 发现故障后立即终止循环
+
+        if is_fault_detected:
+            continue  # 跳过后续判断
+
+        # 退化判断逻辑
         if ((col_data > normal_upper) & (col_data <= degradation_upper)).any():
             results.append(f"{column_name}偏大")
             degradations.append(column_name)
             continue
 
-        # 偏小:在退化下限和正常下限之间
         if ((col_data >= degradation_lower) & (col_data < normal_lower)).any():
             results.append(f"{column_name}偏小")
             degradations.append(f"{column_name}偏小")
 
-    fault_result = {
-            "results": results,
-            "fault": faults,
-            "degradation": degradations
-        }
-
-    return fault_result
+    return {"results": results, "fault": faults, "degradation": degradations}
 
 
 if __name__ == '__main__':
-    data_url = "http://169.254.117.72:9090/profile/upload/2025/05/16/排气原始数据_20250516140130A009.csv"
-    response = requests.get(data_url)
-    response.encoding = 'UTF-8'
-    csv_data = StringIO(response.text)
+    # data_url = "http://169.254.117.72:9090/profile/upload/2025/05/16/排气原始数据_20250516140130A009.csv"
+    # response = requests.get(data_url)
+    # response.encoding = 'UTF-8'
+    # csv_data = StringIO(response.text)
+    csv_data = r"D:\project\daima\huang\Hang_KG\data\排气原始数据.csv"
     dataframe = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
     result = check_abnormal_headers(dataframe)
     resa = analyze_excel_data(dataframe)

+ 1 - 0
api/api.py

@@ -8,6 +8,7 @@ from sklearn.metrics import mean_squared_error
 from sklearn.model_selection import train_test_split
 import json
 
+
 app = Flask(__name__)
 
 

+ 6 - 0
api/models.py

@@ -0,0 +1,6 @@
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+
+
+class AnalysisRequest(BaseModel):
+    data_url: str

+ 148 - 0
api/test2.py

@@ -0,0 +1,148 @@
+from pathlib import Path
+import json
+import pandas as pd
+import requests
+from io import StringIO
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from models import AnalysisRequest
+import uvicorn
+
+app = FastAPI(title="飞参判读API")
+
+# 获取当前文件的父目录的父目录
+current_dir = Path(__file__).parent
+json_path = current_dir.parent / "config" / "config.json"
+
+# 读取 JSON
+with open(json_path, "r", encoding="utf-8") as f:
+    config = json.load(f)
+
+headers = config['mutationFailures']
+
+# 故障分数公式为 60 - 60 * A(A)为系数
+
+
+# 逻辑判断函数
+def check_abnormal_headers(dataframe):
+    abnormal_headers = []
+    headers = config['logicalFailures']
+
+    # 遍历所有列(表头)
+    for header in headers:
+        # 检查该列是否存在至少一个1
+        if (dataframe[header] == 1).any():
+            abnormal_headers.append(header)
+
+    return abnormal_headers
+
+
+# 突发故障判断逻辑
+def analyze_excel_data(dataframe):
+    results = []
+    faults = []  # 故障
+    degradations = []  # 退化
+    headers = config['mutationFailures']
+
+    for column_rule in headers:
+        column_name = column_rule["column"]
+        normal_range = column_rule["normalRange"]
+        degradation_range = column_rule["degradationInterval"]
+        variable = column_rule["variable"]
+
+        normal_lower, normal_upper = normal_range[0], normal_range[1]
+        degradation_lower, degradation_upper = degradation_range[0], degradation_range[1]
+
+        # 数据预处理
+        col_data = pd.to_numeric(dataframe[column_name], errors="coerce").dropna()
+        if len(col_data) == 0:
+            continue
+
+        # 突发故障判断(原有逻辑)
+        if ((col_data > degradation_upper) | (col_data < degradation_lower)).any():
+            results.append(f"{column_name}突发故障")
+            faults.append(f"{column_name}突变故障")
+            continue
+
+        # 新增:相邻值变化量检测 ----------------------------------------
+        is_fault_detected = False
+        # 遍历所有相邻数据点(至少需要2个数据)
+        for i in range(len(col_data) - 1):
+            current_val = col_data.iloc[i]
+            next_val = col_data.iloc[i + 1]
+
+            # 判断两个值是否都处于退化区间内
+            if (current_val >= degradation_lower) and (current_val <= degradation_upper) \
+                    and (next_val >= degradation_lower) and (next_val <= degradation_upper):
+                # 计算变化量绝对值
+                delta = abs(next_val - current_val)
+                if delta > variable:
+                    results.append(f"{column_name}突变故障")
+                    faults.append(f"{column_name}突变故障")
+                    is_fault_detected = True
+                    break  # 发现故障后立即终止循环
+
+        if is_fault_detected:
+            continue  # 跳过后续判断
+        # -----------------------------------------------------------
+
+        # 原有退化判断逻辑
+        if ((col_data > normal_upper) & (col_data <= degradation_upper)).any():
+            results.append(f"{column_name}偏大")
+            degradations.append(column_name)
+            continue
+
+        if ((col_data >= degradation_lower) & (col_data < normal_lower)).any():
+            results.append(f"{column_name}偏小")
+            degradations.append(f"{column_name}偏小")
+
+    fault_result = {
+            "results": results,
+            "fault": faults,
+            "degradation": degradations
+        }
+
+    return fault_result
+
+
+@app.post("/process/faultDiagnosis", summary="分析发动机健康状态")
+async def analyze_engine_health(request: AnalysisRequest):
+    """
+    通过 CSV 数据 URL 分析发动机状态,返回:
+    - 健康评分
+    - 故障列表
+    - 性能退化指标
+    """
+    try:
+        # 获取并解析数据
+        response = requests.get(request.data_url)
+        response.encoding = 'UTF-8'
+        csv_data = StringIO(response.text)
+        dataframe = pd.read_csv(csv_data, index_col=False)
+
+        # 执行分析
+        header_issues = check_abnormal_headers(dataframe)
+        analysis_result = analyze_excel_data(dataframe)
+
+        # 合并结果
+        combined_faults = header_issues + analysis_result["fault"]
+
+        return {
+            "code": 200,
+            "msg": "操作成功",
+            "data": {
+                "score": 58,  # 待实现实际计分逻辑
+                "fault": combined_faults,
+                "degradation": analysis_result["degradation"]
+            }
+        }
+    except requests.RequestException as e:
+        raise HTTPException(status_code=400, detail=f"数据获取失败: {str(e)}")
+    except pd.errors.ParserError as e:
+        raise HTTPException(status_code=422, detail="CSV 数据解析失败")
+    except KeyError as e:
+        raise HTTPException(status_code=500, detail=f"配置字段缺失: {str(e)}")
+
+
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=8848)

+ 2 - 0
config/config.json

@@ -2,11 +2,13 @@
   "mutationFailures": [
     {
       "column": "发动机油门角度",
+      "variable": 50,
       "normalRange": [100, 120],
       "degradationInterval": [0, 150]
     },
     {
       "column": "发动机排气温度",
+      "variable": 50,
       "normalRange": [400, 450],
       "degradationInterval": [300, 600]
     }