|
@@ -1,6 +1,10 @@
|
|
|
from pathlib import Path
|
|
|
import json
|
|
|
import pandas as pd
|
|
|
+import requests
|
|
|
+from io import StringIO
|
|
|
+from flask import Flask, jsonify, request
|
|
|
+app = Flask(__name__)
|
|
|
|
|
|
# 获取当前文件的父目录的父目录
|
|
|
current_dir = Path(__file__).parent
|
|
@@ -16,63 +20,22 @@ headers = config['mutationFailures']
|
|
|
|
|
|
|
|
|
# 逻辑判断函数
|
|
|
-def check_abnormal_headers(file_path):
|
|
|
- # 读取Excel文件(默认读取第一个sheet)
|
|
|
- df = pd.read_csv(file_path)
|
|
|
-
|
|
|
+def check_abnormal_headers(dataframe):
|
|
|
abnormal_headers = []
|
|
|
-
|
|
|
headers = config['logicalFailures']
|
|
|
|
|
|
# 遍历所有列(表头)
|
|
|
for header in headers:
|
|
|
# 检查该列是否存在至少一个1
|
|
|
- if (df[header] == 1).any():
|
|
|
+ if (dataframe[header] == 1).any():
|
|
|
abnormal_headers.append(header)
|
|
|
|
|
|
return abnormal_headers
|
|
|
|
|
|
|
|
|
# 突发故障判断逻辑
|
|
|
-# def analyze_excel_data(file_path):
|
|
|
-# """根据配置的区间规则分析 Excel 数据变化量"""
|
|
|
-# df = pd.read_excel(file_path)
|
|
|
-# results = []
|
|
|
-# headers = config['mutationFailures']
|
|
|
-# for columns in headers:
|
|
|
-# column_name = columns["column"]
|
|
|
-# normal_range = columns["normalRange"]
|
|
|
-# degradation_range = columns['degradationInterval']
|
|
|
-#
|
|
|
-# # 提取区间边界
|
|
|
-# normal_upper = normal_range[1]
|
|
|
-# degradation_upper = degradation_range[1]
|
|
|
-#
|
|
|
-# # 数据预处理
|
|
|
-# col_data = pd.to_numeric(df[column_name], errors="coerce").dropna()
|
|
|
-#
|
|
|
-# # 至少需要2个数据点才能计算变化量
|
|
|
-# if len(col_data) < 2:
|
|
|
-# continue
|
|
|
-#
|
|
|
-# # 计算相邻数据绝对变化量
|
|
|
-# changes = col_data.diff().dropna().abs()
|
|
|
-# max_change = changes.max()
|
|
|
-#
|
|
|
-# # 动态判断逻辑
|
|
|
-# if max_change > degradation_upper:
|
|
|
-# results.append(f"{column_name}突变故障")
|
|
|
-# elif max_change > normal_upper:
|
|
|
-# # 获取实际变化方向(正/负)
|
|
|
-# trend_value = col_data.diff().dropna().loc[changes.idxmax()]
|
|
|
-# trend = "偏大" if trend_value > 0 else "偏小"
|
|
|
-# results.append(f"{column_name}{trend}")
|
|
|
-#
|
|
|
-# return sorted(results)
|
|
|
-
|
|
|
-# 突发故障判断逻辑
|
|
|
-def analyze_excel_data(file_path):
|
|
|
- df = pd.read_csv(file_path)
|
|
|
+def analyze_excel_data(dataframe):
|
|
|
+ # df = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
|
|
|
results = []
|
|
|
faults = [] # 故障
|
|
|
degradations = [] # 退化
|
|
@@ -84,13 +47,13 @@ def analyze_excel_data(file_path):
|
|
|
normal_range = column_rule["normalRange"]
|
|
|
degradation_range = column_rule["degradationInterval"]
|
|
|
|
|
|
- normal_lower = normal_range[0] # 正常区间下限(如0)
|
|
|
- normal_upper = normal_range[1] # 正常区间上限(如5)
|
|
|
- degradation_lower = degradation_range[0] # 退化区间上限(如10)
|
|
|
+ normal_lower = normal_range[0] # 正常区间下限
|
|
|
+ normal_upper = normal_range[1] # 正常区间上限
|
|
|
+    degradation_lower = degradation_range[0]  # 退化区间下限
|
|
|
degradation_upper = degradation_range[1]
|
|
|
|
|
|
# 数据预处理:转换为数值并去除非数值/空值
|
|
|
- col_data = pd.to_numeric(df[column_name], errors="coerce").dropna()
|
|
|
+ col_data = pd.to_numeric(dataframe[column_name], errors="coerce").dropna()
|
|
|
|
|
|
# 跳过无有效数据的列
|
|
|
if len(col_data) == 0:
|
|
@@ -114,22 +77,37 @@ def analyze_excel_data(file_path):
|
|
|
# 偏小:在退化下限和正常下限之间
|
|
|
if ((col_data >= degradation_lower) & (col_data < normal_lower)).any():
|
|
|
results.append(f"{column_name}偏小")
|
|
|
- degradations.append(column_name)
|
|
|
+ degradations.append(f"{column_name}偏小")
|
|
|
|
|
|
fault_result = {
|
|
|
"results": results,
|
|
|
- "故障": faults,
|
|
|
- "退化": degradations
|
|
|
+ "fault": faults,
|
|
|
+ "degradation": degradations
|
|
|
}
|
|
|
|
|
|
return fault_result
|
|
|
|
|
|
|
|
|
if __name__ == '__main__':
|
|
|
- data_path = "../data/排气原始数据.csv"
|
|
|
- result = check_abnormal_headers(data_path)
|
|
|
- resa = analyze_excel_data(data_path)
|
|
|
- print(result)
|
|
|
- print(resa)
|
|
|
-
|
|
|
+ data_url = "http://169.254.117.72:9090/profile/upload/2025/05/16/排气原始数据_20250516140130A009.csv"
|
|
|
+ response = requests.get(data_url)
|
|
|
+ response.encoding = 'UTF-8'
|
|
|
+ csv_data = StringIO(response.text)
|
|
|
+ dataframe = pd.read_csv(csv_data, index_col=False, encoding='UTF-8')
|
|
|
+ result = check_abnormal_headers(dataframe)
|
|
|
+ resa = analyze_excel_data(dataframe)
|
|
|
+ fault = resa["fault"]
|
|
|
+ for i in fault:
|
|
|
+ result.append(i)
|
|
|
+
|
|
|
+ results = {
|
|
|
+ "code": 200,
|
|
|
+ "msg": "操作成功",
|
|
|
+ "data": {
|
|
|
+ "score": 58,
|
|
|
+ "fault": result,
|
|
|
+ "degradation": resa["degradation"]
|
|
|
+ }
|
|
|
+ }
|
|
|
+ print(results)
|
|
|
|