123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113 |
- from pathlib import Path
- import json
- import pandas as pd
- import requests
- from io import StringIO
- from flask import Flask, jsonify, request
# Flask application instance. No routes are registered in this file;
# presumably it is imported/registered elsewhere — TODO confirm.
app = Flask(__name__)

# Resolve ../config/config.json relative to this source file.
current_dir = Path(__file__).parent
json_path = current_dir.parent / "config" / "config.json"

# Load the rule configuration once at import time.
with open(json_path, "r", encoding="utf-8") as f:
    config = json.load(f)

# NOTE(review): the original bound `headers = config['mutationFailures']`
# here, but both functions below immediately rebind `headers` locally, so
# the module-level binding was dead code and has been removed. Restore it
# if some other module imports `headers` from this one — TODO confirm.

# Fault score formula: 60 - 60 * A, where A is a coefficient.
- # 逻辑判断函数
# Logical-failure check
def check_abnormal_headers(dataframe, rules=None):
    """Return the names of columns that contain at least one value equal to 1.

    Each configured "logical failure" column is treated as a flag series;
    a 1 anywhere in the column marks that signal as abnormal.

    Args:
        dataframe: pandas DataFrame holding the flight-parameter data.
        rules: iterable of column names to check. Defaults to
            ``config['logicalFailures']`` (backward compatible with the
            original zero-argument-config behavior).

    Returns:
        list[str]: column names (in rule order) that contain at least one 1.
        Rule columns absent from the DataFrame are skipped instead of
        raising ``KeyError``.
    """
    if rules is None:
        rules = config['logicalFailures']
    abnormal_headers = []
    for header in rules:
        # Guard: a particular upload may lack some configured columns.
        if header not in dataframe.columns:
            continue
        if (dataframe[header] == 1).any():
            abnormal_headers.append(header)
    return abnormal_headers
- # 突发故障判断逻辑
# Sudden-fault / degradation analysis
def analyze_excel_data(dataframe, rules=None):
    """Classify each configured numeric column as faulty, degraded, or normal.

    Each column's values are compared against two nested intervals, in
    priority order:
      1. outside [degradation_lower, degradation_upper] -> sudden fault
      2. in (normal_upper, degradation_upper]           -> degraded (high)
      3. in [degradation_lower, normal_lower)           -> degraded (low)

    Args:
        dataframe: pandas DataFrame with the raw flight-parameter data.
        rules: list of rule dicts with keys "column", "normalRange"
            (two-element [lower, upper]) and "degradationInterval"
            (two-element [lower, upper]). Defaults to
            ``config['mutationFailures']`` for backward compatibility.

    Returns:
        dict with keys:
            "results": human-readable findings (e.g. "<col>突发故障"),
            "fault": column names flagged as sudden faults,
            "degradation": column names flagged as degraded.
    """
    if rules is None:
        rules = config['mutationFailures']
    results = []
    faults = []        # sudden faults
    degradations = []  # degradations
    for column_rule in rules:
        column_name = column_rule["column"]
        normal_lower, normal_upper = column_rule["normalRange"]
        # Degradation interval LOWER and UPPER bounds (the original comment
        # mislabeled the lower bound as an upper bound).
        degradation_lower, degradation_upper = column_rule["degradationInterval"]
        # Guard: a particular upload may lack some configured columns.
        if column_name not in dataframe.columns:
            continue
        # Coerce to numeric and drop blank / non-numeric cells.
        col_data = pd.to_numeric(dataframe[column_name], errors="coerce").dropna()
        # Skip columns with no valid data.
        if col_data.empty:
            continue
        # Priority 1 — sudden fault: any sample outside the degradation interval.
        if ((col_data > degradation_upper) | (col_data < degradation_lower)).any():
            results.append(f"{column_name}突发故障")
            faults.append(column_name)
            continue
        # Priority 2 — degraded high: between normal upper and degradation upper.
        if ((col_data > normal_upper) & (col_data <= degradation_upper)).any():
            results.append(f"{column_name}偏大")
            degradations.append(column_name)
            continue
        # Priority 3 — degraded low: between degradation lower and normal lower.
        # BUG FIX: the original appended f"{column_name}偏小" to `degradations`,
        # inconsistent with the degraded-high branch (bare column name).
        if ((col_data >= degradation_lower) & (col_data < normal_lower)).any():
            results.append(f"{column_name}偏小")
            degradations.append(column_name)
    return {
        "results": results,
        "fault": faults,
        "degradation": degradations,
    }
if __name__ == '__main__':
    # Ad-hoc manual run: fetch a CSV from the dev file server, run both
    # analyses on it, and print a JSON-style summary.
    data_url = "http://169.254.117.72:9090/profile/upload/2025/05/16/排气原始数据_20250516140130A009.csv"
    # timeout= keeps a dead server from hanging the script forever;
    # raise_for_status() surfaces HTTP errors instead of silently feeding
    # an error page into the CSV parser.
    response = requests.get(data_url, timeout=30)
    response.raise_for_status()
    response.encoding = 'UTF-8'
    csv_data = StringIO(response.text)
    dataframe = pd.read_csv(csv_data, index_col=False)
    result = check_abnormal_headers(dataframe)
    resa = analyze_excel_data(dataframe)
    # Merge sudden-fault columns into the logical-failure list.
    result.extend(resa["fault"])
    results = {
        "code": 200,
        "msg": "操作成功",
        "data": {
            # NOTE(review): score is hard-coded; the "60 - 60 * A"
            # coefficient formula mentioned above is not implemented — TODO.
            "score": 58,
            "fault": result,
            "degradation": resa["degradation"],
        },
    }
    print(results)
|