
Add algorithm code

allen · 1 year ago · commit 21563a5e69
2 changed files with 478 additions and 0 deletions
  1. cirs-algorithm/recommend.py  (+184, -0)
  2. cirs-algorithm/stable_marking.py  (+294, -0)

cirs-algorithm/recommend.py  (+184, -0)

@@ -0,0 +1,184 @@
+from flask import Flask, request, jsonify
+import pandas as pd
+import joblib
+from sklearn.model_selection import train_test_split
+from sklearn.ensemble import RandomForestClassifier
+import json
+from sklearn.pipeline import Pipeline
+from sklearn.feature_extraction.text import TfidfVectorizer
+
+
+app = Flask(__name__)
+app.config['JSON_AS_ASCII'] = False
+app.config['JSONIFY_MIMETYPE'] = "application/json;charset=utf-8"  # response MIME type and character encoding
+
+def load_model(model_path):
+    try:
+        # Load the pre-trained model
+        loaded_model = joblib.load(model_path)
+        return loaded_model
+    except Exception as e:
+        raise Exception(f"Failed to load the model: {str(e)}")
+
+def make_recommendations(model, use_scene, search_condition):
+    try:
+        # Convert input data to the format used for training
+        input_data = pd.DataFrame({'useScene': [use_scene], 'searchCondition': [search_condition]})
+        X_input = input_data[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
+
+        # Predict recommendations using the trained model
+        y_pred = model.predict(X_input)
+        y_pred_list = y_pred[0].tolist()
+
+        # Format the output data
+        output_data = {
+            'useScene': use_scene,
+            'searchCondition': search_condition,
+            'result1Id': y_pred_list[0],
+            'result2Id': y_pred_list[1],
+            'result3Id': y_pred_list[2],
+            'result4Id': y_pred_list[3],
+            'result5Id': y_pred_list[4]
+        }
+
+        return output_data
+    except Exception as e:
+        raise Exception(f"Failed to make recommendations: {str(e)}")
+
+@app.route('/recommend', methods=['POST'])
+def recommend_endpoint():
+    try:
+        # Get input parameters from the request
+        data = request.json
+        model_path = data.get('modelPath')
+        use_scene = data.get('useScene')
+        search_condition = data.get('searchCondition')
+
+        # Load the pre-trained model and vectorizer
+        model = load_model(model_path+'model.pkl')
+        # Make recommendations
+        result_data = make_recommendations(model, use_scene, search_condition)
+
+        return jsonify({'code': 200, 'msg': '成功', 'data': result_data})
+
+    except Exception as e:
+        return jsonify({'code': 500, 'msg': str(e)})
+
+
+
+# Training API -- final version
+
+def train_and_save_model(data, model_path):
+    try:
+        # Convert the JSON data to a DataFrame
+        df = pd.DataFrame(data['dataSet'])
+        pipeline = Pipeline([
+            ('vectorizer', TfidfVectorizer()),
+            ('classifier', RandomForestClassifier(n_estimators=400, random_state=42))
+        ])
+        # Extract features and labels
+        X = df[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
+        y = df[['result1Id', 'result2Id', 'result3Id', 'result4Id', 'result5Id']]
+        pipeline.fit(X, y)
+        # Save the trained model to a file
+        joblib.dump(pipeline, model_path+'model.pkl')
+
+        return {'code': 200, 'msg': '成功'}
+
+    except Exception as e:
+        return {'code': 500, 'msg': str(e)}
+
+@app.route('/train', methods=['POST'])
+def train_model():
+    try:
+        # Get input parameters from the request
+        data = request.json
+        model_path = data.get('modelPath')
+
+        # Call the function to train and save the model
+        result = train_and_save_model(data, model_path)
+
+        return jsonify(result)
+
+    except Exception as e:
+        return jsonify({'code': 500, 'msg': str(e)})
+
+
+# Verification API -- final version 2
+# Train the model on a 70/30 split of the submitted data set
+def train_model2(data):
+    train_data, test_data = train_test_split(data, test_size=0.3, random_state=48)
+    X_train = train_data[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
+    y_train = train_data[['result1Id', 'result2Id', 'result3Id', 'result4Id', 'result5Id']]
+    X_test = test_data[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
+    y_test = test_data[['result1Id', 'result2Id', 'result3Id', 'result4Id', 'result5Id']]
+    train_data['trained'] = 1
+    train_data['tested'] = 0
+    # Convert the concatenated text into TF-IDF features
+    vectorizer = TfidfVectorizer()
+    X_train_matrix = vectorizer.fit_transform(X_train)
+    X_test_matrix = vectorizer.transform(X_test)
+    rf_classifier = RandomForestClassifier(n_estimators=400, random_state=42)
+    rf_classifier.fit(X_train_matrix, y_train)
+    return vectorizer, rf_classifier, test_data, y_test, train_data
+
+# Predict on the held-out test split and compute evaluation metrics
+def predict(vectorizer, rf_classifier, test_data, y_test, train_data):
+    X_test = test_data[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
+    y_pred = rf_classifier.predict(vectorizer.transform(X_test))
+    y_pred_df = pd.DataFrame(y_pred, columns=['calculate1Id', 'calculate2Id', 'calculate3Id', 'calculate4Id', 'calculate5Id'])
+    y_pred_df.index = test_data.index
+    output_df = pd.concat([test_data[['id','useScene','searchCondition','result1Id','result2Id','result3Id','result4Id','result5Id']], y_pred_df], axis=1)
+    output_df['trained'] = 0  # 0 indicates the record is not in the training set
+    output_df['tested'] = 1  # 1 indicates the record is in the testing set
+    full_data = pd.concat([train_data, output_df]).sort_index().reset_index(drop=True)
+    # Compute per-row accuracy and recall
+    TP_list = []
+    TN_list = []
+    FP_list = []
+    FN_list = []
+    for i in range(y_test.shape[0]):
+        # Count true positives (TP), true negatives (TN), false positives (FP), and false negatives (FN) for each row
+        y_test_row = y_test.iloc[i, :].astype(str)
+        y_pred_row = y_pred_df.iloc[i, :].astype(str)
+        TP = sum((y_test_row).isin(y_pred_row))
+        TN = sum((y_test_row).isin(y_pred_row) == False)
+        FP = sum((y_pred_row).isin(y_test_row) == False)
+        FN = sum((y_test_row).isin(y_pred_row) == False)
+        TP_list.append(TP)
+        TN_list.append(TN)
+        FP_list.append(FP)
+        FN_list.append(FN)
+    accuracy_list = [(TP + TN) / (TP + TN + FP + FN) if (TP + TN + FP + FN) != 0 else 0 for TP, TN, FP, FN in zip(TP_list, TN_list, FP_list, FN_list)]
+    recall_list = [TP / (TP + FN) if (TP + FN) != 0 else 0 for TP, FN in zip(TP_list, FN_list)]
+    # Build the JSON response
+    output_data = {}
+    output_data['code'] = 200
+    output_data['msg'] = '成功'
+    output_data['data'] = {}
+    output_data['data']['accuracyRate'] = round((sum(accuracy_list) / len(accuracy_list))*100, 2)
+    output_data['data']['recallRate'] = round((sum(recall_list) / len(recall_list))*100, 2)
+    output_data['data']['dataSet'] = full_data.to_dict('records')
+    return output_data
+
+# Route and handler for the verification API
+@app.route('/verification', methods=['POST'])
+def verification():
+    # Parse the JSON payload from the request
+    input_data = request.get_json()
+    # Convert the JSON data set to a DataFrame
+    data = pd.DataFrame(input_data['dataSet'])
+    # Train the model
+    vectorizer, rf_classifier, test_data, y_test, train_data = train_model2(data)
+    # Predict and assemble the evaluation result
+    output_data = predict(vectorizer, rf_classifier, test_data, y_test, train_data)
+    # Return the JSON response
+    return jsonify(output_data)
+
+
+# Run the application
+if __name__ == '__main__':
+    app.run(debug=True, port=8081, host='0.0.0.0')
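
For reference, a minimal client sketch for the three endpoints above (/train, /verification, /recommend). The field names follow the code in this file; the host, modelPath value, and sample data are assumptions made up for illustration:

import requests

BASE = "http://127.0.0.1:8081"  # assumed host; the port matches app.run(port=8081) above

# Illustrative data set; field names follow recommend.py, values are made up
data_set = [
    {
        "id": i,
        "useScene": f"scene {i % 3}",
        "searchCondition": f"condition {i % 2}",
        "result1Id": 100 + i, "result2Id": 200 + i, "result3Id": 300 + i,
        "result4Id": 400 + i, "result5Id": 500 + i,
    }
    for i in range(1, 11)
]

# /train fits the TF-IDF + random-forest pipeline and saves model.pkl under modelPath
print(requests.post(f"{BASE}/train",
                    json={"modelPath": "/data/models/", "dataSet": data_set}).json())

# /verification splits the data set 70/30, retrains, and reports accuracyRate / recallRate
print(requests.post(f"{BASE}/verification", json={"dataSet": data_set}).json())

# /recommend loads model.pkl and returns result1Id..result5Id for a single query
print(requests.post(f"{BASE}/recommend",
                    json={"modelPath": "/data/models/",
                          "useScene": "scene 1",
                          "searchCondition": "condition 0"}).json())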

cirs-algorithm/stable_marking.py  (+294, -0)

@@ -0,0 +1,294 @@
+# Scoring API
+from flask import Flask, request, jsonify
+import pandas as pd
+import joblib
+
+from sklearn.preprocessing import LabelEncoder  # converts text labels to numeric values
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis  # linear discriminant analysis classifier
+from sklearn.model_selection import train_test_split
+
+from sklearn.metrics import accuracy_score
+from sklearn.metrics import recall_score
+
+
+
+app = Flask(__name__)
+app.config['JSON_AS_ASCII'] = False
+app.config['JSONIFY_MIMETYPE'] = "application/json;charset=utf-8"  # response MIME type and character encoding
+
+def load_model(model_path):
+    try:
+        loaded_model = joblib.load(model_path)
+        return loaded_model
+    except Exception as e:
+        raise Exception(f"Failed to load the model: {str(e)}")
+
+def make_Scoring(model_data, data):
+    try:
+        # Build a DataFrame from the submitted record(s)
+        df = pd.DataFrame(data['dataSet'], index=range(len(data['dataSet'])))
+        X = df[['factor1', 'factor2', 'factor3', 'factor4', 'factor5', 'factor6', 'factor7', 'factor8', 'factor9',
+                'factor10', 'factor11', 'factor12', 'factor13', 'factor14', 'factor15', 'factor16', 'factor17',
+                'factor18', 'factor19', 'factor20', 'factor21', 'factor22', 'factor23', 'factor24', 'factor25',
+                'factor26', 'factor27', 'factor28', 'factor29', 'factor30', 'factor31', 'factor32', 'factor33',
+                'factor34', 'factor35', 'factor36', 'factor37', 'factor38', 'factor39', 'factor40',
+                'factor41', 'factor42', 'factor43', 'factor44']]
+        le_y = model_data['label_encoder_y']
+        le_X = model_data['label_encoder_X']
+        X = X.apply(le_X.transform)
+
+        loaded_lda = model_data['model']
+        y_pred = loaded_lda.predict(X)
+        new_y_pred = le_y.inverse_transform(y_pred)
+        # Attach the predicted grade to the submitted record (dataSet is a single object here)
+        data['dataSet']['finalGrade'] = new_y_pred[0]
+
+        return data
+    except Exception as e:
+        raise Exception(f"Failed to make Scoring: {str(e)}")
+
+@app.route('/biz/score/marking', methods=['POST'])
+def Scoring_endpoint():
+    try:
+        # Get input parameters from the request
+        data = request.json
+        model_path = data.get('modelPath')
+        # Load the pre-trained model and label encoders
+        model_data = joblib.load(model_path + 'model_and_encoders.joblib')
+        # Compute the score
+        result_data = make_Scoring(model_data, data)
+        return jsonify({'code': 200, 'msg': '成功', 'data': result_data})
+
+    except Exception as e:
+        return jsonify({'code': 500, 'msg': str(e)})
+
+def train_and_save_model(data, model_path):
+    try:
+        df = pd.DataFrame(data['dataSet'])
+        X = df[['factor1', 'factor2', 'factor3', 'factor4', 'factor5', 'factor6', 'factor7', 'factor8', 'factor9', 'factor10',
+        'factor11', 'factor12', 'factor13', 'factor14', 'factor15', 'factor16', 'factor17', 'factor18', 'factor19',
+        'factor20', 'factor21', 'factor22', 'factor23', 'factor24', 'factor25', 'factor26', 'factor27', 'factor28',
+        'factor29', 'factor30', 'factor31', 'factor32', 'factor33', 'factor34', 'factor35', 'factor36', 'factor37',
+        'factor38', 'factor39', 'factor40', 'factor41', 'factor42', 'factor43', 'factor44']]
+
+        le_X = LabelEncoder()
+        # Apply the LabelEncoder to every column of X (one shared encoder instance)
+        X = X.apply(le_X.fit_transform)
+        y = df['finalGrade']  # keep y one-dimensional for LabelEncoder
+        le_y = LabelEncoder()
+        y = le_y.fit_transform(y)
+        lda = LinearDiscriminantAnalysis()
+        lda.fit(X, y)
+        model_data = {
+            'model': lda,
+            'label_encoder_y': le_y,
+            'label_encoder_X': le_X
+        }
+        joblib.dump(model_data, model_path+'model_and_encoders.joblib')
+        return {'code': 200, 'msg': '成功'}
+    except Exception as e:
+        return {'code': 500, 'msg': str(e)}
+
+@app.route('/biz/score/train', methods=['POST'])
+def train_model():
+    try:
+        data = request.json
+        model_path = data.get('modelPath')
+        result = train_and_save_model(data, model_path)
+        return jsonify(result)
+    except Exception as e:
+        return jsonify({'code': 500, 'msg': str(e)})
+# Verification API
+
+# Train an LDA model on a 70/30 split of the submitted data set
+def train_model2(data):
+    try:
+        X = data[['factor1', 'factor2', 'factor3', 'factor4', 'factor5', 'factor6', 'factor7', 'factor8', 'factor9',
+                  'factor10', 'factor11', 'factor12', 'factor13', 'factor14', 'factor15', 'factor16', 'factor17',
+                  'factor18', 'factor19', 'factor20', 'factor21', 'factor22', 'factor23', 'factor24', 'factor25',
+                  'factor26', 'factor27', 'factor28', 'factor29', 'factor30', 'factor31', 'factor32', 'factor33',
+                  'factor34', 'factor35', 'factor36', 'factor37', 'factor38', 'factor39', 'factor40',
+                  'factor41', 'factor42', 'factor43', 'factor44']]
+        y = data['finalGrade']  # keep y one-dimensional for LabelEncoder
+
+        le = LabelEncoder()
+        X = X.apply(le.fit_transform)
+        y = le.fit_transform(y)
+
+        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=40)
+        print("Number of samples in X_train:", len(X_train))
+        print("y_train:", y_train)
+        lda = LinearDiscriminantAnalysis()
+        lda.fit(X_train, y_train)
+        y_pred = lda.predict(X_test)
+        y_pred_text = le.inverse_transform(y_pred)
+        accuracy = accuracy_score(y_test, y_pred)
+        recall = recall_score(y_test, y_pred, average='macro')
+        return accuracy, recall, X_test,X_train,y_pred_text
+    except Exception as e:
+        raise Exception(f"Failed to make Scoring: {str(e)}")
+# Route and handler for the scoring verification API
+@app.route('/biz/scoretaskdetail/verification', methods=['POST'])
+def verification():
+    try:
+        # Parse the JSON payload from the request
+        input_data = request.get_json()
+        # Convert the JSON data set to a DataFrame
+        data = pd.DataFrame(input_data['dataSet'])
+        # Train the model
+        accuracy, recall, X_test, X_train, y_pred_text = train_model2(data)
+        data['trained'] = 0
+        data['tested'] = 0
+        # Mark test records
+        data.loc[data.index.isin(X_test.index), 'tested'] = 1
+        # Mark training records
+        data.loc[data.index.isin(X_train.index), 'trained'] = 1
+        # Attach the predicted grades to the test records
+        data.loc[data.index.isin(X_test.index), 'calculatedGrade'] = y_pred_text
+        output_data = {}
+        output_data['code'] = 200
+        output_data['msg'] = '成功'
+        output_data['data'] = {}
+        output_data['data']['accuracyRate'] = round(accuracy * 100, 2)
+        output_data['data']['recallRate'] = round(recall * 100, 2)
+        output_data['data']['dataSet'] = data.to_dict('records')
+        # Return the JSON response
+        return jsonify(output_data)
+    except Exception as e:
+        raise Exception(f"Failed to make Scoring: {str(e)}")
+# Run the application
+if __name__ == '__main__':
+    app.run(debug=True, port=8082, host='0.0.0.0')
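
Similarly, a minimal client sketch for the scoring endpoints above (/biz/score/train, /biz/scoretaskdetail/verification, /biz/score/marking). The host, modelPath value, factor values, and grade labels are assumptions made up for illustration; only the field names come from the code in this file:

import requests

BASE = "http://127.0.0.1:8082"  # assumed host; the port matches app.run(port=8082) above

def make_record(seed, grade):
    # factor1..factor44 with a few repeating categorical values; names follow stable_marking.py
    rec = {f"factor{i}": f"v{(i + seed) % 3}" for i in range(1, 45)}
    rec["finalGrade"] = grade
    return rec

data_set = [make_record(s, "A" if s % 2 else "B") for s in range(20)]

# /biz/score/train fits the LDA model and saves model_and_encoders.joblib under modelPath
print(requests.post(f"{BASE}/biz/score/train",
                    json={"modelPath": "/data/models/", "dataSet": data_set}).json())

# /biz/scoretaskdetail/verification splits 70/30, retrains, and reports accuracyRate / recallRate
print(requests.post(f"{BASE}/biz/scoretaskdetail/verification",
                    json={"dataSet": data_set}).json())

# /biz/score/marking scores one record; note that dataSet is a single object here, not a list
single = {f"factor{i}": f"v{(i + 7) % 3}" for i in range(1, 45)}
print(requests.post(f"{BASE}/biz/score/marking",
                    json={"modelPath": "/data/models/", "dataSet": single}).json())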