# recommend.py

from flask import Flask, request, jsonify
import os
import pandas as pd
import joblib
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer

app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
app.config['JSONIFY_MIMETYPE'] = "application/json;charset=utf-8"  # Keep non-ASCII characters intact and declare UTF-8 in the response content type

def load_model(model_path):
    try:
        # Load the pre-trained model
        loaded_model = joblib.load(model_path)
        return loaded_model
    except Exception as e:
        raise Exception(f"Failed to load the model: {str(e)}")

def make_recommendations(model, use_scene, search_condition):
    try:
        # Convert the input into the same single-text format used for training
        input_data = pd.DataFrame({'useScene': [use_scene], 'searchCondition': [search_condition]})
        X_input = input_data[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
        # Predict the five recommended result IDs with the trained model
        y_pred = model.predict(X_input)
        y_pred_list = y_pred[0].tolist()
        # Format the output data
        output_data = {
            'useScene': use_scene,
            'searchCondition': search_condition,
            'result1Id': y_pred_list[0],
            'result2Id': y_pred_list[1],
            'result3Id': y_pred_list[2],
            'result4Id': y_pred_list[3],
            'result5Id': y_pred_list[4]
        }
        return output_data
    except Exception as e:
        raise Exception(f"Failed to make recommendations: {str(e)}")

@app.route('/recommend', methods=['POST'])
def recommend_endpoint():
    try:
        # Get input parameters from the request
        data = request.json
        model_path = data.get('modelPath')
        use_scene = data.get('useScene')
        search_condition = data.get('searchCondition')
        # Load the pre-trained pipeline (vectorizer + classifier) from the model directory
        model = load_model(os.path.join(model_path, 'model.pkl'))
        # Make recommendations
        result_data = make_recommendations(model, use_scene, search_condition)
        return jsonify({'code': 200, 'msg': '成功', 'data': result_data})
    except Exception as e:
        return jsonify({'code': 500, 'msg': str(e)})

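# Illustrative call to /recommend. The field values below are invented for the example;
# modelPath should be the directory where /train previously wrote model.pkl.
#
#   curl -X POST http://localhost:8081/recommend \
#        -H "Content-Type: application/json" \
#        -d '{"modelPath": "/data/models/", "useScene": "office", "searchCondition": "ergonomic chair"}'
#
# On success the response has the shape {"code": 200, "msg": "成功", "data": {..., "result1Id": ..., "result5Id": ...}}.
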
# Training algorithm API -- completed version
def train_and_save_model(data, model_path):
    try:
        # Convert the JSON data to a DataFrame
        df = pd.DataFrame(data['dataSet'])
        pipeline = Pipeline([
            ('vectorizer', TfidfVectorizer()),
            ('classifier', RandomForestClassifier(n_estimators=400, random_state=42))
        ])
        # Extract features (concatenated text) and the five label columns
        X = df[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
        y = df[['result1Id', 'result2Id', 'result3Id', 'result4Id', 'result5Id']]
        pipeline.fit(X, y)
        # Save the trained pipeline to a file
        joblib.dump(pipeline, os.path.join(model_path, 'model.pkl'))
        return {'code': 200, 'msg': '成功'}
    except Exception as e:
        return {'code': 500, 'msg': str(e)}

@app.route('/train', methods=['POST'])
def train_model():
    try:
        # Get input parameters from the request
        data = request.json
        model_path = data.get('modelPath')
        # Call the function to train and save the model
        result = train_and_save_model(data, model_path)
        return jsonify(result)
    except Exception as e:
        return jsonify({'code': 500, 'msg': str(e)})

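# Illustrative call to /train. The record values are invented for the example; each
# dataSet record must carry useScene, searchCondition and result1Id..result5Id.
#
#   curl -X POST http://localhost:8081/train \
#        -H "Content-Type: application/json" \
#        -d '{"modelPath": "/data/models/",
#             "dataSet": [{"useScene": "office", "searchCondition": "ergonomic chair",
#                          "result1Id": 101, "result2Id": 102, "result3Id": 103,
#                          "result4Id": 104, "result5Id": 105}]}'
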
# Validation algorithm API -- completed version 2
# Train a model on a 70/30 split of the submitted data
def train_model2(data):
    train_data, test_data = train_test_split(data, test_size=0.3, random_state=48)
    train_data = train_data.copy()  # work on a copy so adding the flag columns below does not warn
    X_train = train_data[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
    y_train = train_data[['result1Id', 'result2Id', 'result3Id', 'result4Id', 'result5Id']]
    X_test = test_data[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
    y_test = test_data[['result1Id', 'result2Id', 'result3Id', 'result4Id', 'result5Id']]
    train_data['trained'] = 1
    train_data['tested'] = 0
    # Convert the text into TF-IDF feature vectors
    vectorizer = TfidfVectorizer()
    X_train_matrix = vectorizer.fit_transform(X_train)
    X_test_matrix = vectorizer.transform(X_test)
    rf_classifier = RandomForestClassifier(n_estimators=400, random_state=42)
    rf_classifier.fit(X_train_matrix, y_train)
    return vectorizer, rf_classifier, test_data, y_test, train_data

# Prediction and evaluation function
def predict(vectorizer, rf_classifier, test_data, y_test, train_data):
    X_test = test_data[['useScene', 'searchCondition']].apply(lambda x: ' '.join(x.astype(str)), axis=1)
    y_pred = rf_classifier.predict(vectorizer.transform(X_test))
    y_pred_df = pd.DataFrame(y_pred, columns=['calculate1Id', 'calculate2Id', 'calculate3Id', 'calculate4Id', 'calculate5Id'])
    y_pred_df.index = test_data.index
    output_df = pd.concat([test_data[['id', 'useScene', 'searchCondition', 'result1Id', 'result2Id', 'result3Id', 'result4Id', 'result5Id']], y_pred_df], axis=1)
    output_df['trained'] = 0  # 0 indicates the record is not in the training set
    output_df['tested'] = 1   # 1 indicates the record is in the testing set
    full_data = pd.concat([train_data, output_df]).sort_index().reset_index(drop=True)
    # Compute per-row accuracy and recall by treating the five label IDs as sets
    TP_list = []
    TN_list = []
    FP_list = []
    FN_list = []
    for i in range(y_test.shape[0]):
        # For each row: TP = true IDs that appear among the predictions,
        # FP = predicted IDs that do not appear among the true IDs,
        # TN and FN are both counted here as true IDs missing from the predictions
        y_test_row = y_test.iloc[i, :].astype(str)
        y_pred_row = y_pred_df.iloc[i, :].astype(str)
        TP = sum(y_test_row.isin(y_pred_row))
        TN = sum(~y_test_row.isin(y_pred_row))
        FP = sum(~y_pred_row.isin(y_test_row))
        FN = sum(~y_test_row.isin(y_pred_row))
        TP_list.append(TP)
        TN_list.append(TN)
        FP_list.append(FP)
        FN_list.append(FN)
    accuracy_list = [(TP + TN) / (TP + TN + FP + FN) if (TP + TN + FP + FN) != 0 else 0
                     for TP, TN, FP, FN in zip(TP_list, TN_list, FP_list, FN_list)]
    recall_list = [TP / (TP + FN) if (TP + FN) != 0 else 0 for TP, FN in zip(TP_list, FN_list)]
    # Build the JSON-style result
    output_data = {}
    output_data['code'] = 200
    output_data['msg'] = '成功'
    output_data['data'] = {}
    output_data['data']['accuracyRate'] = round((sum(accuracy_list) / len(accuracy_list)) * 100, 2)
    output_data['data']['recallRate'] = round((sum(recall_list) / len(recall_list)) * 100, 2)
    output_data['data']['dataSet'] = full_data.to_dict('records')
    return output_data

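# Worked example of the per-row counting above (numbers invented for illustration):
# if a row's true IDs are {101, 102, 103, 104, 105} and the predicted IDs are
# {101, 102, 106, 107, 108}, then TP = 2 (101 and 102 recovered), FP = 3 (106, 107, 108),
# and TN = FN = 3 (103, 104, 105 missed), so the row accuracy is
# (TP + TN) / (TP + TN + FP + FN) = 5 / 11 ≈ 0.45 and the row recall is TP / (TP + FN) = 2 / 5 = 0.4.
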
# Route and handler for the validation API
@app.route('/verification', methods=['POST'])
def verification():
    # Get the JSON payload from the request
    input_data = request.get_json()
    # Convert the JSON data to a DataFrame
    data = pd.DataFrame(input_data['dataSet'])
    # Train the model on a 70/30 split
    vectorizer, rf_classifier, test_data, y_test, train_data = train_model2(data)
    # Predict on the held-out rows and compute the metrics
    output_data = predict(vectorizer, rf_classifier, test_data, y_test, train_data)
    # Return the JSON response
    return jsonify(output_data)

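# Illustrative call to /verification. The record values are invented; each dataSet record
# needs an id, useScene, searchCondition and result1Id..result5Id. The response reports
# accuracyRate and recallRate plus the full data set annotated with trained/tested flags
# and, for held-out rows, the predicted calculate1Id..calculate5Id columns.
#
#   curl -X POST http://localhost:8081/verification \
#        -H "Content-Type: application/json" \
#        -d '{"dataSet": [{"id": 1, "useScene": "office", "searchCondition": "ergonomic chair",
#                          "result1Id": 101, "result2Id": 102, "result3Id": 103,
#                          "result4Id": 104, "result5Id": 105}, ...]}'
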
# Run the application
if __name__ == '__main__':
    app.run(debug=True, port=8081, host='0.0.0.0')