@@ -4,13 +4,16 @@ import pickle
 import pandas as pd
 from flask_sqlalchemy.session import Session
 from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
-from sklearn.metrics import r2_score
+from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
 from sklearn.model_selection import train_test_split, cross_val_score
 from sqlalchemy import text
 from xgboost import XGBRegressor
+import logging
+import numpy as np
 
 from .database_models import Models, Datasets
 from .config import Config
+from .data_cleaner import clean_dataset
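+# NOTE: clean_dataset backs the optional cleaning path in train_and_save_model;
+# the calls that use it are currently commented out there.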
 
 
 # Load the model
@@ -31,8 +34,72 @@ def predict(session, input_data: pd.DataFrame, model_id):
     predictions = ML_model.predict(input_data)
     return predictions.tolist()
 
+def check_dataset_overlap_with_test(dataset_df, data_type):
+    """
+    Check whether a dataset overlaps with the saved test set.
+
+    Args:
+        dataset_df (DataFrame): Dataset to check.
+        data_type (str): Dataset type ('reflux' or 'reduce').
+
+    Returns:
+        tuple: (number of overlapping rows, indices of the overlapping rows)
+    """
+    # Load the test set
+    if data_type == 'reflux':
+        X_test = pd.read_csv('uploads/data/X_test_reflux.csv')
+        Y_test = pd.read_csv('uploads/data/Y_test_reflux.csv')
+    elif data_type == 'reduce':
+        X_test = pd.read_csv('uploads/data/X_test_reduce.csv')
+        Y_test = pd.read_csv('uploads/data/Y_test_reduce.csv')
+    else:
+        raise ValueError(f"Unsupported data type: {data_type}")
+
+    # Combine X_test and Y_test (both branches built the same frame, so one concat suffices)
+    test_df = pd.concat([X_test, Y_test], axis=1)
+
+    # Determine which columns to compare
+    compare_columns = [col for col in dataset_df.columns if col in test_df.columns]
+
+    if not compare_columns:
+        return 0, []
+
+    # Find overlapping rows
+    merged = dataset_df[compare_columns].merge(test_df[compare_columns], how='inner', indicator=True)
+    overlapping_rows = merged[merged['_merge'] == 'both']
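+    # With how='inner' every surviving row already appears in both frames, so
+    # the indicator filter is purely defensive. Matching uses exact equality
+    # and may miss float rows that differ only by rounding.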
+
+    # Recover the indices of the overlapping rows in the original dataset
+    if not overlapping_rows.empty:
+        # Use the merged rows to look up the original indices
+        overlap_indices = []
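+        # Note: this per-row scan is O(n_overlap * n_rows). It is fine for
+        # modest uploads, but a keyed merge would scale better.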
+        for _, row in overlapping_rows.iterrows():
+            # Build a boolean mask to find matching rows in the original dataset
+            mask = True
+            for col in compare_columns:
+                mask = mask & (dataset_df[col] == row[col])
+
+            # Collect the indices of the matching rows
+            matching_indices = dataset_df[mask].index.tolist()
+            overlap_indices.extend(matching_indices)
+
+        return len(set(overlap_indices)), list(set(overlap_indices))
+
+    return 0, []
+
 # Compute model score
 def calculate_model_score(model_info):
+    """
+    Compute evaluation scores for a model.
+
+    Args:
+        model_info: Model info object (provides ModelFilePath and Data_type)
+
+    Returns:
+        dict: scoring metrics with keys 'r2', 'mae' and 'rmse'
+    """
     # Load the model
     with open(model_info.ModelFilePath, 'rb') as f:
         ML_model = pickle.load(f)
@@ -42,22 +109,55 @@ def calculate_model_score(model_info):
         # Load the saved X_test and Y_test
         X_test = pd.read_csv('uploads/data/X_test_reflux.csv')
         Y_test = pd.read_csv('uploads/data/Y_test_reflux.csv')
-        print(X_test.columns)  # column names used at test time
+
+        # Predict on the test set
         y_pred = ML_model.predict(X_test)
+
+        # Compute the scoring metrics
+        r2 = r2_score(Y_test, y_pred)
+        mae = mean_absolute_error(Y_test, y_pred)
+        rmse = np.sqrt(mean_squared_error(Y_test, y_pred))
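+        # Taking np.sqrt of the MSE keeps the RMSE computation portable across
+        # scikit-learn versions (the `squared=False` flag is deprecated in
+        # newer releases).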
+
     elif model_info.Data_type == 'reduce':  # acid-reduction dataset
         # Load the saved X_test and Y_test
         X_test = pd.read_csv('uploads/data/X_test_reduce.csv')
         Y_test = pd.read_csv('uploads/data/Y_test_reduce.csv')
-        print(X_test.columns)  # column names used at test time
+
+        # Predict on the test set
        y_pred = ML_model.predict(X_test)
-
-
-    # Compute the R² score
-    r2 = r2_score(Y_test, y_pred)
-    return r2
+
+        # Compute the scoring metrics
+        r2 = r2_score(Y_test, y_pred)
+        mae = mean_absolute_error(Y_test, y_pred)
+        rmse = np.sqrt(mean_squared_error(Y_test, y_pred))
+
+    else:
+        # Unsupported data type
+        return {'r2': 0, 'mae': 0, 'rmse': 0}
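+        # Zero scores are a sentinel for "not evaluated"; callers should not
+        # read them as a genuine result.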
+
+    # Return all scoring metrics (the cross-validation score is not included)
+    return {
+        'r2': float(r2),
+        'mae': float(mae),
+        'rmse': float(rmse)
+    }
 
 
 def train_and_save_model(session, model_type, model_name, model_description, data_type, dataset_id=None):
+    """
+    Train a model and save it to the database.
+
+    Args:
+        session: Database session
+        model_type: Model type
+        model_name: Model name
+        model_description: Model description
+        data_type: Data type ('reflux' or 'reduce')
+        dataset_id: Dataset ID
+
+    Returns:
+        tuple: (model name, model ID, dataset ID, CV score)
+    """
     try:
         if not dataset_id:
             # Create a new dataset and copy the data; this step is not committed immediately
@@ -79,26 +179,45 @@ def train_and_save_model(session, model_type, model_name, model_description, dat
         if dataset.empty:
             raise ValueError(f"Dataset {dataset_id} is empty or not found.")
 
+        # Optional data-cleaning hook (the clean_dataset calls below are commented out)
         if data_type == 'reflux':
             X = dataset.iloc[:, 1:-1]
             y = dataset.iloc[:, -1]
+
+            # target_column = -1  # assumes the target variable is the last column
+            # X, y, clean_stats = clean_dataset(dataset, target_column=target_column)
         elif data_type == 'reduce':
             X = dataset.iloc[:, 2:]
             y = dataset.iloc[:, 1]
-
+            # target_column = 1  # assumes the target variable is the second column
+            # X, y, clean_stats = clean_dataset(dataset, target_column=target_column)
+
+        # Log the cleaning statistics
+        # logging.info(f"Data cleaning stats: {clean_stats}")
+
         # Train the model
         model = train_model_by_type(X, y, model_type)
-
+
+        # Compute the cross-validation score
+        cv_score = cross_val_score(model, X, y, cv=5).mean()
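+        # cross_val_score clones the estimator for every fold, so the fitted
+        # `model` above is left untouched; the default scoring for regressors
+        # is R².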
+
         # Save the model to the database
         model_id = save_model(session, model, model_name, model_type, model_description, dataset_id, data_type)
-
+
+        # Update the model's cross-validation score
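+        # (Assumes the Models table defines a CV_score column.)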
+        model_info = session.query(Models).filter(Models.ModelID == model_id).first()
+        if model_info:
+            model_info.CV_score = float(cv_score)
+
         # Commit the transaction manually once all operations have succeeded
         session.commit()
-        return model_name, model_id, dataset_id
+        return model_name, model_id, dataset_id, cv_score
+
     except Exception as e:
-        # Roll back the transaction if any stage fails
         session.rollback()
-        raise e  # optionally re-raise or handle the exception
+        logging.error(f"Error while training and saving the model: {str(e)}", exc_info=True)
+        raise
@@ -152,8 +271,11 @@ def data_type_table_mapping(data_type):
 
 def train_model_by_type(X, y, model_type):
     # Dataset split
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
-
+    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+
+    # Use all of the data as the training set
+    X_train, y_train = X, y
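+    # Design note: hold-out estimates now come from cross_val_score in
+    # train_and_save_model and from the fixed test CSVs used by
+    # calculate_model_score, so training can use every available row.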
+
     if model_type == 'RandomForest':
         # Hyper-parameter optimization for the random forest
         return train_random_forest(X_train, y_train)