import sqlite3
import logging
import os
from datetime import datetime

import pandas as pd
from flask import Blueprint, request, jsonify, current_app
from werkzeug.security import generate_password_hash
from sqlalchemy import text, func
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.orm import sessionmaker

from . import db  # db instance exported by the app package
from .model import predict, train_and_save_model, calculate_model_score
from .database_models import Models, ModelParameters, Datasets, CurrentReduce, CurrentReflux
from .tasks import train_model_task
from .utils import create_dynamic_table, allowed_file, infer_column_types, rename_columns_for_model_predict, \
    clean_column_names, rename_columns_for_model, insert_data_into_dynamic_table, insert_data_into_existing_table, \
    predict_to_Q, Q_to_t_ha, create_kriging

# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Create a Blueprint to keep the routes separate from the app factory
bp = Blueprint('routes', __name__)


def get_db_connection():
    """Open a connection to the software-intro SQLite database."""
    return sqlite3.connect('software_intro.db')


def hash_password(password):
    """Hash a plain-text password."""
    return generate_password_hash(password)


def get_db():
    """Open a connection to the configured application database."""
    return sqlite3.connect(current_app.config['DATABASE'])


# Helper that checks the dataset size and triggers training when a threshold is crossed
def check_and_trigger_training(session, dataset_type, dataset_df):
    """
    Check whether the current dataset size has crossed a new threshold point
    and, if so, trigger an asynchronous training task.

    Args:
        session: database session
        dataset_type: dataset type ('reduce' or 'reflux')
        dataset_df: the newly uploaded DataFrame (passed in by the caller)

    Returns:
        tuple: (whether training was triggered, task ID)
    """
    try:
        # Pick the table that matches the dataset type
        table = CurrentReduce if dataset_type == 'reduce' else CurrentReflux

        # Current record count (after the new rows were inserted)
        current_count = session.query(func.count()).select_from(table).scalar()

        # Number of rows just added (length of the DataFrame built from request.files)
        new_records = len(dataset_df)

        # Record count before this upload
        previous_count = current_count - new_records

        # Training threshold
        THRESHOLD = current_app.config['THRESHOLD']

        # Last threshold point (based on the pre-upload count)
        last_threshold = previous_count // THRESHOLD * THRESHOLD
        # Threshold point we are at now
        current_threshold = current_count // THRESHOLD * THRESHOLD

        # Trigger training only when a new threshold point has been crossed
        if current_threshold > last_threshold and current_count >= THRESHOLD:
            # Kick off the asynchronous training task
            task = train_model_task.delay(
                model_type=current_app.config['DEFAULT_MODEL_TYPE'],
                model_name=f'auto_trained_{dataset_type}_{current_threshold}',
                model_description=f'Auto trained model at {current_threshold} records threshold',
                data_type=dataset_type
            )
            return True, task.id

        return False, None

    except Exception as e:
        logging.error(f"Failed to check and trigger training: {str(e)}")
        return False, None
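
# Worked example of the threshold check above (illustrative numbers only; the
# concrete THRESHOLD value is an assumption):
# with THRESHOLD = 100, a table holding 95 rows that receives a 10-row upload gives
#   previous_count  = 105 - 10 = 95   ->  last_threshold    = 95  // 100 * 100 = 0
#   current_count   = 105             ->  current_threshold = 105 // 100 * 100 = 100
# current_threshold (100) > last_threshold (0) and current_count >= THRESHOLD,
# so one training task is queued. A further 3-row upload (105 -> 108) stays inside
# the same 100-record bucket and does not queue another task.
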
@bp.route('/upload-dataset', methods=['POST'])
def upload_dataset():
    # Create a session
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        if 'file' not in request.files:
            return jsonify({'error': 'No file part'}), 400
        file = request.files['file']
        if file.filename == '' or not allowed_file(file.filename):
            return jsonify({'error': 'No selected file or invalid file type'}), 400

        dataset_name = request.form.get('dataset_name')
        dataset_description = request.form.get('dataset_description', 'No description provided')
        dataset_type = request.form.get('dataset_type')
        if not dataset_type:
            return jsonify({'error': 'Dataset type is required'}), 400

        new_dataset = Datasets(
            Dataset_name=dataset_name,
            Dataset_description=dataset_description,
            Row_count=0,
            Status='Datasets_upgraded',
            Dataset_type=dataset_type,
            Uploaded_at=datetime.now()
        )
        session.add(new_dataset)
        session.commit()

        unique_filename = f"dataset_{new_dataset.Dataset_ID}.xlsx"
        upload_folder = current_app.config['UPLOAD_FOLDER']
        file_path = os.path.join(upload_folder, unique_filename)
        file.save(file_path)

        dataset_df = pd.read_excel(file_path)
        new_dataset.Row_count = len(dataset_df)
        new_dataset.Status = 'excel_file_saved_success'
        session.commit()

        # Normalize column names
        dataset_df = clean_column_names(dataset_df)
        dataset_df = rename_columns_for_model(dataset_df, dataset_type)

        column_types = infer_column_types(dataset_df)
        dynamic_table_class = create_dynamic_table(new_dataset.Dataset_ID, column_types)
        insert_data_into_dynamic_table(session, dataset_df, dynamic_table_class)

        # Route the rows into the matching current table by dataset_type
        if dataset_type == 'reduce':
            insert_data_into_existing_table(session, dataset_df, CurrentReduce)
        elif dataset_type == 'reflux':
            insert_data_into_existing_table(session, dataset_df, CurrentReflux)

        session.commit()

        # After the data is inserted, check whether training should be triggered
        training_triggered, task_id = check_and_trigger_training(session, dataset_type, dataset_df)

        response_data = {
            'message': f'Dataset {dataset_name} uploaded successfully!',
            'dataset_id': new_dataset.Dataset_ID,
            'filename': unique_filename,
            'training_triggered': training_triggered
        }
        if training_triggered:
            response_data['task_id'] = task_id
            response_data['message'] += ' Auto-training has been triggered.'

        return jsonify(response_data), 201

    except Exception as e:
        session.rollback()
        logging.error('Failed to process the dataset upload:', exc_info=True)
        return jsonify({'error': str(e)}), 500
    finally:
        # Make sure the session is always closed
        if session:
            session.close()
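
# Example client call for /upload-dataset (a sketch, not part of this module;
# the host/port and the file name are assumptions):
#
#   import requests
#   with open('soil_samples.xlsx', 'rb') as f:
#       resp = requests.post(
#           'http://localhost:5000/upload-dataset',
#           files={'file': f},
#           data={'dataset_name': 'spring_survey',
#                 'dataset_description': 'Spring sampling batch',
#                 'dataset_type': 'reduce'},
#       )
#   print(resp.status_code, resp.json())  # 201, plus task_id if training was triggered
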
@bp.route('/train-and-save-model', methods=['POST'])
def train_and_save_model_endpoint():
    # Create a sessionmaker instance
    Session = sessionmaker(bind=db.engine)
    session = Session()

    data = request.get_json()
    # Parse parameters from the request
    model_type = data.get('model_type')
    model_name = data.get('model_name')
    model_description = data.get('model_description')
    data_type = data.get('data_type')
    dataset_id = data.get('dataset_id', None)  # defaults to None when not provided

    try:
        # Train and persist the model
        result = train_and_save_model(session, model_type, model_name, model_description, data_type, dataset_id)
        model_id = result[1] if result else None

        # Compute the model score
        if model_id:
            model_info = session.query(Models).filter(Models.ModelID == model_id).first()
            if model_info:
                score = calculate_model_score(model_info)
                # Store the score on the model record
                model_info.Performance_score = score
                session.commit()
                result = {'model_id': model_id, 'model_score': score}

        # Success response
        return jsonify({
            'message': 'Model trained and saved successfully',
            'result': result
        }), 200

    except Exception as e:
        session.rollback()
        logging.error('Failed to process the model training:', exc_info=True)
        return jsonify({
            'error': 'Failed to train and save model',
            'message': str(e)
        }), 500
    finally:
        session.close()


@bp.route('/predict', methods=['POST'])
def predict_route():
    # Create a sessionmaker instance
    Session = sessionmaker(bind=db.engine)
    session = Session()

    try:
        data = request.get_json()
        model_id = data.get('model_id')          # model identifier
        parameters = data.get('parameters', {})  # all input variables

        # Look up the model's Data_type by model_id
        model_info = session.query(Models).filter(Models.ModelID == model_id).first()
        if not model_info:
            return jsonify({'error': 'Model not found'}), 404
        data_type = model_info.Data_type

        input_data = pd.DataFrame([parameters])  # convert parameters to a DataFrame

        # For 'reduce' models, target_pH is not a model input
        if data_type == 'reduce':
            # Read init_pH and target_pH from the request (default 0.0 to guard against None)
            init_ph = float(parameters.get('init_pH', 0.0))
            target_ph = float(parameters.get('target_pH', 0.0))
            # Drop the 'target_pH' column; errors='ignore' keeps this safe when the column is absent
            input_data = input_data.drop('target_pH', axis=1, errors='ignore')

        input_data_rename = rename_columns_for_model_predict(input_data, data_type)  # rename columns to match the model's fields
        predictions = predict(session, input_data_rename, model_id)  # run the prediction

        if data_type == 'reduce':
            predictions = predictions[0]
            # Convert the raw prediction to Q, then Q to t/ha
            Q = predict_to_Q(predictions, init_ph, target_ph)
            predictions = Q_to_t_ha(Q)

        logger.debug(predictions)
        return jsonify({'result': predictions}), 200

    except Exception as e:
        logging.error('Failed to predict:', exc_info=True)
        return jsonify({'error': str(e)}), 400
    finally:
        session.close()
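
# Example /predict payload for a 'reduce' model (a sketch; the model_id and the
# feature names besides init_pH/target_pH are assumptions about the schema):
#
#   import requests
#   resp = requests.post(
#       'http://localhost:5000/predict',
#       json={
#           'model_id': 3,
#           'parameters': {
#               'init_pH': 4.8,     # starting soil pH
#               'target_pH': 6.5,   # desired soil pH; dropped before prediction
#               'OM': 25.1,         # hypothetical feature column
#               'CL': 18.4,         # hypothetical feature column
#           },
#       },
#   )
#   print(resp.json())  # {'result': <amount in t/ha>}
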
# Compute Performance_score for a given model; requires model_id
@bp.route('/score-model/<int:model_id>', methods=['POST'])
def score_model(model_id):
    # Create a sessionmaker instance
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        model_info = session.query(Models).filter(Models.ModelID == model_id).first()
        if not model_info:
            return jsonify({'error': 'Model not found'}), 404

        # Compute the score
        score = calculate_model_score(model_info)

        # Store the score on the model record
        model_info.Performance_score = score
        session.commit()

        return jsonify({'message': 'Model scored successfully', 'score': score}), 200

    except Exception as e:
        logging.error('Failed to score the model:', exc_info=True)
        return jsonify({'error': str(e)}), 400
    finally:
        session.close()


@bp.route('/delete-dataset/<int:dataset_id>', methods=['DELETE'])
def delete_dataset_endpoint(dataset_id):
    """
    Delete a dataset.

    @param dataset_id: ID of the dataset to delete
    @return: JSON response
    """
    # Create a sessionmaker instance
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        # Look up the dataset
        dataset = session.query(Datasets).filter_by(Dataset_ID=dataset_id).first()
        if not dataset:
            return jsonify({'error': 'Dataset not found'}), 404

        # Refuse to delete a dataset that is still referenced by any model
        models_using_dataset = session.query(Models).filter_by(DatasetID=dataset_id).all()
        if models_using_dataset:
            models_info = [{'ModelID': model.ModelID, 'Model_name': model.Model_name}
                           for model in models_using_dataset]
            return jsonify({
                'error': 'Cannot delete dataset because the following models are using it',
                'models': models_info
            }), 400

        # Delete the Excel file
        filename = f"dataset_{dataset.Dataset_ID}.xlsx"
        file_path = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)
        if os.path.exists(file_path):
            try:
                os.remove(file_path)
            except OSError as e:
                logger.error(f'Failed to delete file: {str(e)}')
                return jsonify({'error': f'Failed to delete file: {str(e)}'}), 500

        # Drop the dataset's dynamic table
        table_name = f"dataset_{dataset.Dataset_ID}"
        session.execute(text(f"DROP TABLE IF EXISTS {table_name}"))

        # Delete the dataset record
        session.delete(dataset)
        session.commit()

        return jsonify({
            'message': 'Dataset deleted successfully',
            'deleted_files': [filename]
        }), 200

    except Exception as e:
        session.rollback()
        logger.error(f'Failed to delete dataset {dataset_id}:', exc_info=True)
        return jsonify({'error': str(e)}), 500
    finally:
        session.close()


@bp.route('/tables', methods=['GET'])
def list_tables():
    engine = db.engine                         # use the db instance's engine
    inspector = Inspector.from_engine(engine)  # create an Inspector
    table_names = inspector.get_table_names()  # fetch all table names
    return jsonify(table_names)                # return the list as JSON


@bp.route('/models/<int:model_id>', methods=['GET'])
def get_model(model_id):
    """
    Fetch a single model.

    @param model_id: model ID
    @return: JSON response
    """
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        model = session.query(Models).filter_by(ModelID=model_id).first()
        if model:
            return jsonify({
                'ModelID': model.ModelID,
                'Model_name': model.Model_name,
                'Model_type': model.Model_type,
                'Created_at': model.Created_at.strftime('%Y-%m-%d %H:%M:%S'),
                'Description': model.Description,
                'Performance_score': float(model.Performance_score) if model.Performance_score else None,
                'Data_type': model.Data_type
            })
        else:
            return jsonify({'message': 'Model not found'}), 404
    except Exception as e:
        logger.error(f'Failed to fetch model info: {str(e)}')
        return jsonify({'error': 'Internal server error', 'message': str(e)}), 500
    finally:
        session.close()


@bp.route('/model-parameters', methods=['GET'])
def get_all_model_parameters():
    """
    Fetch all model parameters.

    @return: JSON response
    """
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        parameters = session.query(ModelParameters).all()
        if parameters:
            result = [
                {
                    'ParamID': param.ParamID,
                    'ModelID': param.ModelID,
                    'ParamName': param.ParamName,
                    'ParamValue': param.ParamValue
                }
                for param in parameters
            ]
            return jsonify(result)
        else:
            return jsonify({'message': 'No parameters found'}), 404
    except Exception as e:
        logger.error(f'Failed to fetch model parameters: {str(e)}')
        return jsonify({'error': 'Internal server error', 'message': str(e)}), 500
    finally:
        session.close()


@bp.route('/models/<int:model_id>/parameters', methods=['GET'])
def get_model_parameters(model_id):
    try:
        model = Models.query.filter_by(ModelID=model_id).first()
        if model:
            # Collect all parameters attached to this model
            parameters = [
                {
                    'ParamID': param.ParamID,
                    'ParamName': param.ParamName,
                    'ParamValue': param.ParamValue
                }
                for param in model.parameters
            ]
            # Return the model together with its parameters
            return jsonify({
                'ModelID': model.ModelID,
                'ModelName': model.Model_name,
                'ModelType': model.Model_type,
                'CreatedAt': model.Created_at.strftime('%Y-%m-%d %H:%M:%S'),
                'Description': model.Description,
                'Parameters': parameters
            })
        else:
            return jsonify({'message': 'Model not found'}), 404
    except Exception as e:
        return jsonify({'error': 'Internal server error', 'message': str(e)}), 500
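
# Example reads against the two model endpoints above (a sketch; host/port and
# the model id are assumptions):
#
#   import requests
#   model = requests.get('http://localhost:5000/models/3').json()
#   params = requests.get('http://localhost:5000/models/3/parameters').json()
#   print(model['Performance_score'], params['Parameters'])
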
@bp.route('/train-model-async', methods=['POST'])
def train_model_async():
    """
    Train a model asynchronously.
    """
    try:
        data = request.get_json()

        # Read parameters from the request
        model_type = data.get('model_type')
        model_name = data.get('model_name')
        model_description = data.get('model_description')
        data_type = data.get('data_type')
        dataset_id = data.get('dataset_id', None)

        # Validate required parameters
        if not all([model_type, model_name, data_type]):
            return jsonify({
                'error': 'Missing required parameters'
            }), 400

        # If a dataset_id was supplied, make sure the dataset exists
        if dataset_id:
            Session = sessionmaker(bind=db.engine)
            session = Session()
            try:
                dataset = session.query(Datasets).filter_by(Dataset_ID=dataset_id).first()
                if not dataset:
                    return jsonify({
                        'error': f'Dataset with ID {dataset_id} not found'
                    }), 404
            finally:
                session.close()

        # Kick off the asynchronous task
        task = train_model_task.delay(
            model_type=model_type,
            model_name=model_name,
            model_description=model_description,
            data_type=data_type,
            dataset_id=dataset_id
        )

        # Return the task ID
        return jsonify({
            'task_id': task.id,
            'message': 'Model training started'
        }), 202

    except Exception as e:
        logging.error('Failed to start async training task:', exc_info=True)
        return jsonify({
            'error': str(e)
        }), 500


@bp.route('/task-status/<task_id>', methods=['GET'])
def get_task_status(task_id):
    """
    Query the status of an asynchronous task.
    """
    try:
        task = train_model_task.AsyncResult(task_id)

        if task.state == 'PENDING':
            response = {
                'state': task.state,
                'status': 'Task is waiting for execution'
            }
        elif task.state == 'FAILURE':
            response = {
                'state': task.state,
                'status': 'Task failed',
                'error': task.info.get('error') if isinstance(task.info, dict) else str(task.info)
            }
        elif task.state == 'SUCCESS':
            response = {
                'state': task.state,
                'status': 'Task completed successfully',
                'result': task.get()
            }
        else:
            response = {
                'state': task.state,
                'status': 'Task is in progress'
            }

        return jsonify(response), 200

    except Exception as e:
        return jsonify({
            'error': str(e)
        }), 500
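
# Example polling loop for the async training flow (a sketch; host/port, the
# sleep interval and the 'RandomForest' model_type value are assumptions):
#
#   import time, requests
#   task_id = requests.post(
#       'http://localhost:5000/train-model-async',
#       json={'model_type': 'RandomForest', 'model_name': 'reflux_v2',
#             'model_description': 'async run', 'data_type': 'reflux'},
#   ).json()['task_id']
#   while True:
#       status = requests.get(f'http://localhost:5000/task-status/{task_id}').json()
#       if status['state'] in ('SUCCESS', 'FAILURE'):
#           break
#       time.sleep(5)
#   print(status)
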
@bp.route('/delete-model/<int:model_id>', methods=['DELETE'])
def delete_model_route(model_id):
    # Convert the URL parameter to a boolean
    delete_dataset_param = request.args.get('delete_dataset', 'False').lower() == 'true'
    # Delegate to the implementation
    return delete_model(model_id, delete_dataset=delete_dataset_param)


def delete_model(model_id, delete_dataset=False):
    """
    Delete the given model.

    @param model_id: ID of the model to delete
    @query_param delete_dataset: boolean, whether to also delete the associated dataset (default False)
    @return: JSON response
    """
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        # Look up the model
        model = session.query(Models).filter_by(ModelID=model_id).first()
        if not model:
            return jsonify({'error': 'Model not found'}), 404

        dataset_id = model.DatasetID

        # 1. Delete the model record first and commit, so that a subsequent
        #    dataset deletion no longer sees this model as a user of the dataset
        session.delete(model)
        session.commit()

        # 2. Delete the model file
        model_file = f"rf_model_{model_id}.pkl"
        model_path = os.path.join(current_app.config['MODEL_SAVE_PATH'], model_file)
        if os.path.exists(model_path):
            try:
                os.remove(model_path)
            except OSError as e:
                # The record deletion is already committed and cannot be rolled
                # back here; report the file error to the caller
                logger.error(f'Failed to delete model file: {str(e)}')
                return jsonify({'error': f'Failed to delete model file: {str(e)}'}), 500

        # 3. Optionally delete the associated dataset
        if delete_dataset and dataset_id:
            try:
                dataset_response = delete_dataset_endpoint(dataset_id)
                if not isinstance(dataset_response, tuple) or dataset_response[1] != 200:
                    return jsonify({
                        'error': 'Failed to delete the associated dataset',
                        'dataset_error': dataset_response[0].get_json() if hasattr(dataset_response[0], 'get_json') else str(dataset_response[0])
                    }), 500
            except Exception as e:
                logger.error(f'Failed to delete the associated dataset: {str(e)}')
                return jsonify({'error': f'Failed to delete the associated dataset: {str(e)}'}), 500

        response_data = {
            'message': 'Model deleted successfully',
            'deleted_files': [model_file]
        }
        if delete_dataset:
            response_data['dataset_info'] = {
                'dataset_id': dataset_id,
                'message': 'Associated dataset deleted'
            }

        return jsonify(response_data), 200

    except Exception as e:
        session.rollback()
        logger.error(f'Failed to delete model {model_id}:', exc_info=True)
        return jsonify({'error': str(e)}), 500
    finally:
        session.close()


# Endpoint that empties the given current dataset
@bp.route('/clear-dataset/<string:data_type>', methods=['DELETE'])
def clear_dataset(data_type):
    """
    Clear the dataset of the given type and reset its row counter.

    @param data_type: dataset type ('reduce' or 'reflux')
    @return: JSON response
    """
    # Create a sessionmaker instance
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        # Pick the table for the dataset type
        if data_type == 'reduce':
            table = CurrentReduce
            table_name = 'current_reduce'
        elif data_type == 'reflux':
            table = CurrentReflux
            table_name = 'current_reflux'
        else:
            return jsonify({'error': 'Invalid dataset type'}), 400

        # Delete all rows
        session.query(table).delete()

        # Reset the autoincrement primary-key counter (SQLite)
        session.execute(text(f"DELETE FROM sqlite_sequence WHERE name='{table_name}'"))
        session.commit()

        return jsonify({'message': f'{data_type} dataset cleared and counter reset'}), 200

    except Exception as e:
        session.rollback()
        return jsonify({'error': str(e)}), 500
    finally:
        session.close()


@bp.route('/update-threshold', methods=['POST'])
def update_threshold():
    """
    Update the training threshold.

    @body_param threshold: the new threshold (positive number)
    @return: JSON response
    """
    try:
        data = request.get_json()
        new_threshold = data.get('threshold')

        # Validate the new threshold
        if not isinstance(new_threshold, (int, float)) or new_threshold <= 0:
            return jsonify({
                'error': 'Invalid threshold; it must be a positive number'
            }), 400

        # Update the threshold in the running app's config
        current_app.config['THRESHOLD'] = int(new_threshold)

        return jsonify({
            'success': True,
            'message': f'Threshold updated to {new_threshold}',
            'new_threshold': new_threshold
        })

    except Exception as e:
        logging.error(f"Failed to update threshold: {str(e)}")
        return jsonify({
            'error': f'Failed to update threshold: {str(e)}'
        }), 500


@bp.route('/get-threshold', methods=['GET'])
def get_threshold():
    """
    Fetch the current training threshold.

    @return: JSON response
    """
    try:
        current_threshold = current_app.config['THRESHOLD']
        default_threshold = current_app.config['DEFAULT_THRESHOLD']
        return jsonify({
            'current_threshold': current_threshold,
            'default_threshold': default_threshold
        })
    except Exception as e:
        logging.error(f"Failed to fetch threshold: {str(e)}")
        return jsonify({
            'error': f'Failed to fetch threshold: {str(e)}'
        }), 500


@bp.route('/set-current-dataset/<string:data_type>/<int:dataset_id>', methods=['POST'])
def set_current_dataset(data_type, dataset_id):
    """
    Make the given dataset the current dataset.

    @param data_type: dataset type ('reduce' or 'reflux')
    @param dataset_id: ID of the dataset to make current
    @return: JSON response
    """
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        # Verify the dataset exists and matches the requested type
        dataset = session.query(Datasets)\
            .filter_by(Dataset_ID=dataset_id, Dataset_type=data_type)\
            .first()
        if not dataset:
            return jsonify({
                'error': f'No dataset with ID {dataset_id} and type {data_type} found'
            }), 404

        # Pick the current table for the data type
        if data_type == 'reduce':
            table = CurrentReduce
            table_name = 'current_reduce'
        elif data_type == 'reflux':
            table = CurrentReflux
            table_name = 'current_reflux'
        else:
            return jsonify({'error': 'Invalid dataset type'}), 400

        # Empty the current table
        session.query(table).delete()

        # Reset the autoincrement primary-key counter (SQLite)
        session.execute(text(f"DELETE FROM sqlite_sequence WHERE name='{table_name}'"))

        # Copy the rows of the chosen dataset into the current table
        # (assumes the dynamic table's column order matches the current table)
        dataset_table_name = f"dataset_{dataset_id}"
        copy_sql = text(f"INSERT INTO {table_name} SELECT * FROM {dataset_table_name}")
        session.execute(copy_sql)

        session.commit()

        return jsonify({
            'message': f'{data_type} current dataset set to dataset ID: {dataset_id}',
            'dataset_id': dataset_id,
            'dataset_name': dataset.Dataset_name,
            'row_count': dataset.Row_count
        }), 200

    except Exception as e:
        session.rollback()
        logger.error(f'Failed to set current dataset: {str(e)}')
        return jsonify({'error': str(e)}), 500
    finally:
        session.close()


@bp.route('/get-model-history/<string:data_type>', methods=['GET'])
def get_model_history(data_type):
    """
    Fetch the model-training history for a data type.

    @param data_type: dataset type ('reduce' or 'reflux')
    @return: JSON response holding time series of model performance data
    """
    Session = sessionmaker(bind=db.engine)
    session = Session()
    try:
        # All auto-generated datasets of this type, in upload order
        datasets = session.query(Datasets).filter(
            Datasets.Dataset_type == data_type,
            Datasets.Dataset_description == f"Automatically generated dataset for type {data_type}"
        ).order_by(Datasets.Uploaded_at).all()

        history_data = []
        for dataset in datasets:
            # Find the auto-trained model that belongs to this dataset
            model = session.query(Models).filter(
                Models.DatasetID == dataset.Dataset_ID,
                Models.Model_name.like(f'auto_trained_{data_type}_%')
            ).first()

            if model and model.Performance_score is not None:
                # Use the stored timestamp as-is (keep the same timezone as created_at)
                created_at = model.Created_at.isoformat() if model.Created_at else None

                history_data.append({
                    'dataset_id': dataset.Dataset_ID,
                    'row_count': dataset.Row_count,
                    'model_id': model.ModelID,
                    'model_name': model.Model_name,
                    'performance_score': float(model.Performance_score),
                    'timestamp': created_at
                })

        # Sort by timestamp
        history_data.sort(key=lambda x: x['timestamp'] if x['timestamp'] else '')

        # Split the metrics into parallel series so the frontend can plot them directly
        response_data = {
            'data_type': data_type,
            'timestamps': [item['timestamp'] for item in history_data],
            'row_counts': [item['row_count'] for item in history_data],
            'performance_scores': [item['performance_score'] for item in history_data],
            'model_details': history_data  # full records for the frontend
        }

        return jsonify(response_data), 200

    except Exception as e:
        logger.error(f'Failed to fetch model history: {str(e)}', exc_info=True)
        return jsonify({'error': str(e)}), 500
    finally:
        session.close()
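
# Example threshold round-trip (a sketch; host/port are assumptions). Note that
# /update-threshold only changes the running process's config, so the value is
# not persisted across restarts:
#
#   import requests
#   requests.post('http://localhost:5000/update-threshold', json={'threshold': 150})
#   print(requests.get('http://localhost:5000/get-threshold').json())
#   # -> {'current_threshold': 150, 'default_threshold': ...}
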
@bp.route('/batch-delete-datasets', methods=['POST'])
def batch_delete_datasets():
    """
    Delete several datasets in one call.

    @body_param dataset_ids: list of dataset IDs to delete
    @return: JSON response
    """
    try:
        data = request.get_json()
        dataset_ids = data.get('dataset_ids', [])

        if not dataset_ids:
            return jsonify({'error': 'No dataset ID list provided'}), 400

        results = {
            'success': [],
            'failed': [],
            'protected': []  # datasets still referenced by models
        }

        for dataset_id in dataset_ids:
            try:
                # Reuse the single-delete endpoint
                response = delete_dataset_endpoint(dataset_id)

                # Inspect its response
                if response[1] == 200:
                    results['success'].append(dataset_id)
                elif response[1] == 400 and 'models' in response[0].json:
                    # The dataset is protected by one or more models
                    results['protected'].append({
                        'id': dataset_id,
                        'models': response[0].json['models']
                    })
                else:
                    results['failed'].append({
                        'id': dataset_id,
                        'reason': response[0].json.get('error', 'Deletion failed')
                    })
            except Exception as e:
                logger.error(f'Failed to delete dataset {dataset_id}: {str(e)}')
                results['failed'].append({
                    'id': dataset_id,
                    'reason': str(e)
                })

        # Build the summary message
        message = f"Deleted {len(results['success'])} dataset(s) successfully"
        if results['protected']:
            message += f", {len(results['protected'])} dataset(s) protected"
        if results['failed']:
            message += f", {len(results['failed'])} dataset(s) failed"

        return jsonify({
            'message': message,
            'results': results
        }), 200

    except Exception as e:
        logger.error(f'Batch dataset deletion failed: {str(e)}')
        return jsonify({'error': str(e)}), 500


@bp.route('/batch-delete-models', methods=['POST'])
def batch_delete_models():
    """
    Delete several models in one call.

    @body_param model_ids: list of model IDs to delete
    @query_param delete_datasets: boolean, whether to also delete associated datasets (default False)
    @return: JSON response
    """
    try:
        data = request.get_json()
        model_ids = data.get('model_ids', [])
        delete_datasets = request.args.get('delete_datasets', 'false').lower() == 'true'

        if not model_ids:
            return jsonify({'error': 'No model ID list provided'}), 400

        results = {
            'success': [],
            'failed': [],
            'datasets_deleted': []  # dataset IDs removed when delete_datasets is true
        }

        for model_id in model_ids:
            try:
                # Reuse the single-delete implementation
                response = delete_model(model_id, delete_dataset=delete_datasets)

                # Inspect its response
                if response[1] == 200:
                    results['success'].append(model_id)
                    # Record the dataset ID when an associated dataset was deleted
                    if 'dataset_info' in response[0].json:
                        results['datasets_deleted'].append(
                            response[0].json['dataset_info']['dataset_id']
                        )
                else:
                    results['failed'].append({
                        'id': model_id,
                        'reason': response[0].json.get('error', 'Deletion failed')
                    })
            except Exception as e:
                logger.error(f'Failed to delete model {model_id}: {str(e)}')
                results['failed'].append({
                    'id': model_id,
                    'reason': str(e)
                })

        # Build the summary message
        message = f"Deleted {len(results['success'])} model(s) successfully"
        if results['datasets_deleted']:
            message += f", {len(results['datasets_deleted'])} associated dataset(s)"
        if results['failed']:
            message += f", {len(results['failed'])} model(s) failed"

        return jsonify({
            'message': message,
            'results': results
        }), 200

    except Exception as e:
        logger.error(f'Batch model deletion failed: {str(e)}')
        return jsonify({'error': str(e)}), 500


@bp.route('/kriging_interpolation', methods=['POST'])
def kriging_interpolation():
    try:
        data = request.get_json()
        required = ['file_name', 'emission_column', 'points']
        if not all(k in data for k in required):
            return jsonify({"error": "Missing parameters"}), 400

        # Validate the point format: each point must be an [x, y] pair of numbers
        points = data['points']
        if not all(len(pt) == 2 and isinstance(pt[0], (int, float)) and isinstance(pt[1], (int, float))
                   for pt in points):
            return jsonify({"error": "Invalid points format"}), 400

        result = create_kriging(
            data['file_name'],
            data['emission_column'],
            data['points']
        )
        return jsonify(result)
    except Exception as e:
        return jsonify({"error": str(e)}), 500


# List models (used by the model-switching UI)
@bp.route('/models', methods=['GET'])
def get_models():
    session = None
    try:
        # Create a session
        Session = sessionmaker(bind=db.engine)
        session = Session()

        # Fetch all models
        models = session.query(Models).all()
        logger.debug(f"Models found: {models}")  # log the queried models

        if not models:
            return jsonify({'message': 'No models found'}), 404

        # Convert the model rows into a list of dicts
        models_list = [
            {
                'ModelID': model.ModelID,
                'ModelName': model.Model_name,
                'ModelType': model.Model_type,
                'CreatedAt': model.Created_at.strftime('%Y-%m-%d %H:%M:%S'),
                'Description': model.Description,
                'DatasetID': model.DatasetID,
                'ModelFilePath': model.ModelFilePath,
                'DataType': model.Data_type,
                'PerformanceScore': model.Performance_score
            }
            for model in models
        ]

        return jsonify(models_list), 200

    except Exception as e:
        return jsonify({'error': str(e)}), 400
    finally:
        if session:
            session.close()
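
# Example /kriging_interpolation payload (a sketch; the file name, the emission
# column name and the coordinates are assumptions about the expected inputs):
#
#   import requests
#   resp = requests.post(
#       'http://localhost:5000/kriging_interpolation',
#       json={
#           'file_name': 'emissions.xlsx',
#           'emission_column': 'CO2_flux',
#           'points': [[116.38, 39.90], [116.40, 39.92], [116.42, 39.91]],
#       },
#   )
#   print(resp.json())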