"""
Cd通量移除计算服务
@description: 提供籽粒移除和秸秆移除的Cd通量计算功能
@author: AcidMap Team
@version: 1.0.0
"""
import json
import logging
import math
import os
import tempfile
from datetime import datetime
from typing import Any, Dict, List, Optional

import geopandas as gpd
import pandas as pd
from shapely.geometry import Point, shape
from sqlalchemy import and_, create_engine
from sqlalchemy.orm import Session, sessionmaker

from ..database import SessionLocal, engine
from ..models.CropCd_output import CropCdOutputData
from ..models.farmland import FarmlandData
from ..models.parameters import Parameters
from ..utils.mapping_utils import MappingUtils
from .admin_boundary_service import get_boundary_gdf_by_name, get_boundary_geojson_by_name
  25. class CdFluxRemovalService:
  26. """
  27. Cd通量移除计算服务类
  28. @description: 提供基于CropCd_output_data和Parameters表数据的籽粒移除和秸秆移除Cd通量计算功能
  29. """
  30. def __init__(self):
  31. """
  32. 初始化Cd通量移除服务
  33. """
  34. self.logger = logging.getLogger(__name__)
  35. # 严格匹配策略:不做名称变体或后缀映射
  36. def calculate_grain_removal_by_area(self, area: str, level: Optional[str] = None) -> Dict[str, Any]:
  37. """
  38. 根据地区计算籽粒移除Cd通量
  39. @param area: 地区名称(仅用于地图边界,参数固定使用"韶关")
  40. @returns: 计算结果字典
  41. 计算公式:籽粒移除(g/ha/a) = EXP(LnCropCd) * F11 * 0.5 * 15 / 1000
  42. """
  43. try:
  44. with SessionLocal() as db:
  45. # 参数固定使用"韶关",area参数仅用于地图边界
  46. parameter = db.query(Parameters).filter(Parameters.area == "韶关").first()
  47. if not parameter:
  48. return {
  49. "success": False,
  50. "message": f"未找到韶关地区的参数数据",
  51. "data": None
  52. }
  53. # 查询CropCd输出数据
  54. crop_cd_outputs = db.query(CropCdOutputData).all()
  55. if not crop_cd_outputs:
  56. return {
  57. "success": False,
  58. "message": f"未找到CropCd输出数据",
  59. "data": None
  60. }
  61. # 计算每个样点的籽粒移除Cd通量
  62. results = []
  63. for output in crop_cd_outputs:
  64. crop_cd_value = math.exp(output.ln_crop_cd) # EXP(LnCropCd)
  65. grain_removal = crop_cd_value * parameter.f11 * 0.5 * 15 / 1000
  66. results.append({
  67. "farmland_id": output.farmland_id,
  68. "sample_id": output.sample_id,
  69. "ln_crop_cd": output.ln_crop_cd,
  70. "crop_cd_value": crop_cd_value,
  71. "f11_yield": parameter.f11,
  72. "grain_removal_flux": grain_removal
  73. })
  74. # 计算统计信息
  75. flux_values = [r["grain_removal_flux"] for r in results]
  76. statistics = {
  77. "total_samples": len(results),
  78. "mean_flux": sum(flux_values) / len(flux_values),
  79. "max_flux": max(flux_values),
  80. "min_flux": min(flux_values)
  81. }
  82. return {
  83. "success": True,
  84. "message": f"地区 '{area}' 的籽粒移除Cd通量计算成功",
  85. "data": {
  86. "area": area,
  87. "calculation_type": "grain_removal",
  88. "formula": "EXP(LnCropCd) * F11 * 0.5 * 15 / 1000",
  89. "unit": "g/ha/a",
  90. "results": results,
  91. "statistics": statistics
  92. }
  93. }
  94. except Exception as e:
  95. self.logger.error(f"计算地区 '{area}' 的籽粒移除Cd通量失败: {str(e)}")
  96. return {
  97. "success": False,
  98. "message": f"计算失败: {str(e)}",
  99. "data": None
  100. }
  101. def calculate_straw_removal_by_area(self, area: str, level: Optional[str] = None) -> Dict[str, Any]:
  102. """
  103. 根据地区计算秸秆移除Cd通量
  104. @param area: 地区名称(仅用于地图边界,参数固定使用"韶关")
  105. @returns: 计算结果字典
  106. 计算公式:秸秆移除(g/ha/a) = [EXP(LnCropCd)/(EXP(LnCropCd)*0.76-0.0034)] * F11 * 0.5 * 15 / 1000
  107. """
  108. try:
  109. with SessionLocal() as db:
  110. # 参数固定使用"韶关",area参数仅用于地图边界
  111. parameter = db.query(Parameters).filter(Parameters.area == "韶关").first()
  112. if not parameter:
  113. return {
  114. "success": False,
  115. "message": f"未找到韶关地区的参数数据",
  116. "data": None
  117. }
  118. # 查询CropCd输出数据
  119. crop_cd_outputs = db.query(CropCdOutputData).all()
  120. if not crop_cd_outputs:
  121. return {
  122. "success": False,
  123. "message": f"未找到CropCd输出数据",
  124. "data": None
  125. }
  126. # 计算每个样点的秸秆移除Cd通量
  127. results = []
  128. for output in crop_cd_outputs:
  129. crop_cd_value = math.exp(output.ln_crop_cd) # EXP(LnCropCd)
  130. # 计算分母:EXP(LnCropCd)*0.76-0.0034
  131. denominator = crop_cd_value * 0.76 - 0.0034
  132. # 检查分母是否为零或负数,避免除零错误
  133. if denominator <= 0:
  134. self.logger.warning(f"样点 {output.farmland_id}-{output.sample_id} 的分母值为 {denominator},跳过计算")
  135. continue
  136. # 计算秸秆移除Cd通量
  137. straw_removal = (crop_cd_value / denominator) * parameter.f11 * 0.5 * 15 / 1000
  138. results.append({
  139. "farmland_id": output.farmland_id,
  140. "sample_id": output.sample_id,
  141. "ln_crop_cd": output.ln_crop_cd,
  142. "crop_cd_value": crop_cd_value,
  143. "denominator": denominator,
  144. "f11_yield": parameter.f11,
  145. "straw_removal_flux": straw_removal
  146. })
  147. if not results:
  148. return {
  149. "success": False,
  150. "message": "所有样点的计算都因分母值无效而失败",
  151. "data": None
  152. }
  153. # 计算统计信息
  154. flux_values = [r["straw_removal_flux"] for r in results]
  155. statistics = {
  156. "total_samples": len(results),
  157. "mean_flux": sum(flux_values) / len(flux_values),
  158. "max_flux": max(flux_values),
  159. "min_flux": min(flux_values)
  160. }
  161. return {
  162. "success": True,
  163. "message": f"地区 '{area}' 的秸秆移除Cd通量计算成功",
  164. "data": {
  165. "area": area,
  166. "calculation_type": "straw_removal",
  167. "formula": "[EXP(LnCropCd)/(EXP(LnCropCd)*0.76-0.0034)] * F11 * 0.5 * 15 / 1000",
  168. "unit": "g/ha/a",
  169. "results": results,
  170. "statistics": statistics
  171. }
  172. }
  173. except Exception as e:
  174. self.logger.error(f"计算地区 '{area}' 的秸秆移除Cd通量失败: {str(e)}")
  175. return {
  176. "success": False,
  177. "message": f"计算失败: {str(e)}",
  178. "data": None
  179. }
  180. def export_results_to_csv(self, results_data: Dict[str, Any], output_dir: str = "app/static/cd_flux") -> str:
  181. """
  182. 将计算结果导出为CSV文件
  183. @param results_data: 计算结果数据
  184. @param output_dir: 输出目录
  185. @returns: CSV文件路径
  186. """
  187. try:
  188. # 确保输出目录存在
  189. os.makedirs(output_dir, exist_ok=True)
  190. # 生成时间戳
  191. timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
  192. # 生成文件名
  193. calculation_type = results_data.get("calculation_type", "flux_removal")
  194. area = results_data.get("area", "unknown")
  195. filename = f"{calculation_type}_{area}_{timestamp}.csv"
  196. csv_path = os.path.join(output_dir, filename)
  197. # 转换为DataFrame
  198. results = results_data.get("results", [])
  199. if not results:
  200. raise ValueError("没有结果数据可导出")
  201. df = pd.DataFrame(results)
  202. # 保存CSV文件
  203. df.to_csv(csv_path, index=False, encoding='utf-8-sig')
  204. self.logger.info(f"✓ 成功导出结果到: {csv_path}")
  205. return csv_path
  206. except Exception as e:
  207. self.logger.error(f"导出CSV文件失败: {str(e)}")
  208. raise
  209. def get_coordinates_for_results(self, results_data: Dict[str, Any]) -> List[Dict[str, Any]]:
  210. """
  211. 获取结果数据对应的坐标信息
  212. @param results_data: 计算结果数据
  213. @returns: 包含坐标的结果列表
  214. """
  215. try:
  216. results = results_data.get("results", [])
  217. if not results:
  218. return []
  219. # 提取成对键,避免 N 次数据库查询
  220. farmland_sample_pairs = [(r["farmland_id"], r["sample_id"]) for r in results]
  221. with SessionLocal() as db:
  222. # 使用 farmland_id 分片查询,避免复合 IN 导致的兼容性与参数数量问题
  223. wanted_pairs = set(farmland_sample_pairs)
  224. unique_farmland_ids = sorted({fid for fid, _ in wanted_pairs})
  225. def chunk_list(items: List[int], chunk_size: int = 500) -> List[List[int]]:
  226. return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]
  227. rows: List[FarmlandData] = []
  228. for id_chunk in chunk_list(unique_farmland_ids, 500):
  229. rows.extend(
  230. db.query(FarmlandData)
  231. .filter(FarmlandData.farmland_id.in_(id_chunk))
  232. .all()
  233. )
  234. pair_to_farmland = {
  235. (row.farmland_id, row.sample_id): row for row in rows
  236. }
  237. coordinates_results: List[Dict[str, Any]] = []
  238. for r in results:
  239. key = (r["farmland_id"], r["sample_id"])
  240. farmland = pair_to_farmland.get(key)
  241. if farmland is None:
  242. continue
  243. coord_result = {
  244. "farmland_id": r["farmland_id"],
  245. "sample_id": r["sample_id"],
  246. "longitude": farmland.lon,
  247. "latitude": farmland.lan,
  248. "flux_value": r.get("grain_removal_flux") or r.get("straw_removal_flux")
  249. }
  250. coord_result.update(r)
  251. coordinates_results.append(coord_result)
  252. self.logger.info(f"✓ 成功获取 {len(coordinates_results)} 个样点的坐标信息(分片批量查询)")
  253. return coordinates_results
  254. except Exception as e:
  255. self.logger.error(f"获取坐标信息失败: {str(e)}")
  256. raise
  257. def create_flux_visualization(self, area: str, calculation_type: str,
  258. results_with_coords: List[Dict[str, Any]],
  259. level: str = None,
  260. output_dir: str = "app/static/cd_flux",
  261. template_raster: str = "app/static/cd_flux/meanTemp.tif",
  262. boundary_shp: str = None,
  263. colormap: str = "green_yellow_red_purple",
  264. resolution_factor: float = 4.0,
  265. enable_interpolation: bool = False,
  266. cleanup_intermediate: bool = True) -> Dict[str, str]:
  267. """
  268. 创建Cd通量移除可视化图表
  269. @param area: 地区名称
  270. @param calculation_type: 计算类型(grain_removal 或 straw_removal)
  271. @param results_with_coords: 包含坐标的结果数据
  272. @param output_dir: 输出目录
  273. @param template_raster: 模板栅格文件路径
  274. @param boundary_shp: 边界shapefile路径
  275. @param colormap: 色彩方案
  276. @param resolution_factor: 分辨率因子
  277. @param enable_interpolation: 是否启用空间插值
  278. @returns: 生成的图片文件路径字典
  279. """
  280. try:
  281. if not results_with_coords:
  282. raise ValueError("没有包含坐标的结果数据")
  283. # 确保输出目录存在
  284. os.makedirs(output_dir, exist_ok=True)
  285. generated_files: Dict[str, str] = {}
  286. # 生成时间戳
  287. timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
  288. # 创建CSV文件用于绘图
  289. csv_filename = f"{calculation_type}_{area}_temp_{timestamp}.csv"
  290. csv_path = os.path.join(output_dir, csv_filename)
  291. # 准备绘图数据
  292. plot_data = []
  293. for result in results_with_coords:
  294. plot_data.append({
  295. "longitude": result["longitude"],
  296. "latitude": result["latitude"],
  297. "flux_value": result["flux_value"]
  298. })
  299. # 保存为CSV
  300. df = pd.DataFrame(plot_data)
  301. df.to_csv(csv_path, index=False, encoding='utf-8-sig')
  302. # 初始化绘图工具
  303. mapper = MappingUtils()
  304. # 生成输出文件路径
  305. map_output = os.path.join(output_dir, f"{calculation_type}_{area}_map_{timestamp}")
  306. histogram_output = os.path.join(output_dir, f"{calculation_type}_{area}_histogram_{timestamp}")
  307. # 检查模板文件是否存在
  308. if not os.path.exists(template_raster):
  309. self.logger.warning(f"模板栅格文件不存在: {template_raster}")
  310. template_raster = None
  311. # 动态获取边界数据(严格使用指定层级)
  312. if not level:
  313. raise ValueError("必须提供行政层级 level:county | city | province")
  314. # 优化:直接从数据库获取边界GeoDataFrame,避免创建临时shapefile文件
  315. # 这样可以减少磁盘I/O操作和临时文件管理的开销
  316. boundary_gdf = self._get_boundary_gdf_from_database(area, level)
  317. boundary_shp = None # 不再需要临时边界文件
  318. if boundary_gdf is None:
  319. self.logger.warning(f"未找到地区 '{area}' 的边界数据,将不使用边界裁剪")
  320. else:
  321. # 在绘图前进行样点边界包含性统计
  322. try:
  323. if boundary_gdf is not None and len(boundary_gdf) > 0:
  324. boundary_union = boundary_gdf.unary_union
  325. total_points = len(results_with_coords)
  326. inside_count = 0
  327. outside_points: List[Dict[str, Any]] = []
  328. for r in results_with_coords:
  329. pt = Point(float(r["longitude"]), float(r["latitude"]))
  330. if boundary_union.contains(pt) or boundary_union.touches(pt):
  331. inside_count += 1
  332. else:
  333. outside_points.append({
  334. "farmland_id": r.get("farmland_id"),
  335. "sample_id": r.get("sample_id"),
  336. "longitude": r.get("longitude"),
  337. "latitude": r.get("latitude"),
  338. "flux_value": r.get("flux_value")
  339. })
  340. outside_count = total_points - inside_count
  341. inside_pct = (inside_count / total_points * 100.0) if total_points > 0 else 0.0
  342. self.logger.info(
  343. f"样点边界检查 - 总数: {total_points}, 边界内: {inside_count} ({inside_pct:.2f}%), 边界外: {outside_count}")
  344. if outside_count > 0:
  345. sample_preview = outside_points[:10]
  346. self.logger.warning(
  347. f"存在 {outside_count} 个样点位于边界之外,绘图时将被掩膜隐藏。示例(最多10条): {sample_preview}")
  348. # 在日志中打印边界检查统计结果
  349. self.logger.info(
  350. f"边界检查统计 - 地区: {area}, 层级: {level}, 计算类型: {calculation_type}, "
  351. f"总样点: {total_points}, 边界内: {inside_count} ({inside_pct:.2f}%), "
  352. f"边界外: {outside_count}"
  353. )
  354. if outside_count > 0 and len(outside_points) > 0:
  355. sample_preview = outside_points[:5] # 只显示前5个样本
  356. self.logger.info(f"边界外样点示例(前5个): {sample_preview}")
  357. else:
  358. generated_files = {}
  359. except Exception as check_err:
  360. self.logger.warning(f"样点边界包含性检查失败: {str(check_err)}")
  361. # 保持已有 generated_files,不覆盖
  362. # 创建shapefile
  363. shapefile_path = csv_path.replace('.csv', '_points.shp')
  364. mapper.csv_to_shapefile(csv_path, shapefile_path,
  365. lon_col='longitude', lat_col='latitude', value_col='flux_value')
  366. # 合并已生成文件映射
  367. generated_files.update({"csv": csv_path, "shapefile": shapefile_path})
  368. # 如果有模板栅格文件,创建栅格地图
  369. if template_raster:
  370. try:
  371. # 创建栅格
  372. raster_path = csv_path.replace('.csv', '_raster.tif')
  373. raster_path, stats = mapper.vector_to_raster(
  374. shapefile_path, template_raster, raster_path, 'flux_value',
  375. resolution_factor=resolution_factor, boundary_shp=boundary_shp, boundary_gdf=boundary_gdf,
  376. interpolation_method='nearest', enable_interpolation=enable_interpolation
  377. )
  378. generated_files["raster"] = raster_path
  379. # 创建栅格地图 - 使用英文标题避免中文乱码
  380. title_mapping = {
  381. "grain_removal": "Grain Removal Cd Flux",
  382. "straw_removal": "Straw Removal Cd Flux"
  383. }
  384. map_title = title_mapping.get(calculation_type, "Cd Flux Removal")
  385. map_file = mapper.create_raster_map(
  386. boundary_shp if boundary_shp else None,
  387. raster_path,
  388. map_output,
  389. colormap=colormap,
  390. title=map_title,
  391. output_size=12,
  392. dpi=300,
  393. resolution_factor=4.0,
  394. enable_interpolation=False,
  395. interpolation_method='nearest',
  396. boundary_gdf=boundary_gdf
  397. )
  398. generated_files["map"] = map_file
  399. # 创建直方图 - 使用英文标题避免中文乱码
  400. histogram_title_mapping = {
  401. "grain_removal": "Grain Removal Cd Flux Distribution",
  402. "straw_removal": "Straw Removal Cd Flux Distribution"
  403. }
  404. histogram_title = histogram_title_mapping.get(calculation_type, "Cd Flux Distribution")
  405. histogram_file = mapper.create_histogram(
  406. raster_path,
  407. f"{histogram_output}.jpg",
  408. title=histogram_title,
  409. xlabel='Cd Flux (g/ha/a)',
  410. ylabel='Frequency Density'
  411. )
  412. generated_files["histogram"] = histogram_file
  413. except Exception as viz_error:
  414. self.logger.warning(f"栅格可视化创建失败: {str(viz_error)}")
  415. # 即使栅格可视化失败,也返回已生成的文件
  416. # 清理中间文件(默认开启,仅保留最终可视化)
  417. if cleanup_intermediate:
  418. try:
  419. # 由于不再创建临时边界文件,所以传递None
  420. self._cleanup_intermediate_files(generated_files, None)
  421. except Exception as cleanup_err:
  422. self.logger.warning(f"中间文件清理失败: {str(cleanup_err)}")
  423. self.logger.info(f"✓ 成功创建 {calculation_type} 可视化,生成文件: {list(generated_files.keys())}")
  424. return generated_files
  425. except Exception as e:
  426. self.logger.error(f"创建可视化失败: {str(e)}")
  427. raise
  428. def _cleanup_intermediate_files(self, generated_files: Dict[str, str], boundary_shp: Optional[str]) -> None:
  429. """
  430. 清理中间文件:CSV、Shapefile 及其配套文件、栅格TIFF;若边界为临时目录,则一并删除
  431. """
  432. import shutil
  433. import tempfile
  434. def _safe_remove(path: str) -> None:
  435. try:
  436. if path and os.path.exists(path) and os.path.isfile(path):
  437. os.remove(path)
  438. except Exception:
  439. pass
  440. # 删除 CSV
  441. _safe_remove(generated_files.get("csv"))
  442. # 删除栅格
  443. _safe_remove(generated_files.get("raster"))
  444. # 删除 Shapefile 全家桶
  445. shp_path = generated_files.get("shapefile")
  446. if shp_path:
  447. base, _ = os.path.splitext(shp_path)
  448. for ext in (".shp", ".shx", ".dbf", ".prj", ".cpg"):
  449. _safe_remove(base + ext)
  450. # 如果边界文件来自系统临时目录,删除其所在目录
  451. if boundary_shp:
  452. temp_root = tempfile.gettempdir()
  453. try:
  454. if os.path.commonprefix([os.path.abspath(boundary_shp), temp_root]) == temp_root:
  455. temp_dir = os.path.dirname(os.path.abspath(boundary_shp))
  456. if os.path.isdir(temp_dir):
  457. shutil.rmtree(temp_dir, ignore_errors=True)
  458. except Exception:
  459. pass
  460. def _get_boundary_file_for_area(self, area: str, level: str) -> Optional[str]:
  461. """
  462. 为指定地区获取边界文件
  463. @param area: 地区名称
  464. @returns: 边界文件路径或None
  465. """
  466. try:
  467. # 仅从数据库严格获取边界(按指定层级精确匹配)
  468. boundary_path = self._create_boundary_from_database(area, level)
  469. if boundary_path:
  470. return boundary_path
  471. # 如果都没有找到,记录警告但不使用默认文件
  472. self.logger.warning(f"未找到地区 '{area}' 的专用边界文件,也无法从数据库获取")
  473. return None
  474. except Exception as e:
  475. self.logger.error(f"获取边界文件失败: {str(e)}")
  476. return None
  477. def _get_boundary_gdf_from_database(self, area: str, level: str) -> Optional[gpd.GeoDataFrame]:
  478. """
  479. 直接从数据库获取边界数据作为GeoDataFrame
  480. @param area: 地区名称
  481. @param level: 行政层级
  482. @returns: 边界GeoDataFrame或None
  483. """
  484. try:
  485. with SessionLocal() as db:
  486. norm_area = area.strip()
  487. boundary_gdf = get_boundary_gdf_by_name(db, norm_area, level=level)
  488. if boundary_gdf is not None:
  489. self.logger.info(f"从数据库获取边界数据: {norm_area} ({level})")
  490. return boundary_gdf
  491. except Exception as e:
  492. self.logger.warning(f"从数据库获取边界数据失败: {str(e)}")
  493. return None
  494. def _create_boundary_from_database(self, area: str, level: str) -> Optional[str]:
  495. """
  496. 从数据库获取边界数据并创建临时shapefile
  497. @param area: 地区名称
  498. @returns: 临时边界文件路径或None
  499. @deprecated: 建议使用 _get_boundary_gdf_from_database 方法直接获取 GeoDataFrame
  500. """
  501. try:
  502. with SessionLocal() as db:
  503. norm_area = area.strip()
  504. boundary_geojson = get_boundary_geojson_by_name(db, norm_area, level=level)
  505. if boundary_geojson:
  506. geometry_obj = shape(boundary_geojson["geometry"])
  507. gdf = gpd.GeoDataFrame([boundary_geojson["properties"]], geometry=[geometry_obj], crs="EPSG:4326")
  508. temp_dir = tempfile.mkdtemp()
  509. boundary_path = os.path.join(temp_dir, f"{norm_area}_boundary.shp")
  510. gdf.to_file(boundary_path, driver="ESRI Shapefile")
  511. self.logger.info(f"从数据库创建边界文件: {boundary_path}")
  512. return boundary_path
  513. except Exception as e:
  514. self.logger.warning(f"从数据库创建边界文件失败: {str(e)}")
  515. return None