diff --git a/backend/apps/scan/flows/site_scan_flow.py b/backend/apps/scan/flows/site_scan_flow.py
index e9897989..7b7cd919 100644
--- a/backend/apps/scan/flows/site_scan_flow.py
+++ b/backend/apps/scan/flows/site_scan_flow.py
@@ -10,301 +10,326 @@
 - Configuration is parsed from YAML
"""
-from datetime import datetime
import logging
import subprocess
+from dataclasses import dataclass
+from datetime import datetime
from pathlib import Path
+from typing import Optional
from prefect import flow
-# Django environment initialization (takes effect on import, pylint: disable=unused-import)
+# Django environment initialization (takes effect on import)
from apps.common.prefect_django_setup import setup_django_for_prefect # noqa: F401
from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_completed,
on_scan_flow_failed,
on_scan_flow_running,
)
-from apps.scan.tasks.site_scan import export_site_urls_task, run_and_stream_save_websites_task
+from apps.scan.tasks.site_scan import (
+ export_site_urls_task,
+ run_and_stream_save_websites_task,
+)
from apps.scan.utils import build_scan_command, user_log, wait_for_system_load
logger = logging.getLogger(__name__)
-def calculate_timeout_by_line_count(
- tool_config: dict,
- file_path: str,
- base_per_time: int = 1,
- min_timeout: int = 60
-) -> int:
- """
-    Calculate the timeout from a file's line count
-
-    Counts the file's lines with wc -l, then derives the timeout from the line count and per-line base time
-
-    Args:
-        tool_config: tool config dict (unused here; kept for interface consistency)
-        file_path: path of the file whose lines are counted
-        base_per_time: base time per line in seconds, default 1
-        min_timeout: minimum timeout in seconds, default 60
-
-    Returns:
-        int: the computed timeout in seconds, never below min_timeout
-
-    Example:
-        timeout = calculate_timeout_by_line_count(
-            tool_config={},
-            file_path='/path/to/urls.txt',
-            base_per_time=2
-        )
- """
+@dataclass
+class ScanContext:
+ """扫描上下文,封装扫描参数"""
+ scan_id: int
+ target_id: int
+ target_name: str
+ site_scan_dir: Path
+ urls_file: str
+ total_urls: int
+
+
+def _count_file_lines(file_path: str) -> int:
+ """使用 wc -l 统计文件行数"""
try:
-        # Use wc -l for a fast line count
result = subprocess.run(
['wc', '-l', file_path],
capture_output=True,
text=True,
check=True
)
-        # wc -l output format: line count + space + filename
-        line_count = int(result.stdout.strip().split()[0])
-
-        # Compute timeout: line count × per-line base time, never below the minimum
-        timeout = max(line_count * base_per_time, min_timeout)
-
-        logger.info(
-            f"timeout auto-calculated: file={file_path}, "
-            f"lines={line_count}, per-line={base_per_time}s, minimum={min_timeout}s, timeout={timeout}s"
-        )
-
-        return timeout
-
-    except Exception as e:
-        # Fall back to the default if wc -l fails
-        logger.warning(f"wc -l line count failed: {e}; using default timeout: {min_timeout}s")
-        return min_timeout
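+        # wc -l output is "<line count> <filename>"; take the first field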
+ return int(result.stdout.strip().split()[0])
+    except (subprocess.CalledProcessError, OSError, ValueError, IndexError) as e:
+        # OSError also covers a missing wc binary (FileNotFoundError)
+        logger.warning("wc -l line count failed: %s; returning 0", e)
+ return 0
+def _calculate_timeout_by_line_count(
+ file_path: str,
+ base_per_time: int = 1,
+ min_timeout: int = 60
+) -> int:
+ """
+    Calculate the timeout from a file's line count
+
+    Args:
+        file_path: path of the file whose lines are counted
+        base_per_time: base time per line in seconds, default 1
+        min_timeout: minimum timeout in seconds, default 60
+
+    Returns:
+        int: the computed timeout in seconds, never below min_timeout
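+
+    Example:
+        # an illustrative 120-line URL file with base_per_time=1 yields max(120 * 1, 60) = 120
+        timeout = _calculate_timeout_by_line_count('/path/to/urls.txt')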
+ """
+ line_count = _count_file_lines(file_path)
+ timeout = max(line_count * base_per_time, min_timeout)
+
+ logger.info(
+ "timeout 自动计算: 文件=%s, 行数=%d, 每行时间=%d秒, timeout=%d秒",
+ file_path, line_count, base_per_time, timeout
+ )
+ return timeout
-
-def _export_site_urls(target_id: int, site_scan_dir: Path, target_name: str = None) -> tuple[str, int, int]:
+def _export_site_urls(
+ target_id: int,
+ site_scan_dir: Path
+) -> tuple[str, int, int]:
"""
    Export site URLs to a file
-
+
Args:
        target_id: Target ID
        site_scan_dir: Site scan directory
-        target_name: Target name (used to write a default value during lazy loading)
-
+
Returns:
tuple: (urls_file, total_urls, association_count)
-
- Raises:
-        ValueError: URL count is 0
"""
logger.info("Step 1: 导出站点URL列表")
-
+
urls_file = str(site_scan_dir / 'site_urls.txt')
export_result = export_site_urls_task(
target_id=target_id,
output_file=urls_file,
-        batch_size=1000 # process 1000 subdomains per batch
+ batch_size=1000
)
-
+
total_urls = export_result['total_urls']
-    association_count = export_result['association_count'] # host-port association count
-
+ association_count = export_result['association_count']
+
logger.info(
"✓ 站点URL导出完成 - 文件: %s, URL数量: %d, 关联数: %d",
- export_result['output_file'],
- total_urls,
- association_count
+ export_result['output_file'], total_urls, association_count
)
-
+
if total_urls == 0:
logger.warning("目标下没有可用的站点URL,无法执行站点扫描")
- # 不抛出异常,由上层决定如何处理
- # raise ValueError("目标下没有可用的站点URL,无法执行站点扫描")
-
+
return export_result['output_file'], total_urls, association_count
+def _get_tool_timeout(tool_config: dict, urls_file: str) -> int:
+ """获取工具超时时间(支持 'auto' 动态计算)"""
+ config_timeout = tool_config.get('timeout', 300)
+
+ if config_timeout == 'auto':
+ return _calculate_timeout_by_line_count(urls_file, base_per_time=1)
+
+ dynamic_timeout = _calculate_timeout_by_line_count(urls_file, base_per_time=1)
+ return max(dynamic_timeout, config_timeout)
+
+
+def _execute_single_tool(
+ tool_name: str,
+ tool_config: dict,
+ ctx: ScanContext
+) -> Optional[dict]:
+ """
+    Execute a single scan tool
+
+    Returns:
+        A result dict on success, None on failure
+ """
+    # Build the command
+ try:
+ command = build_scan_command(
+ tool_name=tool_name,
+ scan_type='site_scan',
+ command_params={'url_file': ctx.urls_file},
+ tool_config=tool_config
+ )
+ except (ValueError, KeyError) as e:
+ logger.error("构建 %s 命令失败: %s", tool_name, e)
+ return None
+
+ timeout = _get_tool_timeout(tool_config, ctx.urls_file)
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+ log_file = ctx.site_scan_dir / f"{tool_name}_{timestamp}.log"
+
+ logger.info(
+ "开始执行 %s 站点扫描 - URL数: %d, 超时: %ds",
+ tool_name, ctx.total_urls, timeout
+ )
+ user_log(ctx.scan_id, "site_scan", f"Running {tool_name}: {command}")
+
+ try:
+ result = run_and_stream_save_websites_task(
+ cmd=command,
+ tool_name=tool_name,
+ scan_id=ctx.scan_id,
+ target_id=ctx.target_id,
+ cwd=str(ctx.site_scan_dir),
+ shell=True,
+ timeout=timeout,
+ log_file=str(log_file)
+ )
+
+ tool_created = result.get('created_websites', 0)
+ skipped = result.get('skipped_no_subdomain', 0) + result.get('skipped_failed', 0)
+
+ logger.info(
+ "✓ 工具 %s 完成 - 处理: %d, 创建: %d, 跳过: %d",
+ tool_name, result.get('processed_records', 0), tool_created, skipped
+ )
+ user_log(
+ ctx.scan_id, "site_scan",
+ f"{tool_name} completed: found {tool_created} websites"
+ )
+
+ return {'command': command, 'result': result, 'timeout': timeout}
+
+ except subprocess.TimeoutExpired:
+ logger.warning(
+ "⚠️ 工具 %s 执行超时 - 超时配置: %d秒 (超时前数据已保存)",
+ tool_name, timeout
+ )
+ user_log(
+ ctx.scan_id, "site_scan",
+ f"{tool_name} failed: timeout after {timeout}s", "error"
+ )
+ except (OSError, RuntimeError) as exc:
+ logger.error("工具 %s 执行失败: %s", tool_name, exc, exc_info=True)
+ user_log(ctx.scan_id, "site_scan", f"{tool_name} failed: {exc}", "error")
+
+ return None
+
+
def _run_scans_sequentially(
enabled_tools: dict,
- urls_file: str,
- total_urls: int,
- site_scan_dir: Path,
- scan_id: int,
- target_id: int,
- target_name: str
+ ctx: ScanContext
) -> tuple[dict, int, list, list]:
"""
    Run site scan tasks sequentially
-
-    Args:
-        enabled_tools: dict of enabled tool configs
-        urls_file: URL file path
-        total_urls: total URL count
-        site_scan_dir: site scan directory
-        scan_id: scan task ID
-        target_id: target ID
-        target_name: target name (for error logs)
-
+
Returns:
- tuple: (tool_stats, processed_records, successful_tool_names, failed_tools)
-
- Raises:
-        RuntimeError: all tools failed
+ tuple: (tool_stats, processed_records, successful_tools, failed_tools)
"""
tool_stats = {}
processed_records = 0
failed_tools = []
-
+
for tool_name, tool_config in enabled_tools.items():
-        # 1. Build the full command (variable substitution)
- try:
- command_params = {'url_file': urls_file}
-
- command = build_scan_command(
- tool_name=tool_name,
- scan_type='site_scan',
- command_params=command_params,
- tool_config=tool_config
- )
- except Exception as e:
- reason = f"命令构建失败: {str(e)}"
- logger.error(f"构建 {tool_name} 命令失败: {e}")
- failed_tools.append({'tool': tool_name, 'reason': reason})
- continue
-
-        # 2. Resolve the timeout (supports 'auto' dynamic calculation)
-        config_timeout = tool_config.get('timeout', 300)
-        if config_timeout == 'auto':
-            # Dynamically compute the timeout
-            timeout = calculate_timeout_by_line_count(tool_config, urls_file, base_per_time=1)
-            logger.info(f"✓ Tool {tool_name} dynamically computed timeout: {timeout}s")
+ result = _execute_single_tool(tool_name, tool_config, ctx)
+
+ if result:
+ tool_stats[tool_name] = result
+ processed_records += result['result'].get('processed_records', 0)
else:
-            # Use the larger of the configured timeout and the dynamically computed one
- dynamic_timeout = calculate_timeout_by_line_count(tool_config, urls_file, base_per_time=1)
- timeout = max(dynamic_timeout, config_timeout)
-
-        # 2.1 Build the log file path (similar to port scan)
- timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
- log_file = site_scan_dir / f"{tool_name}_{timestamp}.log"
-
- logger.info(
- "开始执行 %s 站点扫描 - URL数: %d, 最终超时: %ds",
- tool_name, total_urls, timeout
- )
- user_log(scan_id, "site_scan", f"Running {tool_name}: {command}")
-
-        # 3. Run the scan task
-        try:
-            # Stream the scan and save results in real time
- result = run_and_stream_save_websites_task(
- cmd=command,
- tool_name=tool_name,
- scan_id=scan_id,
- target_id=target_id,
- cwd=str(site_scan_dir),
- shell=True,
- timeout=timeout,
- log_file=str(log_file)
- )
-
- tool_stats[tool_name] = {
- 'command': command,
- 'result': result,
- 'timeout': timeout
- }
- tool_records = result.get('processed_records', 0)
- tool_created = result.get('created_websites', 0)
- processed_records += tool_records
-
- logger.info(
- "✓ 工具 %s 流式处理完成 - 处理记录: %d, 创建站点: %d, 跳过: %d",
- tool_name,
- tool_records,
- tool_created,
- result.get('skipped_no_subdomain', 0) + result.get('skipped_failed', 0)
- )
- user_log(scan_id, "site_scan", f"{tool_name} completed: found {tool_created} websites")
-
- except subprocess.TimeoutExpired:
-            # Handle the timeout exception separately
- reason = f"timeout after {timeout}s"
- failed_tools.append({'tool': tool_name, 'reason': reason})
- logger.warning(
- "⚠️ 工具 %s 执行超时 - 超时配置: %d秒\n"
- "注意:超时前已解析的站点数据已保存到数据库,但扫描未完全完成。",
- tool_name, timeout
- )
- user_log(scan_id, "site_scan", f"{tool_name} failed: {reason}", "error")
- except Exception as exc:
-            # Other exceptions
- reason = str(exc)
- failed_tools.append({'tool': tool_name, 'reason': reason})
- logger.error("工具 %s 执行失败: %s", tool_name, exc, exc_info=True)
- user_log(scan_id, "site_scan", f"{tool_name} failed: {reason}", "error")
-
+            failed_tools.append({'tool': tool_name, 'reason': 'execution failed'})
+
if failed_tools:
logger.warning(
"以下扫描工具执行失败: %s",
- ', '.join([f['tool'] for f in failed_tools])
+ ', '.join(f['tool'] for f in failed_tools)
)
-
+
if not tool_stats:
- error_details = "; ".join([f"{f['tool']}: {f['reason']}" for f in failed_tools])
- logger.warning("所有站点扫描工具均失败 - 目标: %s, 失败工具: %s", target_name, error_details)
- # 返回空结果,不抛出异常,让扫描继续
+ logger.warning(
+ "所有站点扫描工具均失败 - 目标: %s", ctx.target_name
+ )
return {}, 0, [], failed_tools
-
-    # Dynamically build the list of successful tools
- successful_tool_names = [name for name in enabled_tools.keys()
- if name not in [f['tool'] for f in failed_tools]]
-
+
+ successful_tools = [
+ name for name in enabled_tools
+ if name not in {f['tool'] for f in failed_tools}
+ ]
+
logger.info(
- "✓ 串行站点扫描执行完成 - 成功: %d/%d (成功: %s, 失败: %s)",
- len(tool_stats), len(enabled_tools),
- ', '.join(successful_tool_names) if successful_tool_names else '无',
- ', '.join([f['tool'] for f in failed_tools]) if failed_tools else '无'
+ "✓ 站点扫描执行完成 - 成功: %d/%d",
+ len(tool_stats), len(enabled_tools)
)
-
- return tool_stats, processed_records, successful_tool_names, failed_tools
+
+ return tool_stats, processed_records, successful_tools, failed_tools
-def calculate_timeout(url_count: int, base: int = 600, per_url: int = 1) -> int:
- """
-    Dynamically compute the scan timeout from the URL count
+def _build_empty_result(
+ scan_id: int,
+ target_name: str,
+ scan_workspace_dir: str,
+ urls_file: str,
+ association_count: int
+) -> dict:
+ """构建空结果(无 URL 可扫描时)"""
+ return {
+ 'success': True,
+ 'scan_id': scan_id,
+ 'target': target_name,
+ 'scan_workspace_dir': scan_workspace_dir,
+ 'urls_file': urls_file,
+ 'total_urls': 0,
+ 'association_count': association_count,
+ 'processed_records': 0,
+ 'created_websites': 0,
+ 'skipped_no_subdomain': 0,
+ 'skipped_failed': 0,
+ 'executed_tasks': ['export_site_urls'],
+ 'tool_stats': {
+ 'total': 0,
+ 'successful': 0,
+ 'failed': 0,
+ 'successful_tools': [],
+ 'failed_tools': [],
+ 'details': {}
+ }
+ }
-    Rules:
-    - Base time: 600 seconds (10 minutes) by default
-    - Each URL adds: 1 second by default
- Args:
-    url_count: URL count, must be a positive integer
-    base: base timeout in seconds, default 600
-    per_url: time added per URL in seconds, default 1
+def _aggregate_tool_results(tool_stats: dict) -> tuple[int, int, int]:
+ """汇总工具结果"""
+ total_created = sum(
+ s['result'].get('created_websites', 0) for s in tool_stats.values()
+ )
+ total_skipped_no_subdomain = sum(
+ s['result'].get('skipped_no_subdomain', 0) for s in tool_stats.values()
+ )
+ total_skipped_failed = sum(
+ s['result'].get('skipped_failed', 0) for s in tool_stats.values()
+ )
+ return total_created, total_skipped_no_subdomain, total_skipped_failed
- Returns:
-        int: the computed timeout in seconds, not exceeding max_timeout
-    Raises:
-        ValueError: raised when url_count is negative or 0
- """
- if url_count < 0:
- raise ValueError(f"URL数量不能为负数: {url_count}")
- if url_count == 0:
- raise ValueError("URL数量不能为0")
-
- timeout = base + int(url_count * per_url)
-
-    # No upper limit; the caller controls it as needed
- return timeout
+def _validate_flow_params(
+ scan_id: int,
+ target_name: str,
+ target_id: int,
+ scan_workspace_dir: str
+) -> None:
+ """验证 Flow 参数"""
+ if scan_id is None:
+ raise ValueError("scan_id 不能为空")
+ if not target_name:
+ raise ValueError("target_name 不能为空")
+ if target_id is None:
+ raise ValueError("target_id 不能为空")
+ if not scan_workspace_dir:
+ raise ValueError("scan_workspace_dir 不能为空")
@flow(
- name="site_scan",
+ name="site_scan",
log_prints=True,
on_running=[on_scan_flow_running],
on_completion=[on_scan_flow_completed],
@@ -319,143 +337,83 @@ def site_scan_flow(
) -> dict:
"""
    Site scan Flow
-
+
    Main responsibilities:
    1. Fetch all subdomains and their associated ports for the target, join them into URLs, and write them to a file
    2. Batch-request the URLs with httpx and save results to the database in real time (streaming)
-
-    Workflow:
-    Step 0: Create the working directory
-    Step 1: Export the site URL list
-    Step 2: Parse the config and get enabled tools
-    Step 3: Run scan tools sequentially and save results in real time
-
+
Args:
        scan_id: Scan task ID
        target_name: Target name
        target_id: Target ID
        scan_workspace_dir: Scan workspace directory
        enabled_tools: Dict of enabled tool configs
-
+
Returns:
- dict: {
- 'success': bool,
- 'scan_id': int,
- 'target': str,
- 'scan_workspace_dir': str,
- 'urls_file': str,
- 'total_urls': int,
- 'association_count': int,
- 'processed_records': int,
- 'created_websites': int,
- 'skipped_no_subdomain': int,
- 'skipped_failed': int,
- 'executed_tasks': list,
- 'tool_stats': {
- 'total': int,
- 'successful': int,
- 'failed': int,
- 'successful_tools': list[str],
- 'failed_tools': list[dict]
- }
- }
-
+        dict: scan result
+
Raises:
        ValueError: configuration error
        RuntimeError: execution failure
"""
try:
-        # Load check: wait until system resources are sufficient
wait_for_system_load(context="site_scan_flow")
logger.info(
- "="*60 + "\n" +
- "开始站点扫描\n" +
- f" Scan ID: {scan_id}\n" +
- f" Target: {target_name}\n" +
- f" Workspace: {scan_workspace_dir}\n" +
- "="*60
+ "开始站点扫描 - Scan ID: %s, Target: %s, Workspace: %s",
+ scan_id, target_name, scan_workspace_dir
)
-
-        # Parameter validation
-        if scan_id is None:
-            raise ValueError("scan_id must not be empty")
-        if not target_name:
-            raise ValueError("target_name must not be empty")
-        if target_id is None:
-            raise ValueError("target_id must not be empty")
-        if not scan_workspace_dir:
-            raise ValueError("scan_workspace_dir must not be empty")
-
+
+ _validate_flow_params(scan_id, target_name, target_id, scan_workspace_dir)
user_log(scan_id, "site_scan", "Starting site scan")
-
+
        # Step 0: Create the working directory
from apps.scan.utils import setup_scan_directory
site_scan_dir = setup_scan_directory(scan_workspace_dir, 'site_scan')
-
+
        # Step 1: Export site URLs
urls_file, total_urls, association_count = _export_site_urls(
- target_id, site_scan_dir, target_name
+ target_id, site_scan_dir
)
-
+
if total_urls == 0:
logger.warning("跳过站点扫描:没有站点 URL 可扫描 - Scan ID: %s", scan_id)
user_log(scan_id, "site_scan", "Skipped: no site URLs to scan", "warning")
- return {
- 'success': True,
- 'scan_id': scan_id,
- 'target': target_name,
- 'scan_workspace_dir': scan_workspace_dir,
- 'urls_file': urls_file,
- 'total_urls': 0,
- 'association_count': association_count,
- 'processed_records': 0,
- 'created_websites': 0,
- 'skipped_no_subdomain': 0,
- 'skipped_failed': 0,
- 'executed_tasks': ['export_site_urls'],
- 'tool_stats': {
- 'total': 0,
- 'successful': 0,
- 'failed': 0,
- 'successful_tools': [],
- 'failed_tools': [],
- 'details': {}
- }
- }
-
+ return _build_empty_result(
+ scan_id, target_name, scan_workspace_dir, urls_file, association_count
+ )
+
        # Step 2: Tool configuration
-        logger.info("Step 2: Tool configuration")
-        logger.info(
-            "✓ Enabled tools: %s",
-            ', '.join(enabled_tools.keys())
-        )
-
+        logger.info("✓ Enabled tools: %s", ', '.join(enabled_tools))
+
        # Step 3: Run scan tools sequentially
-        logger.info("Step 3: Running scan tools sequentially and saving results in real time")
- tool_stats, processed_records, successful_tool_names, failed_tools = _run_scans_sequentially(
- enabled_tools=enabled_tools,
- urls_file=urls_file,
- total_urls=total_urls,
- site_scan_dir=site_scan_dir,
+ ctx = ScanContext(
scan_id=scan_id,
target_id=target_id,
- target_name=target_name
+ target_name=target_name,
+ site_scan_dir=site_scan_dir,
+ urls_file=urls_file,
+ total_urls=total_urls
)
-
-        # Dynamically build the list of executed tasks
+
+ tool_stats, processed_records, successful_tools, failed_tools = \
+ _run_scans_sequentially(enabled_tools, ctx)
+
+        # Aggregate results
executed_tasks = ['export_site_urls', 'parse_config']
- executed_tasks.extend([f'run_and_stream_save_websites ({tool})' for tool in tool_stats.keys()])
-
-        # Aggregate results from all tools
- total_created = sum(stats['result'].get('created_websites', 0) for stats in tool_stats.values())
- total_skipped_no_subdomain = sum(stats['result'].get('skipped_no_subdomain', 0) for stats in tool_stats.values())
- total_skipped_failed = sum(stats['result'].get('skipped_failed', 0) for stats in tool_stats.values())
-
-        # Log flow completion
+ executed_tasks.extend(
+ f'run_and_stream_save_websites ({tool})' for tool in tool_stats
+ )
+
+ total_created, total_skipped_no_sub, total_skipped_failed = \
+ _aggregate_tool_results(tool_stats)
+
logger.info("✓ 站点扫描完成 - 创建站点: %d", total_created)
- user_log(scan_id, "site_scan", f"site_scan completed: found {total_created} websites")
-
+ user_log(
+ scan_id, "site_scan",
+ f"site_scan completed: found {total_created} websites"
+ )
+
return {
'success': True,
'scan_id': scan_id,
@@ -466,25 +424,20 @@ def site_scan_flow(
'association_count': association_count,
'processed_records': processed_records,
'created_websites': total_created,
- 'skipped_no_subdomain': total_skipped_no_subdomain,
+ 'skipped_no_subdomain': total_skipped_no_sub,
'skipped_failed': total_skipped_failed,
'executed_tasks': executed_tasks,
'tool_stats': {
'total': len(enabled_tools),
- 'successful': len(successful_tool_names),
+ 'successful': len(successful_tools),
'failed': len(failed_tools),
- 'successful_tools': successful_tool_names,
+ 'successful_tools': successful_tools,
'failed_tools': failed_tools,
'details': tool_stats
}
}
-
- except ValueError as e:
- logger.error("配置错误: %s", e)
+
+ except ValueError:
raise
- except RuntimeError as e:
- logger.error("运行时错误: %s", e)
+ except RuntimeError:
raise
- except Exception as e:
-        logger.exception("Site scan failed: %s", e)
- raise
\ No newline at end of file
diff --git a/frontend/components/settings/system-logs/ansi-log-viewer.tsx b/frontend/components/settings/system-logs/ansi-log-viewer.tsx
index d6800305..ee35f167 100644
--- a/frontend/components/settings/system-logs/ansi-log-viewer.tsx
+++ b/frontend/components/settings/system-logs/ansi-log-viewer.tsx
@@ -55,9 +55,10 @@ function hasAnsiCodes(text: string): boolean {
// Parse plain-text log content and colorize log levels
function colorizeLogContent(content: string): string {
-  // Match log format: [time] [level] [module:line] message
-  // e.g.: [2025-01-05 10:30:00] [INFO] [apps.scan:123] message content
- const logLineRegex = /^(\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\]) (\[(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\]) (.*)$/
+  // Matched log formats:
+  // 1) System logs: [2026-01-10 09:51:52] [INFO] [apps.scan.xxx:123] ...
+  // 2) Scan logs:   [09:50:37] [INFO] [subdomain_discovery] ...
+ const logLineRegex = /^(\[(?:\d{4}-\d{2}-\d{2} )?\d{2}:\d{2}:\d{2}\]) (\[(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\]) (.*)$/i
return content
.split("\n")
@@ -66,14 +67,15 @@ function colorizeLogContent(content: string): string {
if (match) {
const [, timestamp, levelBracket, level, rest] = match
- const color = LOG_LEVEL_COLORS[level] || "#d4d4d4"
+ const levelUpper = level.toUpperCase()
+ const color = LOG_LEVEL_COLORS[levelUpper] || "#d4d4d4"
      // ansiConverter.toHtml already handles HTML escaping
const escapedTimestamp = ansiConverter.toHtml(timestamp)
const escapedLevelBracket = ansiConverter.toHtml(levelBracket)
const escapedRest = ansiConverter.toHtml(rest)
      // Gray timestamp, colored log level, default color for the rest
-      return `<span style="color:#808080">${escapedTimestamp}</span> <span style="color:${color}">${escapedLevelBracket}</span> ${escapedRest}`
+      return `<span style="color:#808080">${escapedTimestamp}</span> <span style="color:${color}">${escapedLevelBracket}</span> ${escapedRest}`
}
// 非标准格式的行,也进行 HTML 转义
@@ -112,6 +114,8 @@ function highlightSearch(html: string, query: string): string {
const LOG_LEVEL_PATTERNS = [
  // Standard format: [2026-01-07 12:00:00] [INFO]
/^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\] \[(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\]/i,
+  // Scan log format: [09:50:37] [INFO] [stage]
+ /^\[\d{2}:\d{2}:\d{2}\] \[(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\]/i,
  // Prefect format: 12:01:50.419 | WARNING | prefect
/^[\d:.]+\s+\|\s+(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\s+\|/i,
  // Simple format: [INFO] message or INFO: message
diff --git a/update.sh b/update.sh
new file mode 100755
index 00000000..ecb11dab
--- /dev/null
+++ b/update.sh
@@ -0,0 +1,183 @@
+#!/bin/bash
+# ============================================
+# XingRin system update script
+# Purpose: update code + sync version + rebuild images + restart services
+# ============================================
+#
+# Update flow:
+# 1. Stop services
+# 2. git pull the latest code
+# 3. Merge new .env keys + sync VERSION
+# 4. Build/pull images (dev mode builds, production mode pulls)
+# 5. Start services (the server runs database migrations automatically on startup)
+#
+# Usage:
+#   sudo ./update.sh                       Production update (pull Docker Hub images)
+#   sudo ./update.sh --dev                 Development update (build images locally)
+#   sudo ./update.sh --no-frontend         Start only the backend after updating
+#   sudo ./update.sh --dev --no-frontend   Dev update, then start only the backend
+
+cd "$(dirname "$0")"
+
+# Privilege check
+if [ "$EUID" -ne 0 ]; then
+    echo -e "\033[0;31m[ERROR] Please run this script with sudo\033[0m"
+    echo -e "   Correct usage: \033[1msudo ./update.sh\033[0m"
+ exit 1
+fi
+
+# Cross-platform sed -i (compatible with macOS and Linux)
+sed_inplace() {
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ sed -i '' "$@"
+ else
+ sed -i "$@"
+ fi
+}
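+# Usage example (illustrative tag value): sed_inplace 's/^IMAGE_TAG=.*/IMAGE_TAG=1.0.0/' docker/.env
+# (BSD sed on macOS requires an explicit empty backup suffix, hence sed -i '')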
+
+# Parse arguments to determine the mode
+DEV_MODE=false
+for arg in "$@"; do
+ case $arg in
+ --dev) DEV_MODE=true ;;
+ esac
+done
+
+# Color definitions
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+RED='\033[0;31m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+BOLD='\033[1m'
+NC='\033[0m'
+
+# Merge new .env keys (preserving values the user already set)
+merge_env_config() {
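+    # Example: if docker/.env.example gains NEW_FEATURE_FLAG=false (hypothetical
+    # key) and docker/.env does not define it, that line is appended verbatim;
+    # keys the user already set keep their existing values.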
+ local example_file="docker/.env.example"
+ local env_file="docker/.env"
+
+ if [ ! -f "$example_file" ] || [ ! -f "$env_file" ]; then
+ return
+ fi
+
+ local new_keys=0
+
+ while IFS= read -r line || [ -n "$line" ]; do
+ [[ -z "$line" || "$line" =~ ^# ]] && continue
+ local key="${line%%=*}"
+ [[ -z "$key" || "$key" == "$line" ]] && continue
+
+ if ! grep -q "^${key}=" "$env_file"; then
+ printf '%s\n' "$line" >> "$env_file"
+            echo -e "  ${GREEN}+${NC} Added: $key"
+ ((new_keys++))
+ fi
+ done < "$example_file"
+
+ if [ $new_keys -gt 0 ]; then
+ echo -e " ${GREEN}OK${NC} 已添加 $new_keys 个新配置项"
+ else
+ echo -e " ${GREEN}OK${NC} 配置已是最新"
+ fi
+}
+
+echo ""
+echo -e "${BOLD}${BLUE}╔════════════════════════════════════════╗${NC}"
+if [ "$DEV_MODE" = true ]; then
+ echo -e "${BOLD}${BLUE}║ 开发环境更新(本地构建) ║${NC}"
+else
+ echo -e "${BOLD}${BLUE}║ 生产环境更新(Docker Hub) ║${NC}"
+fi
+echo -e "${BOLD}${BLUE}╚════════════════════════════════════════╝${NC}"
+echo ""
+
+# Experimental feature warning
+echo -e "${BOLD}${YELLOW}[!] Warning: this feature is experimental and the upgrade may fail${NC}"
+echo -e "${YELLOW}    Consider running ./uninstall.sh and then ./install.sh for a clean install${NC}"
+echo ""
+echo -n -e "${YELLOW}Continue with the update? (y/N) ${NC}"
+read -r ans_continue
+ans_continue=${ans_continue:-N}
+
+if [[ ! $ans_continue =~ ^[Yy]$ ]]; then
+ echo -e "${CYAN}已取消更新。${NC}"
+ exit 0
+fi
+echo ""
+
+# Step 1: Stop services
+echo -e "${CYAN}[1/5]${NC} Stopping services..."
+./stop.sh 2>&1 | sed 's/^/ /'
+
+# Step 2: 拉取代码
+echo ""
+echo -e "${CYAN}[2/5]${NC} 拉取代码..."
+git pull --rebase 2>&1 | sed 's/^/ /'
+if [ $? -ne 0 ]; then
+ echo -e "${RED}[错误]${NC} git pull 失败,请手动解决冲突后重试"
+ exit 1
+fi
+
+# Step 3: 检查配置更新 + 版本同步
+echo ""
+echo -e "${CYAN}[3/5]${NC} 检查配置更新..."
+merge_env_config
+
+# Version sync: update IMAGE_TAG from the VERSION file
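+# e.g. a VERSION file containing "1.4.2" results in IMAGE_TAG=1.4.2 in docker/.env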
+if [ -f "VERSION" ]; then
+    NEW_VERSION=$(tr -d '[:space:]' < VERSION)
+ if [ -n "$NEW_VERSION" ]; then
+ if grep -q "^IMAGE_TAG=" "docker/.env"; then
+ sed_inplace "s/^IMAGE_TAG=.*/IMAGE_TAG=$NEW_VERSION/" "docker/.env"
+ echo -e " ${GREEN}+${NC} 版本同步: IMAGE_TAG=$NEW_VERSION"
+ else
+ printf '%s\n' "IMAGE_TAG=$NEW_VERSION" >> "docker/.env"
+ echo -e " ${GREEN}+${NC} 新增版本: IMAGE_TAG=$NEW_VERSION"
+ fi
+ fi
+fi
+
+# Step 4: Build/pull images
+echo ""
+echo -e "${CYAN}[4/5]${NC} Updating images..."
+
+if [ "$DEV_MODE" = true ]; then
+    # Dev mode: build all images locally (including the Worker)
+    echo -e "  Building Worker image..."
+
+    # Read IMAGE_TAG
+ IMAGE_TAG=$(grep "^IMAGE_TAG=" "docker/.env" | cut -d'=' -f2)
+ if [ -z "$IMAGE_TAG" ]; then
+ IMAGE_TAG="dev"
+ fi
+
+    # Build the Worker image (the Worker is an ephemeral container outside compose, so it is built separately)
+    docker build -t docker-worker -f docker/worker/Dockerfile . 2>&1 | sed 's/^/  /'
+    docker tag docker-worker docker-worker:${IMAGE_TAG} 2>&1 | sed 's/^/  /'
+    echo -e "  ${GREEN}OK${NC} Worker image built: docker-worker:${IMAGE_TAG}"
+
+    # Other service images are built by start.sh --dev
+    echo -e "  Other service images will be built at startup..."
+else
+    # Production mode: images are pulled by start.sh
+    echo -e "  Images will be pulled from Docker Hub at startup..."
+fi
+
+# Step 5: Start services
+echo ""
+echo -e "${CYAN}[5/5]${NC} Starting services..."
+./start.sh "$@"
+
+echo ""
+echo -e "${BOLD}${GREEN}════════════════════════════════════════${NC}"
+echo -e "${BOLD}${GREEN} 更新完成!${NC}"
+echo -e "${BOLD}${GREEN}════════════════════════════════════════${NC}"
+echo ""