Compare commits

..

11 Commits

Author SHA1 Message Date
yyhuni
1269e5a314 refactor(scan): reorganize models and serializers into modular structure
- Split monolithic models.py into separate model files (scan_models.py, scan_log_model.py, scheduled_scan_model.py, subfinder_provider_settings_model.py)
- Split monolithic serializers.py into separate serializer files with dedicated modules for each domain
- Add SubfinderProviderSettings model to store API key configurations for subfinder data sources
- Create SubfinderProviderConfigService to generate provider configuration files dynamically
- Add subfinder_provider_settings views and serializers for API key management
- Update subdomain_discovery_flow to support provider configuration file generation and passing to subfinder
- Update command templates to use provider config file and remove recursive flag for better source coverage
- Add frontend settings page for managing API keys at /settings/api-keys
- Add frontend hooks and services for API key settings management
- Update sidebar navigation to include API keys settings link
- Add internationalization support for new API keys settings UI (English and Chinese)
- Improves code maintainability by organizing related models and serializers into logical modules
2026-01-05 10:00:19 +08:00
yyhuni
802e967906 docs: add online demo link to README
- Add new "🌐 在线 Demo" section with live demo URL
- Include disclaimer note that demo is UI-only without backend database
- Improve documentation to help users quickly access and test the application
2026-01-04 19:19:33 +08:00
github-actions[bot]
e446326416 chore: bump version to v1.3.14 2026-01-04 11:02:14 +00:00
yyhuni
e0abb3ce7b Merge branch 'dev' 2026-01-04 18:57:49 +08:00
yyhuni
d418baaf79 feat(mock,scan): add comprehensive mock data and improve system load management
- Add mock data files for directories, fingerprints, IP addresses, notification settings, nuclei templates, search, system logs, tools, and wordlists
- Update mock index to export new mock data modules
- Increase SCAN_LOAD_CHECK_INTERVAL from 30 to 180 seconds for better system stability
- Improve load check logging message to clarify OOM prevention strategy
- Enhance mock data infrastructure to support frontend development and testing
2026-01-04 18:52:08 +08:00
github-actions[bot]
f8da408580 chore: bump version to v1.3.13-dev 2026-01-04 10:24:10 +00:00
yyhuni
7cd4354d8f feat(scan,asset): add scan logging system and improve search view architecture
- Add user_logger utility for structured scan operation logging
- Create scan log views and API endpoints for retrieving scan execution logs
- Add scan-log-list component and use-scan-logs hook for frontend log display
- Refactor asset search views to remove ArrayField support from pg_ivm IMMV
- Update search_service.py to JOIN original tables for array field retrieval
- Add system architecture requirements (AMD64/ARM64) to README
- Update scan flow handlers to integrate logging system
- Enhance scan progress dialog with log viewer integration
- Add ANSI log viewer component for formatted log display
- Update scan service API to support log retrieval endpoints
- Migrate database schema to support new logging infrastructure
- Add internationalization strings for scan logs (en/zh)
This change improves observability of scan operations and resolves pg_ivm limitations with ArrayField types by fetching array data from original tables via JOIN operations.
2026-01-04 18:19:45 +08:00
yyhuni
6bf35a760f chore(docker): configure Prefect home directory in worker image
- Add PREFECT_HOME environment variable pointing to /app/.prefect
- Create Prefect configuration directory to prevent home directory warnings
- Update step numbering in Dockerfile comments for clarity
- Ensures Prefect can properly initialize configuration without relying on user home directory
2026-01-04 10:39:11 +08:00
github-actions[bot]
be9ecadffb chore: bump version to v1.3.12-dev 2026-01-04 01:05:00 +00:00
yyhuni
7b7bbed634 Update README.md 2026-01-03 22:15:35 +08:00
github-actions[bot]
08372588a4 chore: bump version to v1.2.15 2026-01-01 15:44:15 +00:00
61 changed files with 3755 additions and 702 deletions

View File

@@ -25,6 +25,13 @@
---
## 🌐 在线 Demo
👉 **[https://xingrin.vercel.app/](https://xingrin.vercel.app/)**
> ⚠️ 仅用于 UI 展示,未接入后端数据库
---
<p align="center">
<b>🎨 现代化 UI </b>
@@ -198,6 +205,7 @@ url="/api/v1" && status!="404"
### 环境要求
- **操作系统**: Ubuntu 20.04+ / Debian 11+
- **系统架构**: AMD64 (x86_64) / ARM64 (aarch64)
- **硬件**: 2核 4G 内存起步,20GB+ 磁盘空间
### 一键安装

View File

@@ -1 +1 @@
v1.3.11-dev
v1.3.14

View File

@@ -6,6 +6,18 @@
包含:
1. asset_search_view - Website 搜索视图
2. endpoint_search_view - Endpoint 搜索视图
重要限制:
⚠️ pg_ivm 不支持数组类型字段(ArrayField),因为其使用 anyarray 伪类型进行比较时,
PostgreSQL 无法确定空数组的元素类型,导致错误:
"cannot determine element type of \"anyarray\" argument"
因此,所有 ArrayField 字段(tech, matched_gf_patterns 等)已从 IMMV 中移除,
搜索时通过 JOIN 原表获取。
如需添加新的数组字段,请:
1. 不要将其包含在 IMMV 视图中
2. 在搜索服务中通过 JOIN 原表获取
"""
from django.db import migrations
@@ -33,6 +45,8 @@ class Migration(migrations.Migration):
# ==================== Website IMMV ====================
# 2. 创建 asset_search_view IMMV
# ⚠️ 注意:不包含 w.tech 数组字段(pg_ivm 不支持 ArrayField)
# 数组字段通过 search_service.py 中 JOIN website 表获取
migrations.RunSQL(
sql="""
SELECT pgivm.create_immv('asset_search_view', $$
@@ -41,7 +55,6 @@ class Migration(migrations.Migration):
w.url,
w.host,
w.title,
w.tech,
w.status_code,
w.response_headers,
w.response_body,
@@ -85,10 +98,6 @@ class Migration(migrations.Migration):
CREATE INDEX IF NOT EXISTS asset_search_view_body_trgm_idx
ON asset_search_view USING gin (response_body gin_trgm_ops);
-- tech 数组索引
CREATE INDEX IF NOT EXISTS asset_search_view_tech_idx
ON asset_search_view USING gin (tech);
-- status_code 索引
CREATE INDEX IF NOT EXISTS asset_search_view_status_idx
ON asset_search_view (status_code);
@@ -104,7 +113,6 @@ class Migration(migrations.Migration):
DROP INDEX IF EXISTS asset_search_view_url_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_headers_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_body_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_tech_idx;
DROP INDEX IF EXISTS asset_search_view_status_idx;
DROP INDEX IF EXISTS asset_search_view_created_idx;
"""
@@ -113,6 +121,8 @@ class Migration(migrations.Migration):
# ==================== Endpoint IMMV ====================
# 4. 创建 endpoint_search_view IMMV
# ⚠️ 注意:不包含 e.tech 和 e.matched_gf_patterns 数组字段(pg_ivm 不支持 ArrayField)
# 数组字段通过 search_service.py 中 JOIN endpoint 表获取
migrations.RunSQL(
sql="""
SELECT pgivm.create_immv('endpoint_search_view', $$
@@ -121,7 +131,6 @@ class Migration(migrations.Migration):
e.url,
e.host,
e.title,
e.tech,
e.status_code,
e.response_headers,
e.response_body,
@@ -130,7 +139,6 @@ class Migration(migrations.Migration):
e.webserver,
e.location,
e.vhost,
e.matched_gf_patterns,
e.created_at,
e.target_id
FROM endpoint e
@@ -166,10 +174,6 @@ class Migration(migrations.Migration):
CREATE INDEX IF NOT EXISTS endpoint_search_view_body_trgm_idx
ON endpoint_search_view USING gin (response_body gin_trgm_ops);
-- tech 数组索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_tech_idx
ON endpoint_search_view USING gin (tech);
-- status_code 索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_status_idx
ON endpoint_search_view (status_code);
@@ -185,7 +189,6 @@ class Migration(migrations.Migration):
DROP INDEX IF EXISTS endpoint_search_view_url_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_headers_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_body_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_tech_idx;
DROP INDEX IF EXISTS endpoint_search_view_status_idx;
DROP INDEX IF EXISTS endpoint_search_view_created_idx;
"""

View File

@@ -37,46 +37,55 @@ VIEW_MAPPING = {
'endpoint': 'endpoint_search_view',
}
# 资产类型到原表名的映射(用于 JOIN 获取数组字段)
# ⚠️ 重要pg_ivm 不支持 ArrayField所有数组字段必须从原表 JOIN 获取
TABLE_MAPPING = {
'website': 'website',
'endpoint': 'endpoint',
}
# 有效的资产类型
VALID_ASSET_TYPES = {'website', 'endpoint'}
# Website 查询字段
# Website 查询字段(v=视图,t=原表)
# ⚠️ 注意t.tech 从原表获取,因为 pg_ivm 不支持 ArrayField
WEBSITE_SELECT_FIELDS = """
id,
url,
host,
title,
tech,
status_code,
response_headers,
response_body,
content_type,
content_length,
webserver,
location,
vhost,
created_at,
target_id
v.id,
v.url,
v.host,
v.title,
t.tech, -- ArrayField从 website 表 JOIN 获取
v.status_code,
v.response_headers,
v.response_body,
v.content_type,
v.content_length,
v.webserver,
v.location,
v.vhost,
v.created_at,
v.target_id
"""
# Endpoint 查询字段(包含 matched_gf_patterns)
# Endpoint 查询字段
# ⚠️ 注意t.tech 和 t.matched_gf_patterns 从原表获取,因为 pg_ivm 不支持 ArrayField
ENDPOINT_SELECT_FIELDS = """
id,
url,
host,
title,
tech,
status_code,
response_headers,
response_body,
content_type,
content_length,
webserver,
location,
vhost,
matched_gf_patterns,
created_at,
target_id
v.id,
v.url,
v.host,
v.title,
t.tech, -- ArrayField从 endpoint 表 JOIN 获取
v.status_code,
v.response_headers,
v.response_body,
v.content_type,
v.content_length,
v.webserver,
v.location,
v.vhost,
t.matched_gf_patterns, -- ArrayField从 endpoint 表 JOIN 获取
v.created_at,
v.target_id
"""
@@ -119,8 +128,8 @@ class SearchQueryParser:
# 检查是否包含操作符语法,如果不包含则作为 host 模糊搜索
if not cls.CONDITION_PATTERN.search(query):
# 裸文本,默认作为 host 模糊搜索
return "host ILIKE %s", [f"%{query}%"]
# 裸文本,默认作为 host 模糊搜索v 是视图别名)
return "v.host ILIKE %s", [f"%{query}%"]
# 按 || 分割为 OR 组
or_groups = cls._split_by_or(query)
@@ -273,45 +282,45 @@ class SearchQueryParser:
def _build_like_condition(cls, field: str, value: str, is_array: bool) -> Tuple[str, List[Any]]:
"""构建模糊匹配条件"""
if is_array:
# 数组字段:检查数组中是否有元素包含该值
return f"EXISTS (SELECT 1 FROM unnest({field}) AS t WHERE t ILIKE %s)", [f"%{value}%"]
# 数组字段:检查数组中是否有元素包含该值(从原表 t 获取)
return f"EXISTS (SELECT 1 FROM unnest(t.{field}) AS elem WHERE elem ILIKE %s)", [f"%{value}%"]
elif field == 'status_code':
# 状态码是整数,模糊匹配转为精确匹配
try:
return f"{field} = %s", [int(value)]
return f"v.{field} = %s", [int(value)]
except ValueError:
return f"{field}::text ILIKE %s", [f"%{value}%"]
return f"v.{field}::text ILIKE %s", [f"%{value}%"]
else:
return f"{field} ILIKE %s", [f"%{value}%"]
return f"v.{field} ILIKE %s", [f"%{value}%"]
@classmethod
def _build_exact_condition(cls, field: str, value: str, is_array: bool) -> Tuple[str, List[Any]]:
"""构建精确匹配条件"""
if is_array:
# 数组字段:检查数组中是否包含该精确值
return f"%s = ANY({field})", [value]
# 数组字段:检查数组中是否包含该精确值(从原表 t 获取)
return f"%s = ANY(t.{field})", [value]
elif field == 'status_code':
# 状态码是整数
try:
return f"{field} = %s", [int(value)]
return f"v.{field} = %s", [int(value)]
except ValueError:
return f"{field}::text = %s", [value]
return f"v.{field}::text = %s", [value]
else:
return f"{field} = %s", [value]
return f"v.{field} = %s", [value]
@classmethod
def _build_not_equal_condition(cls, field: str, value: str, is_array: bool) -> Tuple[str, List[Any]]:
"""构建不等于条件"""
if is_array:
# 数组字段:检查数组中不包含该值
return f"NOT (%s = ANY({field}))", [value]
# 数组字段:检查数组中不包含该值(从原表 t 获取)
return f"NOT (%s = ANY(t.{field}))", [value]
elif field == 'status_code':
try:
return f"({field} IS NULL OR {field} != %s)", [int(value)]
return f"(v.{field} IS NULL OR v.{field} != %s)", [int(value)]
except ValueError:
return f"({field} IS NULL OR {field}::text != %s)", [value]
return f"(v.{field} IS NULL OR v.{field}::text != %s)", [value]
else:
return f"({field} IS NULL OR {field} != %s)", [value]
return f"(v.{field} IS NULL OR v.{field} != %s)", [value]
AssetType = Literal['website', 'endpoint']
@@ -339,15 +348,18 @@ class AssetSearchService:
"""
where_clause, params = SearchQueryParser.parse(query)
# 根据资产类型选择视图和字段
# 根据资产类型选择视图、原表和字段
view_name = VIEW_MAPPING.get(asset_type, 'asset_search_view')
table_name = TABLE_MAPPING.get(asset_type, 'website')
select_fields = ENDPOINT_SELECT_FIELDS if asset_type == 'endpoint' else WEBSITE_SELECT_FIELDS
# JOIN 原表获取数组字段(tech, matched_gf_patterns)
sql = f"""
SELECT {select_fields}
FROM {view_name}
FROM {view_name} v
JOIN {table_name} t ON v.id = t.id
WHERE {where_clause}
ORDER BY created_at DESC
ORDER BY v.created_at DESC
"""
# 添加 LIMIT
@@ -383,10 +395,12 @@ class AssetSearchService:
"""
where_clause, params = SearchQueryParser.parse(query)
# 根据资产类型选择视图
# 根据资产类型选择视图和原表
view_name = VIEW_MAPPING.get(asset_type, 'asset_search_view')
table_name = TABLE_MAPPING.get(asset_type, 'website')
sql = f"SELECT COUNT(*) FROM {view_name} WHERE {where_clause}"
# JOIN 原表以支持数组字段查询
sql = f"SELECT COUNT(*) FROM {view_name} v JOIN {table_name} t ON v.id = t.id WHERE {where_clause}"
try:
with connection.cursor() as cursor:
@@ -419,8 +433,9 @@ class AssetSearchService:
"""
where_clause, params = SearchQueryParser.parse(query)
# 根据资产类型选择视图和字段
# 根据资产类型选择视图、原表和字段
view_name = VIEW_MAPPING.get(asset_type, 'asset_search_view')
table_name = TABLE_MAPPING.get(asset_type, 'website')
select_fields = ENDPOINT_SELECT_FIELDS if asset_type == 'endpoint' else WEBSITE_SELECT_FIELDS
# 使用 OFFSET/LIMIT 分批查询Django 不支持命名游标)
@@ -428,11 +443,13 @@ class AssetSearchService:
try:
while True:
# JOIN 原表获取数组字段
sql = f"""
SELECT {select_fields}
FROM {view_name}
FROM {view_name} v
JOIN {table_name} t ON v.id = t.id
WHERE {where_clause}
ORDER BY created_at DESC
ORDER BY v.created_at DESC
LIMIT {batch_size} OFFSET {offset}
"""

View File

@@ -13,12 +13,14 @@ SCAN_TOOLS_BASE_PATH = getattr(settings, 'SCAN_TOOLS_BASE_PATH', '/usr/local/bin
SUBDOMAIN_DISCOVERY_COMMANDS = {
'subfinder': {
# 默认使用所有数据源(更全面,略慢),并始终开启递归
# -all 使用所有数据源
# -recursive 对支持递归的源启用递归枚举(默认开启)
'base': "subfinder -d {domain} -all -recursive -o '{output_file}' -silent",
# 使用所有数据源(包括付费源,只要配置了 API key)
# -all 使用所有数据源(slow 但全面)
# -v 显示详细输出,包括使用的数据源(调试用)
# 注意:不要加 -recursive,它会排除不支持递归的源(如 fofa)
'base': "subfinder -d {domain} -all -o '{output_file}' -v",
'optional': {
'threads': '-t {threads}', # 控制并发 goroutine 数
'provider_config': "-pc '{provider_config}'", # Provider 配置文件路径
}
},

View File

@@ -33,7 +33,7 @@ from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_completed,
on_scan_flow_failed,
)
from apps.scan.utils import config_parser, build_scan_command, ensure_wordlist_local
from apps.scan.utils import config_parser, build_scan_command, ensure_wordlist_local, user_log
logger = logging.getLogger(__name__)
@@ -413,6 +413,7 @@ def _run_scans_concurrently(
logger.info("="*60)
logger.info("使用工具: %s (并发模式, max_workers=%d)", tool_name, max_workers)
logger.info("="*60)
user_log(scan_id, "directory_scan", f"Running {tool_name}")
# 如果配置了 wordlist_name则先确保本地存在对应的字典文件含 hash 校验)
wordlist_name = tool_config.get('wordlist_name')
@@ -467,6 +468,11 @@ def _run_scans_concurrently(
total_tasks = len(scan_params_list)
logger.info("开始分批执行 %d 个扫描任务(每批 %d 个)...", total_tasks, max_workers)
# 进度里程碑跟踪
last_progress_percent = 0
tool_directories = 0
tool_processed = 0
batch_num = 0
for batch_start in range(0, total_tasks, max_workers):
batch_end = min(batch_start + max_workers, total_tasks)
@@ -498,7 +504,9 @@ def _run_scans_concurrently(
result = future.result() # 阻塞等待单个任务完成
directories_found = result.get('created_directories', 0)
total_directories += directories_found
tool_directories += directories_found
processed_sites_count += 1
tool_processed += 1
logger.info(
"✓ [%d/%d] 站点扫描完成: %s - 发现 %d 个目录",
@@ -517,6 +525,19 @@ def _run_scans_concurrently(
"✗ [%d/%d] 站点扫描失败: %s - 错误: %s",
idx, len(sites), site_url, exc
)
# 进度里程碑:每 20% 输出一次
current_progress = int((batch_end / total_tasks) * 100)
if current_progress >= last_progress_percent + 20:
user_log(scan_id, "directory_scan", f"Progress: {batch_end}/{total_tasks} sites scanned")
last_progress_percent = (current_progress // 20) * 20
# 工具完成日志(开发者日志 + 用户日志)
logger.info(
"✓ 工具 %s 执行完成 - 已处理站点: %d/%d, 发现目录: %d",
tool_name, tool_processed, total_tasks, tool_directories
)
user_log(scan_id, "directory_scan", f"{tool_name} completed: found {tool_directories} directories")
# 输出汇总信息
if failed_sites:
@@ -605,6 +626,8 @@ def directory_scan_flow(
"="*60
)
user_log(scan_id, "directory_scan", "Starting directory scan")
# 参数验证
if scan_id is None:
raise ValueError("scan_id 不能为空")
@@ -625,7 +648,8 @@ def directory_scan_flow(
sites_file, site_count = _export_site_urls(target_id, target_name, directory_scan_dir)
if site_count == 0:
logger.warning("目标下没有站点,跳过目录扫描")
logger.warning("跳过目录扫描:没有站点可扫描 - Scan ID: %s", scan_id)
user_log(scan_id, "directory_scan", "Skipped: no sites to scan", "warning")
return {
'success': True,
'scan_id': scan_id,
@@ -664,7 +688,9 @@ def directory_scan_flow(
logger.warning("所有站点扫描均失败 - 总站点数: %d, 失败数: %d", site_count, len(failed_sites))
# 不抛出异常,让扫描继续
logger.info("="*60 + "\n✓ 目录扫描完成\n" + "="*60)
# 记录 Flow 完成
logger.info("✓ 目录扫描完成 - 发现目录: %d", total_directories)
user_log(scan_id, "directory_scan", f"directory_scan completed: found {total_directories} directories")
return {
'success': True,

View File

@@ -29,7 +29,7 @@ from apps.scan.tasks.fingerprint_detect import (
export_urls_for_fingerprint_task,
run_xingfinger_and_stream_update_tech_task,
)
from apps.scan.utils import build_scan_command
from apps.scan.utils import build_scan_command, user_log
from apps.scan.utils.fingerprint_helpers import get_fingerprint_paths
logger = logging.getLogger(__name__)
@@ -168,6 +168,7 @@ def _run_fingerprint_detect(
"开始执行 %s 指纹识别 - URL数: %d, 超时: %ds, 指纹库: %s",
tool_name, url_count, timeout, list(fingerprint_paths.keys())
)
user_log(scan_id, "fingerprint_detect", f"Running {tool_name}: {command}")
# 6. 执行扫描任务
try:
@@ -190,17 +191,21 @@ def _run_fingerprint_detect(
'fingerprint_libs': list(fingerprint_paths.keys())
}
tool_updated = result.get('updated_count', 0)
logger.info(
"✓ 工具 %s 执行完成 - 处理记录: %d, 更新: %d, 未找到: %d",
tool_name,
result.get('processed_records', 0),
result.get('updated_count', 0),
tool_updated,
result.get('not_found_count', 0)
)
user_log(scan_id, "fingerprint_detect", f"{tool_name} completed: identified {tool_updated} fingerprints")
except Exception as exc:
failed_tools.append({'tool': tool_name, 'reason': str(exc)})
reason = str(exc)
failed_tools.append({'tool': tool_name, 'reason': reason})
logger.error("工具 %s 执行失败: %s", tool_name, exc, exc_info=True)
user_log(scan_id, "fingerprint_detect", f"{tool_name} failed: {reason}", "error")
if failed_tools:
logger.warning(
@@ -272,6 +277,8 @@ def fingerprint_detect_flow(
"="*60
)
user_log(scan_id, "fingerprint_detect", "Starting fingerprint detection")
# 参数验证
if scan_id is None:
raise ValueError("scan_id 不能为空")
@@ -293,7 +300,8 @@ def fingerprint_detect_flow(
urls_file, url_count = _export_urls(target_id, fingerprint_dir, source)
if url_count == 0:
logger.warning("目标下没有可用的 URL跳过指纹识别")
logger.warning("跳过指纹识别:没有 URL 可扫描 - Scan ID: %s", scan_id)
user_log(scan_id, "fingerprint_detect", "Skipped: no URLs to scan", "warning")
return {
'success': True,
'scan_id': scan_id,
@@ -332,8 +340,6 @@ def fingerprint_detect_flow(
source=source
)
logger.info("="*60 + "\n✓ 指纹识别完成\n" + "="*60)
# 动态生成已执行的任务列表
executed_tasks = ['export_urls_for_fingerprint']
executed_tasks.extend([f'run_xingfinger ({tool})' for tool in tool_stats.keys()])
@@ -344,6 +350,10 @@ def fingerprint_detect_flow(
total_created = sum(stats['result'].get('created_count', 0) for stats in tool_stats.values())
total_snapshots = sum(stats['result'].get('snapshot_count', 0) for stats in tool_stats.values())
# 记录 Flow 完成
logger.info("✓ 指纹识别完成 - 识别指纹: %d", total_updated)
user_log(scan_id, "fingerprint_detect", f"fingerprint_detect completed: identified {total_updated} fingerprints")
successful_tools = [name for name in enabled_tools.keys()
if name not in [f['tool'] for f in failed_tools]]

View File

@@ -28,7 +28,7 @@ from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_completed,
on_scan_flow_failed,
)
from apps.scan.utils import config_parser, build_scan_command
from apps.scan.utils import config_parser, build_scan_command, user_log
logger = logging.getLogger(__name__)
@@ -265,6 +265,7 @@ def _run_scans_sequentially(
# 3. 执行扫描任务
logger.info("开始执行 %s 扫描(超时: %d秒)...", tool_name, config_timeout)
user_log(scan_id, "port_scan", f"Running {tool_name}: {command}")
try:
# 直接调用 task串行执行
@@ -286,26 +287,31 @@ def _run_scans_sequentially(
'result': result,
'timeout': config_timeout
}
processed_records += result.get('processed_records', 0)
tool_records = result.get('processed_records', 0)
processed_records += tool_records
logger.info(
"✓ 工具 %s 流式处理完成 - 记录数: %d",
tool_name, result.get('processed_records', 0)
tool_name, tool_records
)
user_log(scan_id, "port_scan", f"{tool_name} completed: found {tool_records} ports")
except subprocess.TimeoutExpired as exc:
# 超时异常单独处理
# 注意:流式处理任务超时时,已解析的数据已保存到数据库
reason = f"执行超时(配置: {config_timeout}秒)"
reason = f"timeout after {config_timeout}s"
failed_tools.append({'tool': tool_name, 'reason': reason})
logger.warning(
"⚠️ 工具 %s 执行超时 - 超时配置: %d\n"
"注意:超时前已解析的端口数据已保存到数据库,但扫描未完全完成。",
tool_name, config_timeout
)
user_log(scan_id, "port_scan", f"{tool_name} failed: {reason}", "error")
except Exception as exc:
# 其他异常
failed_tools.append({'tool': tool_name, 'reason': str(exc)})
reason = str(exc)
failed_tools.append({'tool': tool_name, 'reason': reason})
logger.error("工具 %s 执行失败: %s", tool_name, exc, exc_info=True)
user_log(scan_id, "port_scan", f"{tool_name} failed: {reason}", "error")
if failed_tools:
logger.warning(
@@ -420,6 +426,8 @@ def port_scan_flow(
"="*60
)
user_log(scan_id, "port_scan", "Starting port scan")
# Step 0: 创建工作目录
from apps.scan.utils import setup_scan_directory
port_scan_dir = setup_scan_directory(scan_workspace_dir, 'port_scan')
@@ -428,7 +436,8 @@ def port_scan_flow(
targets_file, target_count, target_type = _export_scan_targets(target_id, port_scan_dir)
if target_count == 0:
logger.warning("目标下没有可扫描的地址,跳过端口扫描")
logger.warning("跳过端口扫描:没有目标可扫描 - Scan ID: %s", scan_id)
user_log(scan_id, "port_scan", "Skipped: no targets to scan", "warning")
return {
'success': True,
'scan_id': scan_id,
@@ -467,7 +476,9 @@ def port_scan_flow(
target_name=target_name
)
logger.info("="*60 + "\n✓ 端口扫描完成\n" + "="*60)
# 记录 Flow 完成
logger.info("✓ 端口扫描完成 - 发现端口: %d", processed_records)
user_log(scan_id, "port_scan", f"port_scan completed: found {processed_records} ports")
# 动态生成已执行的任务列表
executed_tasks = ['export_scan_targets', 'parse_config']

View File

@@ -17,6 +17,7 @@ from apps.common.prefect_django_setup import setup_django_for_prefect
import logging
import os
import subprocess
import time
from pathlib import Path
from typing import Callable
from prefect import flow
@@ -26,7 +27,7 @@ from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_completed,
on_scan_flow_failed,
)
from apps.scan.utils import config_parser, build_scan_command
from apps.scan.utils import config_parser, build_scan_command, user_log
logger = logging.getLogger(__name__)
@@ -198,6 +199,7 @@ def _run_scans_sequentially(
"开始执行 %s 站点扫描 - URL数: %d, 最终超时: %ds",
tool_name, total_urls, timeout
)
user_log(scan_id, "site_scan", f"Running {tool_name}: {command}")
# 3. 执行扫描任务
try:
@@ -218,29 +220,35 @@ def _run_scans_sequentially(
'result': result,
'timeout': timeout
}
processed_records += result.get('processed_records', 0)
tool_records = result.get('processed_records', 0)
tool_created = result.get('created_websites', 0)
processed_records += tool_records
logger.info(
"✓ 工具 %s 流式处理完成 - 处理记录: %d, 创建站点: %d, 跳过: %d",
tool_name,
result.get('processed_records', 0),
result.get('created_websites', 0),
tool_records,
tool_created,
result.get('skipped_no_subdomain', 0) + result.get('skipped_failed', 0)
)
user_log(scan_id, "site_scan", f"{tool_name} completed: found {tool_created} websites")
except subprocess.TimeoutExpired as exc:
# 超时异常单独处理
reason = f"执行超时(配置: {timeout}秒)"
reason = f"timeout after {timeout}s"
failed_tools.append({'tool': tool_name, 'reason': reason})
logger.warning(
"⚠️ 工具 %s 执行超时 - 超时配置: %d\n"
"注意:超时前已解析的站点数据已保存到数据库,但扫描未完全完成。",
tool_name, timeout
)
user_log(scan_id, "site_scan", f"{tool_name} failed: {reason}", "error")
except Exception as exc:
# 其他异常
failed_tools.append({'tool': tool_name, 'reason': str(exc)})
reason = str(exc)
failed_tools.append({'tool': tool_name, 'reason': reason})
logger.error("工具 %s 执行失败: %s", tool_name, exc, exc_info=True)
user_log(scan_id, "site_scan", f"{tool_name} failed: {reason}", "error")
if failed_tools:
logger.warning(
@@ -379,6 +387,8 @@ def site_scan_flow(
if not scan_workspace_dir:
raise ValueError("scan_workspace_dir 不能为空")
user_log(scan_id, "site_scan", "Starting site scan")
# Step 0: 创建工作目录
from apps.scan.utils import setup_scan_directory
site_scan_dir = setup_scan_directory(scan_workspace_dir, 'site_scan')
@@ -389,7 +399,8 @@ def site_scan_flow(
)
if total_urls == 0:
logger.warning("目标下没有可用的站点URL,跳过站点扫描")
logger.warning("跳过站点扫描:没有站点 URL 可扫描 - Scan ID: %s", scan_id)
user_log(scan_id, "site_scan", "Skipped: no site URLs to scan", "warning")
return {
'success': True,
'scan_id': scan_id,
@@ -432,8 +443,6 @@ def site_scan_flow(
target_name=target_name
)
logger.info("="*60 + "\n✓ 站点扫描完成\n" + "="*60)
# 动态生成已执行的任务列表
executed_tasks = ['export_site_urls', 'parse_config']
executed_tasks.extend([f'run_and_stream_save_websites ({tool})' for tool in tool_stats.keys()])
@@ -443,6 +452,10 @@ def site_scan_flow(
total_skipped_no_subdomain = sum(stats['result'].get('skipped_no_subdomain', 0) for stats in tool_stats.values())
total_skipped_failed = sum(stats['result'].get('skipped_failed', 0) for stats in tool_stats.values())
# 记录 Flow 完成
logger.info("✓ 站点扫描完成 - 创建站点: %d", total_created)
user_log(scan_id, "site_scan", f"site_scan completed: found {total_created} websites")
return {
'success': True,
'scan_id': scan_id,

View File

@@ -30,7 +30,7 @@ from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_completed,
on_scan_flow_failed,
)
from apps.scan.utils import build_scan_command, ensure_wordlist_local
from apps.scan.utils import build_scan_command, ensure_wordlist_local, user_log
from apps.engine.services.wordlist_service import WordlistService
from apps.common.normalizer import normalize_domain
from apps.common.validators import validate_domain
@@ -77,7 +77,9 @@ def _validate_and_normalize_target(target_name: str) -> str:
def _run_scans_parallel(
enabled_tools: dict,
domain_name: str,
result_dir: Path
result_dir: Path,
scan_id: int,
provider_config_path: str = None
) -> tuple[list, list, list]:
"""
并行运行所有启用的子域名扫描工具
@@ -86,6 +88,8 @@ def _run_scans_parallel(
enabled_tools: 启用的工具配置字典 {'tool_name': {'timeout': 600, ...}}
domain_name: 目标域名
result_dir: 结果输出目录
scan_id: 扫描任务 ID(用于记录日志)
provider_config_path: Provider 配置文件路径(可选,用于 subfinder)
Returns:
tuple: (result_files, failed_tools, successful_tool_names)
@@ -110,13 +114,19 @@ def _run_scans_parallel(
# 1.2 构建完整命令(变量替换)
try:
command_params = {
'domain': domain_name, # 对应 {domain}
'output_file': output_file # 对应 {output_file}
}
# 如果是 subfinder 且有 provider_config添加到参数
if tool_name == 'subfinder' and provider_config_path:
command_params['provider_config'] = provider_config_path
command = build_scan_command(
tool_name=tool_name,
scan_type='subdomain_discovery',
command_params={
'domain': domain_name, # 对应 {domain}
'output_file': output_file # 对应 {output_file}
},
command_params=command_params,
tool_config=tool_config
)
except Exception as e:
@@ -137,6 +147,9 @@ def _run_scans_parallel(
f"提交任务 - 工具: {tool_name}, 超时: {timeout}s, 输出: {output_file}"
)
# 记录工具开始执行日志
user_log(scan_id, "subdomain_discovery", f"Running {tool_name}: {command}")
future = run_subdomain_discovery_task.submit(
tool=tool_name,
command=command,
@@ -164,16 +177,19 @@ def _run_scans_parallel(
if result:
result_files.append(result)
logger.info("✓ 扫描工具 %s 执行成功: %s", tool_name, result)
user_log(scan_id, "subdomain_discovery", f"{tool_name} completed")
else:
failure_msg = f"{tool_name}: 未生成结果文件"
failures.append(failure_msg)
failed_tools.append({'tool': tool_name, 'reason': '未生成结果文件'})
logger.warning("⚠️ 扫描工具 %s 未生成结果文件", tool_name)
user_log(scan_id, "subdomain_discovery", f"{tool_name} failed: no output file", "error")
except Exception as e:
failure_msg = f"{tool_name}: {str(e)}"
failures.append(failure_msg)
failed_tools.append({'tool': tool_name, 'reason': str(e)})
logger.warning("⚠️ 扫描工具 %s 执行失败: %s", tool_name, str(e))
user_log(scan_id, "subdomain_discovery", f"{tool_name} failed: {str(e)}", "error")
# 4. 检查是否有成功的工具
if not result_files:
@@ -203,7 +219,8 @@ def _run_single_tool(
tool_config: dict,
command_params: dict,
result_dir: Path,
scan_type: str = 'subdomain_discovery'
scan_type: str = 'subdomain_discovery',
scan_id: int = None
) -> str:
"""
运行单个扫描工具
@@ -214,6 +231,7 @@ def _run_single_tool(
command_params: 命令参数
result_dir: 结果目录
scan_type: 扫描类型
scan_id: 扫描 ID(用于记录用户日志)
Returns:
str: 输出文件路径,失败返回空字符串
@@ -242,7 +260,9 @@ def _run_single_tool(
if timeout == 'auto':
timeout = 3600
logger.info(f"执行 {tool_name}: timeout={timeout}s")
logger.info(f"执行 {tool_name}: {command}")
if scan_id:
user_log(scan_id, scan_type, f"Running {tool_name}: {command}")
try:
result = run_subdomain_discovery_task(
@@ -401,7 +421,6 @@ def subdomain_discovery_flow(
logger.warning("目标域名无效,跳过子域名发现扫描: %s", e)
return _empty_result(scan_id, target_name, scan_workspace_dir)
# 验证成功后打印日志
logger.info(
"="*60 + "\n" +
"开始子域名发现扫描\n" +
@@ -410,6 +429,7 @@ def subdomain_discovery_flow(
f" Workspace: {scan_workspace_dir}\n" +
"="*60
)
user_log(scan_id, "subdomain_discovery", f"Starting subdomain discovery for {domain_name}")
# 解析配置
passive_tools = scan_config.get('passive_tools', {})
@@ -428,24 +448,37 @@ def subdomain_discovery_flow(
failed_tools = []
successful_tool_names = []
# ==================== Stage 1: 被动收集(并行)====================
logger.info("=" * 40)
logger.info("Stage 1: 被动收集(并行)")
logger.info("=" * 40)
# ==================== 生成 Provider 配置文件 ====================
# 为 subfinder 生成第三方数据源配置
provider_config_path = None
try:
from apps.scan.services.subfinder_provider_config_service import SubfinderProviderConfigService
provider_config_service = SubfinderProviderConfigService()
provider_config_path = provider_config_service.generate(str(result_dir))
if provider_config_path:
logger.info(f"Provider 配置文件已生成: {provider_config_path}")
user_log(scan_id, "subdomain_discovery", "Provider config generated for subfinder")
except Exception as e:
logger.warning(f"生成 Provider 配置文件失败: {e}")
# ==================== Stage 1: 被动收集(并行)====================
if enabled_passive_tools:
logger.info("=" * 40)
logger.info("Stage 1: 被动收集(并行)")
logger.info("=" * 40)
logger.info("启用工具: %s", ', '.join(enabled_passive_tools.keys()))
user_log(scan_id, "subdomain_discovery", f"Stage 1: passive collection ({', '.join(enabled_passive_tools.keys())})")
result_files, stage1_failed, stage1_success = _run_scans_parallel(
enabled_tools=enabled_passive_tools,
domain_name=domain_name,
result_dir=result_dir
result_dir=result_dir,
scan_id=scan_id,
provider_config_path=provider_config_path
)
all_result_files.extend(result_files)
failed_tools.extend(stage1_failed)
successful_tool_names.extend(stage1_success)
executed_tasks.extend([f'passive ({tool})' for tool in stage1_success])
else:
logger.warning("未启用任何被动收集工具")
# 合并 Stage 1 结果
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
@@ -456,7 +489,6 @@ def subdomain_discovery_flow(
else:
# 创建空文件
Path(current_result).touch()
logger.warning("Stage 1 无结果,创建空文件")
# ==================== Stage 2: 字典爆破(可选)====================
bruteforce_enabled = bruteforce_config.get('enabled', False)
@@ -464,6 +496,7 @@ def subdomain_discovery_flow(
logger.info("=" * 40)
logger.info("Stage 2: 字典爆破")
logger.info("=" * 40)
user_log(scan_id, "subdomain_discovery", "Stage 2: bruteforce")
bruteforce_tool_config = bruteforce_config.get('subdomain_bruteforce', {})
wordlist_name = bruteforce_tool_config.get('wordlist_name', 'dns_wordlist.txt')
@@ -496,22 +529,16 @@ def subdomain_discovery_flow(
**bruteforce_tool_config,
'timeout': timeout_value,
}
logger.info(
"subdomain_bruteforce 使用自动 timeout: %s 秒 (字典行数=%s, 3秒/行)",
timeout_value,
line_count_int,
)
brute_output = str(result_dir / f"subs_brute_{timestamp}.txt")
brute_result = _run_single_tool(
tool_name='subdomain_bruteforce',
tool_config=bruteforce_tool_config,
command_params={
'domain': domain_name,
'wordlist': local_wordlist_path,
'output_file': brute_output
},
result_dir=result_dir
result_dir=result_dir,
scan_id=scan_id
)
if brute_result:
@@ -522,11 +549,16 @@ def subdomain_discovery_flow(
)
successful_tool_names.append('subdomain_bruteforce')
executed_tasks.append('bruteforce')
logger.info("✓ subdomain_bruteforce 执行完成")
user_log(scan_id, "subdomain_discovery", "subdomain_bruteforce completed")
else:
failed_tools.append({'tool': 'subdomain_bruteforce', 'reason': '执行失败'})
logger.warning("⚠️ subdomain_bruteforce 执行失败")
user_log(scan_id, "subdomain_discovery", "subdomain_bruteforce failed: execution failed", "error")
except Exception as exc:
logger.warning("字典准备失败,跳过字典爆破: %s", exc)
failed_tools.append({'tool': 'subdomain_bruteforce', 'reason': str(exc)})
logger.warning("字典准备失败,跳过字典爆破: %s", exc)
user_log(scan_id, "subdomain_discovery", f"subdomain_bruteforce failed: {str(exc)}", "error")
# ==================== Stage 3: 变异生成 + 验证(可选)====================
permutation_enabled = permutation_config.get('enabled', False)
@@ -534,6 +566,7 @@ def subdomain_discovery_flow(
logger.info("=" * 40)
logger.info("Stage 3: 变异生成 + 存活验证(流式管道)")
logger.info("=" * 40)
user_log(scan_id, "subdomain_discovery", "Stage 3: permutation + resolve")
permutation_tool_config = permutation_config.get('subdomain_permutation_resolve', {})
@@ -587,20 +620,19 @@ def subdomain_discovery_flow(
'tool': 'subdomain_permutation_resolve',
'reason': f"采样检测到泛解析 (膨胀率 {ratio:.1f}x)"
})
user_log(scan_id, "subdomain_discovery", f"subdomain_permutation_resolve skipped: wildcard detected (ratio {ratio:.1f}x)", "warning")
else:
# === Step 3.2: 采样通过,执行完整变异 ===
logger.info("采样检测通过,执行完整变异...")
permuted_output = str(result_dir / f"subs_permuted_{timestamp}.txt")
permuted_result = _run_single_tool(
tool_name='subdomain_permutation_resolve',
tool_config=permutation_tool_config,
command_params={
'input_file': current_result,
'output_file': permuted_output,
},
result_dir=result_dir
result_dir=result_dir,
scan_id=scan_id
)
if permuted_result:
@@ -611,15 +643,21 @@ def subdomain_discovery_flow(
)
successful_tool_names.append('subdomain_permutation_resolve')
executed_tasks.append('permutation')
logger.info("✓ subdomain_permutation_resolve 执行完成")
user_log(scan_id, "subdomain_discovery", "subdomain_permutation_resolve completed")
else:
failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': '执行失败'})
logger.warning("⚠️ subdomain_permutation_resolve 执行失败")
user_log(scan_id, "subdomain_discovery", "subdomain_permutation_resolve failed: execution failed", "error")
except subprocess.TimeoutExpired:
logger.warning(f"采样检测超时 ({SAMPLE_TIMEOUT}秒),跳过变异")
failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': '采样检测超时'})
logger.warning(f"采样检测超时 ({SAMPLE_TIMEOUT}秒),跳过变异")
user_log(scan_id, "subdomain_discovery", "subdomain_permutation_resolve failed: sample detection timeout", "error")
except Exception as e:
logger.warning(f"采样检测失败: {e},跳过变异")
failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': f'采样检测失败: {e}'})
logger.warning(f"采样检测失败: {e},跳过变异")
user_log(scan_id, "subdomain_discovery", f"subdomain_permutation_resolve failed: {str(e)}", "error")
# ==================== Stage 4: DNS 存活验证(可选)====================
# 无论是否启用 Stage 3只要 resolve.enabled 为 true 就会执行,对当前所有候选子域做统一 DNS 验证
@@ -628,6 +666,7 @@ def subdomain_discovery_flow(
logger.info("=" * 40)
logger.info("Stage 4: DNS 存活验证")
logger.info("=" * 40)
user_log(scan_id, "subdomain_discovery", "Stage 4: DNS resolve")
resolve_tool_config = resolve_config.get('subdomain_resolve', {})
@@ -651,30 +690,27 @@ def subdomain_discovery_flow(
**resolve_tool_config,
'timeout': timeout_value,
}
logger.info(
"subdomain_resolve 使用自动 timeout: %s 秒 (候选子域数=%s, 3秒/域名)",
timeout_value,
line_count_int,
)
alive_output = str(result_dir / f"subs_alive_{timestamp}.txt")
alive_result = _run_single_tool(
tool_name='subdomain_resolve',
tool_config=resolve_tool_config,
command_params={
'input_file': current_result,
'output_file': alive_output,
},
result_dir=result_dir
result_dir=result_dir,
scan_id=scan_id
)
if alive_result:
current_result = alive_result
successful_tool_names.append('subdomain_resolve')
executed_tasks.append('resolve')
logger.info("✓ subdomain_resolve 执行完成")
user_log(scan_id, "subdomain_discovery", "subdomain_resolve completed")
else:
failed_tools.append({'tool': 'subdomain_resolve', 'reason': '执行失败'})
logger.warning("⚠️ subdomain_resolve 执行失败")
user_log(scan_id, "subdomain_discovery", "subdomain_resolve failed: execution failed", "error")
# ==================== Final: 保存到数据库 ====================
logger.info("=" * 40)
@@ -695,7 +731,9 @@ def subdomain_discovery_flow(
processed_domains = save_result.get('processed_records', 0)
executed_tasks.append('save_domains')
# 记录 Flow 完成
logger.info("="*60 + "\n✓ 子域名发现扫描完成\n" + "="*60)
user_log(scan_id, "subdomain_discovery", f"subdomain_discovery completed: found {processed_domains} subdomains")
return {
'success': True,

View File

@@ -59,6 +59,8 @@ def domain_name_url_fetch_flow(
- IP 和 CIDR 类型会自动跳过waymore 等工具不支持)
- 工具会自动收集 *.target_name 的所有历史 URL无需遍历子域名
"""
from apps.scan.utils import user_log
try:
output_path = Path(output_dir)
output_path.mkdir(parents=True, exist_ok=True)
@@ -145,6 +147,9 @@ def domain_name_url_fetch_flow(
timeout,
)
# 记录工具开始执行日志
user_log(scan_id, "url_fetch", f"Running {tool_name}: {command}")
future = run_url_fetcher_task.submit(
tool_name=tool_name,
command=command,
@@ -163,22 +168,28 @@ def domain_name_url_fetch_flow(
if result and result.get("success"):
result_files.append(result["output_file"])
successful_tools.append(tool_name)
url_count = result.get("url_count", 0)
logger.info(
"✓ 工具 %s 执行成功 - 发现 URL: %d",
tool_name,
result.get("url_count", 0),
url_count,
)
user_log(scan_id, "url_fetch", f"{tool_name} completed: found {url_count} urls")
else:
reason = "未生成结果或无有效 URL"
failed_tools.append(
{
"tool": tool_name,
"reason": "未生成结果或无有效 URL",
"reason": reason,
}
)
logger.warning("⚠️ 工具 %s 未生成有效结果", tool_name)
user_log(scan_id, "url_fetch", f"{tool_name} failed: {reason}", "error")
except Exception as e:
failed_tools.append({"tool": tool_name, "reason": str(e)})
reason = str(e)
failed_tools.append({"tool": tool_name, "reason": reason})
logger.warning("⚠️ 工具 %s 执行失败: %s", tool_name, e)
user_log(scan_id, "url_fetch", f"{tool_name} failed: {reason}", "error")
logger.info(
"基于 domain_name 的 URL 获取完成 - 成功工具: %s, 失败工具: %s",

View File

@@ -25,6 +25,7 @@ from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_completed,
on_scan_flow_failed,
)
from apps.scan.utils import user_log
from .domain_name_url_fetch_flow import domain_name_url_fetch_flow
from .sites_url_fetch_flow import sites_url_fetch_flow
@@ -291,6 +292,8 @@ def url_fetch_flow(
"="*60
)
user_log(scan_id, "url_fetch", "Starting URL fetch")
# Step 1: 准备工作目录
logger.info("Step 1: 准备工作目录")
from apps.scan.utils import setup_scan_directory
@@ -403,7 +406,9 @@ def url_fetch_flow(
target_id=target_id
)
logger.info("="*60 + "\n✓ URL 获取扫描完成\n" + "="*60)
# 记录 Flow 完成
logger.info("✓ URL 获取完成 - 保存 endpoints: %d", saved_count)
user_log(scan_id, "url_fetch", f"url_fetch completed: found {saved_count} endpoints")
# 构建已执行的任务列表
executed_tasks = ['setup_directory', 'classify_tools']

View File

@@ -116,7 +116,8 @@ def sites_url_fetch_flow(
tools=enabled_tools,
input_file=sites_file,
input_type="sites_file",
output_dir=output_path
output_dir=output_path,
scan_id=scan_id
)
logger.info(

View File

@@ -152,7 +152,8 @@ def run_tools_parallel(
tools: dict,
input_file: str,
input_type: str,
output_dir: Path
output_dir: Path,
scan_id: int
) -> tuple[list, list, list]:
"""
并行执行工具列表
@@ -162,11 +163,13 @@ def run_tools_parallel(
input_file: 输入文件路径
input_type: 输入类型
output_dir: 输出目录
scan_id: 扫描任务 ID用于记录日志
Returns:
tuple: (result_files, failed_tools, successful_tool_names)
"""
from apps.scan.tasks.url_fetch import run_url_fetcher_task
from apps.scan.utils import user_log
futures: dict[str, object] = {}
failed_tools: list[dict] = []
@@ -192,6 +195,9 @@ def run_tools_parallel(
exec_params["timeout"],
)
# 记录工具开始执行日志
user_log(scan_id, "url_fetch", f"Running {tool_name}: {exec_params['command']}")
# 提交并行任务
future = run_url_fetcher_task.submit(
tool_name=tool_name,
@@ -208,22 +214,28 @@ def run_tools_parallel(
result = future.result()
if result and result['success']:
result_files.append(result['output_file'])
url_count = result['url_count']
logger.info(
"✓ 工具 %s 执行成功 - 发现 URL: %d",
tool_name, result['url_count']
tool_name, url_count
)
user_log(scan_id, "url_fetch", f"{tool_name} completed: found {url_count} urls")
else:
reason = '未生成结果或无有效URL'
failed_tools.append({
'tool': tool_name,
'reason': '未生成结果或无有效URL'
'reason': reason
})
logger.warning("⚠️ 工具 %s 未生成有效结果", tool_name)
user_log(scan_id, "url_fetch", f"{tool_name} failed: {reason}", "error")
except Exception as e:
reason = str(e)
failed_tools.append({
'tool': tool_name,
'reason': str(e)
'reason': reason
})
logger.warning("⚠️ 工具 %s 执行失败: %s", tool_name, e)
user_log(scan_id, "url_fetch", f"{tool_name} failed: {reason}", "error")
# 计算成功的工具列表
failed_tool_names = [f['tool'] for f in failed_tools]

View File

@@ -12,7 +12,7 @@ from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_completed,
on_scan_flow_failed,
)
from apps.scan.utils import build_scan_command, ensure_nuclei_templates_local
from apps.scan.utils import build_scan_command, ensure_nuclei_templates_local, user_log
from apps.scan.tasks.vuln_scan import (
export_endpoints_task,
run_vuln_tool_task,
@@ -141,6 +141,7 @@ def endpoints_vuln_scan_flow(
# Dalfox XSS 使用流式任务,一边解析一边保存漏洞结果
if tool_name == "dalfox_xss":
logger.info("开始执行漏洞扫描工具 %s(流式保存漏洞结果,已提交任务)", tool_name)
user_log(scan_id, "vuln_scan", f"Running {tool_name}: {command}")
future = run_and_stream_save_dalfox_vulns_task.submit(
cmd=command,
tool_name=tool_name,
@@ -163,6 +164,7 @@ def endpoints_vuln_scan_flow(
elif tool_name == "nuclei":
# Nuclei 使用流式任务
logger.info("开始执行漏洞扫描工具 %s(流式保存漏洞结果,已提交任务)", tool_name)
user_log(scan_id, "vuln_scan", f"Running {tool_name}: {command}")
future = run_and_stream_save_nuclei_vulns_task.submit(
cmd=command,
tool_name=tool_name,
@@ -185,6 +187,7 @@ def endpoints_vuln_scan_flow(
else:
# 其他工具仍使用非流式执行逻辑
logger.info("开始执行漏洞扫描工具 %s(已提交任务)", tool_name)
user_log(scan_id, "vuln_scan", f"Running {tool_name}: {command}")
future = run_vuln_tool_task.submit(
tool_name=tool_name,
command=command,
@@ -203,24 +206,34 @@ def endpoints_vuln_scan_flow(
# 统一收集所有工具的执行结果
for tool_name, meta in tool_futures.items():
future = meta["future"]
result = future.result()
try:
result = future.result()
if meta["mode"] == "streaming":
tool_results[tool_name] = {
"command": meta["command"],
"timeout": meta["timeout"],
"processed_records": result.get("processed_records"),
"created_vulns": result.get("created_vulns"),
"command_log_file": meta["log_file"],
}
else:
tool_results[tool_name] = {
"command": meta["command"],
"timeout": meta["timeout"],
"duration": result.get("duration"),
"returncode": result.get("returncode"),
"command_log_file": result.get("command_log_file"),
}
if meta["mode"] == "streaming":
created_vulns = result.get("created_vulns", 0)
tool_results[tool_name] = {
"command": meta["command"],
"timeout": meta["timeout"],
"processed_records": result.get("processed_records"),
"created_vulns": created_vulns,
"command_log_file": meta["log_file"],
}
logger.info("✓ 工具 %s 执行完成 - 漏洞: %d", tool_name, created_vulns)
user_log(scan_id, "vuln_scan", f"{tool_name} completed: found {created_vulns} vulnerabilities")
else:
tool_results[tool_name] = {
"command": meta["command"],
"timeout": meta["timeout"],
"duration": result.get("duration"),
"returncode": result.get("returncode"),
"command_log_file": result.get("command_log_file"),
}
logger.info("✓ 工具 %s 执行完成 - returncode=%s", tool_name, result.get("returncode"))
user_log(scan_id, "vuln_scan", f"{tool_name} completed")
except Exception as e:
reason = str(e)
logger.error("工具 %s 执行失败: %s", tool_name, e, exc_info=True)
user_log(scan_id, "vuln_scan", f"{tool_name} failed: {reason}", "error")
return {
"success": True,

View File

@@ -11,6 +11,7 @@ from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_failed,
)
from apps.scan.configs.command_templates import get_command_template
from apps.scan.utils import user_log
from .endpoints_vuln_scan_flow import endpoints_vuln_scan_flow
@@ -72,6 +73,9 @@ def vuln_scan_flow(
if not enabled_tools:
raise ValueError("enabled_tools 不能为空")
logger.info("开始漏洞扫描 - Scan ID: %s, Target: %s", scan_id, target_name)
user_log(scan_id, "vuln_scan", "Starting vulnerability scan")
# Step 1: 分类工具
endpoints_tools, other_tools = _classify_vuln_tools(enabled_tools)
@@ -99,6 +103,14 @@ def vuln_scan_flow(
enabled_tools=endpoints_tools,
)
# 记录 Flow 完成
total_vulns = sum(
r.get("created_vulns", 0)
for r in endpoint_result.get("tool_results", {}).values()
)
logger.info("✓ 漏洞扫描完成 - 新增漏洞: %d", total_vulns)
user_log(scan_id, "vuln_scan", f"vuln_scan completed: found {total_vulns} vulnerabilities")
# 目前只有一个子 Flow直接返回其结果
return endpoint_result

View File

@@ -14,6 +14,7 @@ from prefect import Flow
from prefect.client.schemas import FlowRun, State
from apps.scan.utils.performance import FlowPerformanceTracker
from apps.scan.utils import user_log
logger = logging.getLogger(__name__)
@@ -136,6 +137,7 @@ def on_scan_flow_failed(flow: Flow, flow_run: FlowRun, state: State) -> None:
- 更新阶段进度为 failed
- 发送扫描失败通知
- 记录性能指标(含错误信息)
- 写入 ScanLog 供前端显示
Args:
flow: Prefect Flow 对象
@@ -152,6 +154,11 @@ def on_scan_flow_failed(flow: Flow, flow_run: FlowRun, state: State) -> None:
# 提取错误信息
error_message = str(state.message) if state.message else "未知错误"
# 写入 ScanLog 供前端显示
stage = _get_stage_from_flow_name(flow.name)
if scan_id and stage:
user_log(scan_id, stage, f"Failed: {error_message}", "error")
# 记录性能指标(失败情况)
tracker = _flow_trackers.pop(str(flow_run.id), None)
if tracker:

View File

@@ -116,4 +116,35 @@ class Migration(migrations.Migration):
'indexes': [models.Index(fields=['-created_at'], name='scheduled_s_created_9b9c2e_idx'), models.Index(fields=['is_enabled', '-created_at'], name='scheduled_s_is_enab_23d660_idx'), models.Index(fields=['name'], name='scheduled_s_name_bf332d_idx')],
},
),
migrations.CreateModel(
name='ScanLog',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('level', models.CharField(choices=[('info', 'Info'), ('warning', 'Warning'), ('error', 'Error')], default='info', help_text='日志级别', max_length=10)),
('content', models.TextField(help_text='日志内容')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, help_text='创建时间')),
('scan', models.ForeignKey(db_index=True, help_text='关联的扫描任务', on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='scan.scan')),
],
options={
'verbose_name': '扫描日志',
'verbose_name_plural': '扫描日志',
'db_table': 'scan_log',
'ordering': ['created_at'],
'indexes': [models.Index(fields=['scan', 'created_at'], name='scan_log_scan_id_e8c8f5_idx')],
},
),
migrations.CreateModel(
name='SubfinderProviderSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('providers', models.JSONField(default=dict, help_text='各 Provider 的 API Key 配置')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Subfinder Provider 配置',
'verbose_name_plural': 'Subfinder Provider 配置',
'db_table': 'subfinder_provider_settings',
},
),
]

View File

@@ -0,0 +1,18 @@
"""Scan Models - 统一导出"""
from .scan_models import Scan, SoftDeleteManager
from .scan_log_model import ScanLog
from .scheduled_scan_model import ScheduledScan
from .subfinder_provider_settings_model import SubfinderProviderSettings
# 兼容旧名称(已废弃,请使用 SubfinderProviderSettings
ProviderSettings = SubfinderProviderSettings
__all__ = [
'Scan',
'ScanLog',
'ScheduledScan',
'SoftDeleteManager',
'SubfinderProviderSettings',
'ProviderSettings', # 兼容旧名称
]

View File

@@ -0,0 +1,41 @@
"""扫描日志模型"""
from django.db import models
class ScanLog(models.Model):
    """Per-scan log entry displayed to the user in the frontend.

    Each row records one log line (severity + free-form content) attached to
    a Scan via a cascading foreign key; rows are read back in chronological
    order.
    """

    class Level(models.TextChoices):
        # Severity levels surfaced to the frontend log viewer.
        INFO = 'info', 'Info'
        WARNING = 'warning', 'Warning'
        ERROR = 'error', 'Error'

    id = models.BigAutoField(primary_key=True)
    # Owning scan; deleting the scan cascades and removes its log rows.
    scan = models.ForeignKey(
        'Scan',
        on_delete=models.CASCADE,
        related_name='logs',
        db_index=True,
        help_text='关联的扫描任务'
    )
    level = models.CharField(
        max_length=10,
        choices=Level.choices,
        default=Level.INFO,
        help_text='日志级别'
    )
    content = models.TextField(help_text='日志内容')
    created_at = models.DateTimeField(auto_now_add=True, db_index=True, help_text='创建时间')

    class Meta:
        db_table = 'scan_log'
        verbose_name = '扫描日志'
        verbose_name_plural = '扫描日志'
        # Oldest-first so the frontend can stream logs chronologically.
        ordering = ['created_at']
        indexes = [
            # Composite index for the common "logs of one scan, by time" query.
            models.Index(fields=['scan', 'created_at']),
        ]

    def __str__(self):
        # Truncate content so admin/debug listings stay short.
        return f"[{self.level}] {self.content[:50]}"

View File

@@ -1,9 +1,9 @@
"""扫描相关模型"""
from django.db import models
from django.contrib.postgres.fields import ArrayField
from ..common.definitions import ScanStatus
from apps.common.definitions import ScanStatus
class SoftDeleteManager(models.Manager):
@@ -97,99 +97,10 @@ class Scan(models.Model):
verbose_name_plural = '扫描任务'
ordering = ['-created_at']
indexes = [
models.Index(fields=['-created_at']), # 优化按创建时间降序排序list 查询的默认排序)
models.Index(fields=['target']), # 优化按目标查询扫描任务
models.Index(fields=['deleted_at', '-created_at']), # 软删除 + 时间索引
models.Index(fields=['-created_at']),
models.Index(fields=['target']),
models.Index(fields=['deleted_at', '-created_at']),
]
def __str__(self):
return f"Scan #{self.id} - {self.target.name}"
class ScheduledScan(models.Model):
    """
    Scheduled scan task model.

    Scheduling:
    - APScheduler checks ``next_run_time`` every minute
    - due tasks are dispatched to a worker via task_distributor
    - cron expressions allow flexible scheduling

    Scan mode (choose one):
    - organization scan: set ``organization``; all of the organization's
      targets are fetched dynamically at execution time
    - target scan: set ``target`` to scan a single target
    - ``organization`` takes precedence over ``target``
    """

    id = models.AutoField(primary_key=True)

    # Basic info
    name = models.CharField(max_length=200, help_text='任务名称')

    # Multi-engine support fields: engine ids and display names stored side by side.
    engine_ids = ArrayField(
        models.IntegerField(),
        default=list,
        help_text='引擎 ID 列表'
    )
    engine_names = models.JSONField(
        default=list,
        help_text='引擎名称列表,如 ["引擎A", "引擎B"]'
    )
    yaml_configuration = models.TextField(
        default='',
        help_text='YAML 格式的扫描配置'
    )

    # Related organization (organization mode: targets resolved dynamically at run time)
    organization = models.ForeignKey(
        'targets.Organization',
        on_delete=models.CASCADE,
        related_name='scheduled_scans',
        null=True,
        blank=True,
        help_text='扫描组织(设置后执行时动态获取组织下所有目标)'
    )

    # Related target (target mode: scan a single target; mutually exclusive with organization)
    target = models.ForeignKey(
        'targets.Target',
        on_delete=models.CASCADE,
        related_name='scheduled_scans',
        null=True,
        blank=True,
        help_text='扫描单个目标(与 organization 二选一)'
    )

    # Schedule configuration - a raw 5-field cron expression (min hour day month weekday)
    cron_expression = models.CharField(
        max_length=100,
        default='0 2 * * *',
        help_text='Cron 表达式,格式:分 时 日 月 周'
    )

    # State
    is_enabled = models.BooleanField(default=True, db_index=True, help_text='是否启用')

    # Execution statistics maintained by the scheduler
    run_count = models.IntegerField(default=0, help_text='已执行次数')
    last_run_time = models.DateTimeField(null=True, blank=True, help_text='上次执行时间')
    next_run_time = models.DateTimeField(null=True, blank=True, help_text='下次执行时间')

    # Timestamps
    created_at = models.DateTimeField(auto_now_add=True, help_text='创建时间')
    updated_at = models.DateTimeField(auto_now=True, help_text='更新时间')

    class Meta:
        db_table = 'scheduled_scan'
        verbose_name = '定时扫描任务'
        verbose_name_plural = '定时扫描任务'
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['-created_at']),
            models.Index(fields=['is_enabled', '-created_at']),
            models.Index(fields=['name']),  # speeds up name search
        ]

    def __str__(self):
        return f"ScheduledScan #{self.id} - {self.name}"

View File

@@ -0,0 +1,73 @@
"""定时扫描任务模型"""
from django.db import models
from django.contrib.postgres.fields import ArrayField
class ScheduledScan(models.Model):
    """Scheduled (cron-driven) scan task.

    Exactly one of ``organization`` / ``target`` is expected to be set (see
    their help texts): organization mode resolves all of the organization's
    targets at execution time, target mode scans a single target.
    """

    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=200, help_text='任务名称')
    # Multi-engine support: engine ids and display names are stored side by side.
    engine_ids = ArrayField(
        models.IntegerField(),
        default=list,
        help_text='引擎 ID 列表'
    )
    engine_names = models.JSONField(
        default=list,
        help_text='引擎名称列表,如 ["引擎A", "引擎B"]'
    )
    yaml_configuration = models.TextField(
        default='',
        help_text='YAML 格式的扫描配置'
    )
    # Organization-scan mode: targets are fetched dynamically at run time.
    organization = models.ForeignKey(
        'targets.Organization',
        on_delete=models.CASCADE,
        related_name='scheduled_scans',
        null=True,
        blank=True,
        help_text='扫描组织(设置后执行时动态获取组织下所有目标)'
    )
    # Single-target mode (mutually exclusive with organization).
    target = models.ForeignKey(
        'targets.Target',
        on_delete=models.CASCADE,
        related_name='scheduled_scans',
        null=True,
        blank=True,
        help_text='扫描单个目标(与 organization 二选一)'
    )
    # Standard 5-field cron expression: minute hour day month weekday.
    cron_expression = models.CharField(
        max_length=100,
        default='0 2 * * *',
        help_text='Cron 表达式,格式:分 时 日 月 周'
    )
    is_enabled = models.BooleanField(default=True, db_index=True, help_text='是否启用')
    # Execution bookkeeping maintained by the scheduler.
    run_count = models.IntegerField(default=0, help_text='已执行次数')
    last_run_time = models.DateTimeField(null=True, blank=True, help_text='上次执行时间')
    next_run_time = models.DateTimeField(null=True, blank=True, help_text='下次执行时间')
    created_at = models.DateTimeField(auto_now_add=True, help_text='创建时间')
    updated_at = models.DateTimeField(auto_now=True, help_text='更新时间')

    class Meta:
        db_table = 'scheduled_scan'
        verbose_name = '定时扫描任务'
        verbose_name_plural = '定时扫描任务'
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['-created_at']),
            models.Index(fields=['is_enabled', '-created_at']),
            models.Index(fields=['name']),  # speeds up name search
        ]

    def __str__(self):
        return f"ScheduledScan #{self.id} - {self.name}"

View File

@@ -0,0 +1,64 @@
"""Subfinder Provider 配置模型(单例模式)
用于存储 subfinder 第三方数据源的 API Key 配置
"""
from django.db import models
class SubfinderProviderSettings(models.Model):
    """Subfinder provider configuration (singleton, always pk=1).

    Stores API key configuration for subfinder's third-party data sources.

    Supported providers:
    - fofa: email + api_key (composite)
    - censys: api_id + api_secret (composite)
    - hunter, shodan, zoomeye, securitytrails, threatbook, quake: api_key (single)
    """

    # JSON mapping: provider name -> {'enabled': bool, <credential fields>}.
    providers = models.JSONField(
        default=dict,
        help_text='各 Provider 的 API Key 配置'
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'subfinder_provider_settings'
        verbose_name = 'Subfinder Provider 配置'
        verbose_name_plural = 'Subfinder Provider 配置'

    # Template describing every known provider and its credential fields.
    # Treat as immutable: always hand out copies, never the nested dicts.
    DEFAULT_PROVIDERS = {
        'fofa': {'enabled': False, 'email': '', 'api_key': ''},
        'hunter': {'enabled': False, 'api_key': ''},
        'shodan': {'enabled': False, 'api_key': ''},
        'censys': {'enabled': False, 'api_id': '', 'api_secret': ''},
        'zoomeye': {'enabled': False, 'api_key': ''},
        'securitytrails': {'enabled': False, 'api_key': ''},
        'threatbook': {'enabled': False, 'api_key': ''},
        'quake': {'enabled': False, 'api_key': ''},
    }

    @classmethod
    def _default_providers_copy(cls) -> dict:
        """Return a per-provider copy of DEFAULT_PROVIDERS.

        BUGFIX: the previous ``DEFAULT_PROVIDERS.copy()`` was a *shallow*
        copy, so the singleton's stored config shared the nested provider
        dicts with the class constant — mutating a stored provider entry
        (e.g. setting an api_key) would silently corrupt the defaults for
        the whole process. Copying each nested dict breaks that sharing
        (the nested values are flat, so one level of copying suffices).
        """
        return {name: dict(cfg) for name, cfg in cls.DEFAULT_PROVIDERS.items()}

    def save(self, *args, **kwargs):
        # Enforce the singleton invariant: every save targets primary key 1.
        self.pk = 1
        super().save(*args, **kwargs)

    @classmethod
    def get_instance(cls) -> 'SubfinderProviderSettings':
        """Get or lazily create the singleton instance (pk=1)."""
        obj, _ = cls.objects.get_or_create(
            pk=1,
            defaults={'providers': cls._default_providers_copy()}
        )
        return obj

    def get_provider_config(self, provider: str) -> dict:
        """Return the stored config for *provider*, falling back to defaults.

        The default fallback is returned as a copy so callers cannot mutate
        the shared DEFAULT_PROVIDERS template.
        """
        if provider in self.providers:
            return self.providers[provider]
        return dict(self.DEFAULT_PROVIDERS.get(provider, {}))

    def is_provider_enabled(self, provider: str) -> bool:
        """Whether *provider* has a config with ``enabled`` set to True."""
        config = self.get_provider_config(provider)
        return config.get('enabled', False)

View File

@@ -1,401 +0,0 @@
from rest_framework import serializers
from django.db.models import Count
import yaml
from .models import Scan, ScheduledScan
# ==================== 通用验证 Mixin ====================
class DuplicateKeyLoader(yaml.SafeLoader):
    """Custom YAML loader that rejects mappings containing duplicate keys."""
    pass


def _check_duplicate_keys(loader, node, deep=False):
    """Mapping constructor that raises on a repeated key.

    Plain YAML loading silently lets a later key overwrite an earlier one;
    this constructor raises a ConstructorError instead, so the user is told
    to remove the duplicate configuration entry.
    """
    mapping = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        if key in mapping:
            raise yaml.constructor.ConstructorError(
                "while constructing a mapping", node.start_mark,
                f"发现重复的配置项 '{key}',后面的配置会覆盖前面的配置,请删除重复项", key_node.start_mark
            )
        mapping[key] = loader.construct_object(value_node, deep=deep)
    return mapping


# Install the duplicate-key check for every plain YAML mapping node.
DuplicateKeyLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
    _check_duplicate_keys
)
class ScanConfigValidationMixin:
    """Shared validation helpers for scan-related serializers.

    Mixed into serializers that accept ``configuration`` /
    ``engine_ids`` / ``engine_names`` fields.
    """

    def validate_configuration(self, value):
        """Validate the YAML configuration string.

        Rejects empty input, invalid YAML, and duplicate mapping keys
        (detected via DuplicateKeyLoader).
        """
        # Cleanup: dropped the redundant function-local ``import yaml`` —
        # the module already imports yaml at the top level.
        if not value or not value.strip():
            raise serializers.ValidationError("configuration 不能为空")
        try:
            # The custom loader raises on duplicate keys instead of
            # silently letting the later one win.
            yaml.load(value, Loader=DuplicateKeyLoader)
        except yaml.YAMLError as e:
            raise serializers.ValidationError(f"无效的 YAML 格式: {str(e)}")
        return value

    def validate_engine_ids(self, value):
        """Require at least one scan-engine id."""
        if not value:
            raise serializers.ValidationError("engine_ids 不能为空,请至少选择一个扫描引擎")
        return value

    def validate_engine_names(self, value):
        """Require at least one engine name."""
        if not value:
            raise serializers.ValidationError("engine_names 不能为空")
        return value
# ==================== 扫描任务序列化器 ====================
class ScanSerializer(serializers.ModelSerializer):
    """Serializer for scan tasks (detail/create views)."""

    # Flattened display name of the related target.
    target_name = serializers.SerializerMethodField()

    class Meta:
        model = Scan
        fields = [
            'id', 'target', 'target_name', 'engine_ids', 'engine_names',
            'created_at', 'stopped_at', 'status', 'results_dir',
            'container_ids', 'error_message'
        ]
        read_only_fields = [
            'id', 'created_at', 'stopped_at', 'results_dir',
            'container_ids', 'error_message', 'status'
        ]

    def get_target_name(self, obj):
        """Return the related target's name, or None when unset."""
        return obj.target.name if obj.target else None
class ScanHistorySerializer(serializers.ModelSerializer):
    """Serializer tailored to the frontend scan-history list page.

    On top of the basic scan fields it provides:
    - aggregated result counts (subdomains, endpoints, vulnerabilities, ...)
    - progress percentage and current stage
    - the worker node the scan ran on
    """

    # Field mappings from related objects.
    target_name = serializers.CharField(source='target.name', read_only=True)
    worker_name = serializers.CharField(source='worker.name', read_only=True, allow_null=True)
    # Computed aggregate block (see get_summary).
    summary = serializers.SerializerMethodField()
    # Progress-tracking fields read directly from the model.
    progress = serializers.IntegerField(read_only=True)
    current_stage = serializers.CharField(read_only=True)
    stage_progress = serializers.JSONField(read_only=True)

    class Meta:
        model = Scan
        fields = [
            'id', 'target', 'target_name', 'engine_ids', 'engine_names',
            'worker_name', 'created_at', 'status', 'error_message', 'summary',
            'progress', 'current_stage', 'stage_progress'
        ]

    def get_summary(self, obj):
        """Build the aggregated statistics block for one scan.

        Design principles:
        - subdomain/website/endpoint/IP/directory counts come from cached
          fields on Scan (no live COUNT queries per row)
        - vulnerability stats are likewise cached on Scan, aggregated once
          when the scan finishes
        """
        # 1. Base counts from the cached fields (subdomains, websites,
        #    endpoints, IPs, directories).
        summary = {
            'subdomains': obj.cached_subdomains_count or 0,
            'websites': obj.cached_websites_count or 0,
            'endpoints': obj.cached_endpoints_count or 0,
            'ips': obj.cached_ips_count or 0,
            'directories': obj.cached_directories_count or 0,
        }
        # 2. Cached vulnerability statistics grouped by severity.
        summary['vulnerabilities'] = {
            'total': obj.cached_vulns_total or 0,
            'critical': obj.cached_vulns_critical or 0,
            'high': obj.cached_vulns_high or 0,
            'medium': obj.cached_vulns_medium or 0,
            'low': obj.cached_vulns_low or 0,
        }
        return summary
class QuickScanSerializer(ScanConfigValidationMixin, serializers.Serializer):
    """
    Quick-scan serializer.

    Responsibilities:
    - accept a target list plus a YAML configuration
    - targets are auto-created/fetched downstream
    - the scan is initiated immediately
    """

    # Upper bound on batch size, protecting the server from overload.
    MAX_BATCH_SIZE = 1000

    # Target list; each entry must contain a ``name`` field.
    targets = serializers.ListField(
        child=serializers.DictField(),
        help_text='目标列表,每个目标包含 name 字段'
    )
    # YAML configuration (required).
    configuration = serializers.CharField(
        required=True,
        help_text='YAML 格式的扫描配置(必填)'
    )
    # Scan-engine id list (required; used for record keeping and display).
    engine_ids = serializers.ListField(
        child=serializers.IntegerField(),
        required=True,
        help_text='使用的扫描引擎 ID 列表(必填)'
    )
    # Engine name list (required; used for record keeping and display).
    engine_names = serializers.ListField(
        child=serializers.CharField(),
        required=True,
        help_text='引擎名称列表(必填)'
    )

    def validate_targets(self, value):
        """Validate the target list: non-empty, size-capped, names present."""
        if not value:
            raise serializers.ValidationError("目标列表不能为空")
        # Enforce the batch-size cap to avoid server overload.
        if len(value) > self.MAX_BATCH_SIZE:
            raise serializers.ValidationError(
                f"快速扫描最多支持 {self.MAX_BATCH_SIZE} 个目标,当前提交了 {len(value)}"
            )
        # Every target must carry a non-empty ``name``.
        for idx, target in enumerate(value):
            if 'name' not in target:
                raise serializers.ValidationError(f"第 {idx + 1} 个目标缺少 name 字段")
            if not target['name']:
                raise serializers.ValidationError(f"第 {idx + 1} 个目标的 name 不能为空")
        return value
# ==================== 定时扫描序列化器 ====================
class ScheduledScanSerializer(serializers.ModelSerializer):
    """Scheduled-scan serializer used for list and detail views."""

    # Related organization/target fields (null when the mode does not apply).
    organization_id = serializers.IntegerField(source='organization.id', read_only=True, allow_null=True)
    organization_name = serializers.CharField(source='organization.name', read_only=True, allow_null=True)
    target_id = serializers.IntegerField(source='target.id', read_only=True, allow_null=True)
    target_name = serializers.CharField(source='target.name', read_only=True, allow_null=True)
    # Computed mode flag (see get_scan_mode).
    scan_mode = serializers.SerializerMethodField()

    class Meta:
        model = ScheduledScan
        fields = [
            'id', 'name',
            'engine_ids', 'engine_names',
            'organization_id', 'organization_name',
            'target_id', 'target_name',
            'scan_mode',
            'cron_expression',
            'is_enabled',
            'run_count', 'last_run_time', 'next_run_time',
            'created_at', 'updated_at'
        ]
        read_only_fields = [
            'id', 'run_count',
            'last_run_time', 'next_run_time',
            'created_at', 'updated_at'
        ]

    def get_scan_mode(self, obj):
        """Return the scan mode: 'organization' or 'target'."""
        return 'organization' if obj.organization_id else 'target'
class CreateScheduledScanSerializer(ScanConfigValidationMixin, serializers.Serializer):
    """Serializer for creating a scheduled scan task.

    Scan mode (choose one):
    - organization scan: provide ``organization_id``; targets are fetched
      dynamically from the organization at execution time
    - target scan: provide ``target_id`` to scan a single target
    """

    name = serializers.CharField(max_length=200, help_text='任务名称')
    # YAML configuration (required).
    configuration = serializers.CharField(
        required=True,
        help_text='YAML 格式的扫描配置(必填)'
    )
    # Scan-engine id list (required; used for record keeping and display).
    engine_ids = serializers.ListField(
        child=serializers.IntegerField(),
        required=True,
        help_text='扫描引擎 ID 列表(必填)'
    )
    # Engine name list (required; used for record keeping and display).
    engine_names = serializers.ListField(
        child=serializers.CharField(),
        required=True,
        help_text='引擎名称列表(必填)'
    )
    # Organization-scan mode.
    organization_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='组织 ID(组织扫描模式:执行时动态获取组织下所有目标)'
    )
    # Single-target mode.
    target_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='目标 ID(目标扫描模式:扫描单个目标)'
    )
    cron_expression = serializers.CharField(
        max_length=100,
        default='0 2 * * *',
        help_text='Cron 表达式,格式:分 时 日 月 周'
    )
    is_enabled = serializers.BooleanField(default=True, help_text='是否立即启用')

    def validate(self, data):
        """Enforce that organization_id and target_id are mutually exclusive."""
        organization_id = data.get('organization_id')
        target_id = data.get('target_id')
        if not organization_id and not target_id:
            raise serializers.ValidationError('必须提供 organization_id 或 target_id 其中之一')
        if organization_id and target_id:
            raise serializers.ValidationError('organization_id 和 target_id 只能提供其中之一')
        return data
class InitiateScanSerializer(ScanConfigValidationMixin, serializers.Serializer):
    """Serializer for initiating a scan immediately.

    Scan mode (choose one):
    - organization scan: provide ``organization_id`` to scan all of the
      organization's targets
    - target scan: provide ``target_id`` to scan a single target
    """

    # YAML configuration (required).
    configuration = serializers.CharField(
        required=True,
        help_text='YAML 格式的扫描配置(必填)'
    )
    # Scan-engine id list (required).
    engine_ids = serializers.ListField(
        child=serializers.IntegerField(),
        required=True,
        help_text='扫描引擎 ID 列表(必填)'
    )
    # Engine name list (required).
    engine_names = serializers.ListField(
        child=serializers.CharField(),
        required=True,
        help_text='引擎名称列表(必填)'
    )
    # Organization-scan mode.
    organization_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='组织 ID(组织扫描模式)'
    )
    # Single-target mode.
    target_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='目标 ID(目标扫描模式)'
    )

    def validate(self, data):
        """Enforce that organization_id and target_id are mutually exclusive."""
        organization_id = data.get('organization_id')
        target_id = data.get('target_id')
        if not organization_id and not target_id:
            raise serializers.ValidationError('必须提供 organization_id 或 target_id 其中之一')
        if organization_id and target_id:
            raise serializers.ValidationError('organization_id 和 target_id 只能提供其中之一')
        return data
class UpdateScheduledScanSerializer(serializers.Serializer):
    """Serializer for partially updating a scheduled scan task.

    All fields are optional; only supplied fields are updated.
    """

    name = serializers.CharField(max_length=200, required=False, help_text='任务名称')
    engine_ids = serializers.ListField(
        child=serializers.IntegerField(),
        required=False,
        help_text='扫描引擎 ID 列表'
    )
    # Organization-scan mode (setting it clears target_id downstream).
    organization_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='组织 ID(设置后清空 target_id)'
    )
    # Single-target mode (setting it clears organization_id downstream).
    target_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='目标 ID(设置后清空 organization_id)'
    )
    cron_expression = serializers.CharField(max_length=100, required=False, help_text='Cron 表达式')
    is_enabled = serializers.BooleanField(required=False, help_text='是否启用')

    def validate_engine_ids(self, value):
        """Reject an explicitly supplied but empty engine-id list."""
        if value is not None and not value:
            raise serializers.ValidationError("engine_ids 不能为空")
        return value
class ToggleScheduledScanSerializer(serializers.Serializer):
    """Serializer for toggling a scheduled scan's enabled flag."""
    is_enabled = serializers.BooleanField(help_text='是否启用')

View File

@@ -0,0 +1,40 @@
"""Scan Serializers - 统一导出"""
from .mixins import ScanConfigValidationMixin
from .scan_serializers import (
ScanSerializer,
ScanHistorySerializer,
QuickScanSerializer,
InitiateScanSerializer,
)
from .scan_log_serializers import ScanLogSerializer
from .scheduled_scan_serializers import (
ScheduledScanSerializer,
CreateScheduledScanSerializer,
UpdateScheduledScanSerializer,
ToggleScheduledScanSerializer,
)
from .subfinder_provider_settings_serializers import SubfinderProviderSettingsSerializer
# 兼容旧名称
ProviderSettingsSerializer = SubfinderProviderSettingsSerializer
__all__ = [
# Mixins
'ScanConfigValidationMixin',
# Scan
'ScanSerializer',
'ScanHistorySerializer',
'QuickScanSerializer',
'InitiateScanSerializer',
# ScanLog
'ScanLogSerializer',
# Scheduled Scan
'ScheduledScanSerializer',
'CreateScheduledScanSerializer',
'UpdateScheduledScanSerializer',
'ToggleScheduledScanSerializer',
# Subfinder Provider Settings
'SubfinderProviderSettingsSerializer',
'ProviderSettingsSerializer', # 兼容旧名称
]

View File

@@ -0,0 +1,57 @@
"""序列化器通用 Mixin 和工具类"""
from rest_framework import serializers
import yaml
class DuplicateKeyLoader(yaml.SafeLoader):
    """Custom YAML loader that rejects duplicate mapping keys.

    Behavior comes from the mapping constructor registered below;
    the subclass itself adds nothing.
    """
    pass

def _check_duplicate_keys(loader, node, deep=False):
    """Construct a YAML mapping, raising on duplicate keys.

    PyYAML silently lets later keys overwrite earlier ones; this constructor
    raises a ConstructorError instead so configuration mistakes surface early.
    """
    mapping = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        if key in mapping:
            raise yaml.constructor.ConstructorError(
                "while constructing a mapping", node.start_mark,
                f"发现重复的配置项 '{key}',后面的配置会覆盖前面的配置,请删除重复项", key_node.start_mark
            )
        mapping[key] = loader.construct_object(value_node, deep=deep)
    return mapping

# Register the duplicate-key check for every plain YAML mapping.
DuplicateKeyLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
    _check_duplicate_keys
)
class ScanConfigValidationMixin:
    """Shared field validators for scan-related serializers."""

    def validate_configuration(self, value):
        """Reject blank or syntactically invalid YAML configuration text."""
        text = value.strip() if value else ''
        if not text:
            raise serializers.ValidationError("configuration 不能为空")
        try:
            # DuplicateKeyLoader also rejects duplicate mapping keys.
            yaml.load(value, Loader=DuplicateKeyLoader)
        except yaml.YAMLError as e:
            raise serializers.ValidationError(f"无效的 YAML 格式: {str(e)}")
        return value

    def validate_engine_ids(self, value):
        """The engine id list is mandatory and must not be empty."""
        if value:
            return value
        raise serializers.ValidationError("engine_ids 不能为空,请至少选择一个扫描引擎")

    def validate_engine_names(self, value):
        """The engine name list must not be empty."""
        if value:
            return value
        raise serializers.ValidationError("engine_names 不能为空")

View File

@@ -0,0 +1,13 @@
"""扫描日志序列化器"""
from rest_framework import serializers
from ..models import ScanLog
class ScanLogSerializer(serializers.ModelSerializer):
    """Read-only serializer for user-visible scan log entries."""
    class Meta:
        model = ScanLog
        fields = ['id', 'level', 'content', 'created_at']

View File

@@ -0,0 +1,111 @@
"""扫描任务序列化器"""
from rest_framework import serializers
from ..models import Scan
from .mixins import ScanConfigValidationMixin
class ScanSerializer(serializers.ModelSerializer):
    """Detail serializer for a scan task."""

    # Name of the related target; None when the scan has no target.
    target_name = serializers.SerializerMethodField()

    class Meta:
        model = Scan
        fields = [
            'id', 'target', 'target_name', 'engine_ids', 'engine_names',
            'created_at', 'stopped_at', 'status', 'results_dir',
            'container_ids', 'error_message'
        ]
        read_only_fields = [
            'id', 'created_at', 'stopped_at', 'results_dir',
            'container_ids', 'error_message', 'status'
        ]

    def get_target_name(self, obj):
        """Return the related target's name, or None when unset."""
        target = obj.target
        if not target:
            return None
        return target.name
class ScanHistorySerializer(serializers.ModelSerializer):
    """List serializer for scan history rows (uses cached counters only)."""

    target_name = serializers.CharField(source='target.name', read_only=True)
    worker_name = serializers.CharField(source='worker.name', read_only=True, allow_null=True)
    summary = serializers.SerializerMethodField()
    progress = serializers.IntegerField(read_only=True)
    current_stage = serializers.CharField(read_only=True)
    stage_progress = serializers.JSONField(read_only=True)

    class Meta:
        model = Scan
        fields = [
            'id', 'target', 'target_name', 'engine_ids', 'engine_names',
            'worker_name', 'created_at', 'status', 'error_message', 'summary',
            'progress', 'current_stage', 'stage_progress'
        ]

    def get_summary(self, obj):
        """Assemble the result-count summary from cached counters (None -> 0)."""
        return {
            'subdomains': obj.cached_subdomains_count or 0,
            'websites': obj.cached_websites_count or 0,
            'endpoints': obj.cached_endpoints_count or 0,
            'ips': obj.cached_ips_count or 0,
            'directories': obj.cached_directories_count or 0,
            'vulnerabilities': {
                'total': obj.cached_vulns_total or 0,
                'critical': obj.cached_vulns_critical or 0,
                'high': obj.cached_vulns_high or 0,
                'medium': obj.cached_vulns_medium or 0,
                'low': obj.cached_vulns_low or 0,
            },
        }
class QuickScanSerializer(ScanConfigValidationMixin, serializers.Serializer):
    """Serializer for quick (ad-hoc batch) scans."""

    # Hard cap on how many targets one quick-scan request may carry.
    MAX_BATCH_SIZE = 1000

    targets = serializers.ListField(
        child=serializers.DictField(),
        help_text='目标列表,每个目标包含 name 字段'
    )
    configuration = serializers.CharField(required=True, help_text='YAML 格式的扫描配置')
    engine_ids = serializers.ListField(child=serializers.IntegerField(), required=True)
    engine_names = serializers.ListField(child=serializers.CharField(), required=True)

    def validate_targets(self, value):
        """Require a non-empty, size-capped list where every entry has a name."""
        if not value:
            raise serializers.ValidationError("目标列表不能为空")
        batch_size = len(value)
        if batch_size > self.MAX_BATCH_SIZE:
            raise serializers.ValidationError(
                f"快速扫描最多支持 {self.MAX_BATCH_SIZE} 个目标,当前提交了 {batch_size}"
            )
        for idx, entry in enumerate(value):
            if 'name' not in entry:
                raise serializers.ValidationError(f"{idx + 1} 个目标缺少 name 字段")
            if not entry['name']:
                raise serializers.ValidationError(f"{idx + 1} 个目标的 name 不能为空")
        return value
class InitiateScanSerializer(ScanConfigValidationMixin, serializers.Serializer):
    """Serializer for launching a scan task (organization or single-target mode)."""
    configuration = serializers.CharField(required=True, help_text='YAML 格式的扫描配置')
    engine_ids = serializers.ListField(child=serializers.IntegerField(), required=True)
    engine_names = serializers.ListField(child=serializers.CharField(), required=True)
    # Exactly one of the two ids below must be provided (enforced in validate()).
    organization_id = serializers.IntegerField(required=False, allow_null=True)
    target_id = serializers.IntegerField(required=False, allow_null=True)
    def validate(self, data):
        """Enforce that organization_id and target_id are mutually exclusive."""
        organization_id = data.get('organization_id')
        target_id = data.get('target_id')
        if not organization_id and not target_id:
            raise serializers.ValidationError('必须提供 organization_id 或 target_id 其中之一')
        if organization_id and target_id:
            raise serializers.ValidationError('organization_id 和 target_id 只能提供其中之一')
        return data

View File

@@ -0,0 +1,84 @@
"""定时扫描序列化器"""
from rest_framework import serializers
from ..models import ScheduledScan
from .mixins import ScanConfigValidationMixin
class ScheduledScanSerializer(serializers.ModelSerializer):
    """Scheduled scan serializer used for list and detail responses."""
    # Flattened fields from the related organization/target (null when absent).
    organization_id = serializers.IntegerField(source='organization.id', read_only=True, allow_null=True)
    organization_name = serializers.CharField(source='organization.name', read_only=True, allow_null=True)
    target_id = serializers.IntegerField(source='target.id', read_only=True, allow_null=True)
    target_name = serializers.CharField(source='target.name', read_only=True, allow_null=True)
    scan_mode = serializers.SerializerMethodField()
    class Meta:
        model = ScheduledScan
        fields = [
            'id', 'name',
            'engine_ids', 'engine_names',
            'organization_id', 'organization_name',
            'target_id', 'target_name',
            'scan_mode',
            'cron_expression',
            'is_enabled',
            'run_count', 'last_run_time', 'next_run_time',
            'created_at', 'updated_at'
        ]
        read_only_fields = [
            'id', 'run_count',
            'last_run_time', 'next_run_time',
            'created_at', 'updated_at'
        ]
    def get_scan_mode(self, obj):
        """Derive the mode from which foreign key is set (organization wins)."""
        return 'organization' if obj.organization_id else 'target'
class CreateScheduledScanSerializer(ScanConfigValidationMixin, serializers.Serializer):
    """Serializer for creating a scheduled scan task."""

    name = serializers.CharField(max_length=200, help_text='任务名称')
    configuration = serializers.CharField(required=True, help_text='YAML 格式的扫描配置')
    engine_ids = serializers.ListField(child=serializers.IntegerField(), required=True)
    engine_names = serializers.ListField(child=serializers.CharField(), required=True)
    # Exactly one of the two ids below must be supplied (see validate()).
    organization_id = serializers.IntegerField(required=False, allow_null=True)
    target_id = serializers.IntegerField(required=False, allow_null=True)
    cron_expression = serializers.CharField(max_length=100, default='0 2 * * *')
    is_enabled = serializers.BooleanField(default=True)

    def validate(self, data):
        """Reject payloads that select both scan modes, or neither."""
        org_id = data.get('organization_id')
        tgt_id = data.get('target_id')
        if org_id and tgt_id:
            raise serializers.ValidationError('organization_id 和 target_id 只能提供其中之一')
        if not (org_id or tgt_id):
            raise serializers.ValidationError('必须提供 organization_id 或 target_id 其中之一')
        return data
class UpdateScheduledScanSerializer(serializers.Serializer):
    """Partial-update serializer for a scheduled scan; all fields optional."""
    name = serializers.CharField(max_length=200, required=False)
    engine_ids = serializers.ListField(child=serializers.IntegerField(), required=False)
    organization_id = serializers.IntegerField(required=False, allow_null=True)
    target_id = serializers.IntegerField(required=False, allow_null=True)
    cron_expression = serializers.CharField(max_length=100, required=False)
    is_enabled = serializers.BooleanField(required=False)
    def validate_engine_ids(self, value):
        """An explicitly supplied engine list must not be empty."""
        if value is not None and not value:
            raise serializers.ValidationError("engine_ids 不能为空")
        return value
class ToggleScheduledScanSerializer(serializers.Serializer):
    """Serializer for toggling a scheduled scan's enabled flag."""
    is_enabled = serializers.BooleanField(help_text='是否启用')

View File

@@ -0,0 +1,55 @@
"""Subfinder Provider 配置序列化器"""
from rest_framework import serializers
class SubfinderProviderSettingsSerializer(serializers.Serializer):
    """Serializer for subfinder provider (API-key) settings.

    Supported providers:
    - fofa: email + api_key (composite)
    - censys: api_id + api_secret (composite)
    - hunter, shodan, zoomeye, securitytrails, threatbook, quake: api_key (single)

    Note: djangorestframework-camel-case converts camelCase <-> snake_case
    automatically, so snake_case is used throughout here.
    """

    VALID_PROVIDERS = {
        'fofa', 'hunter', 'shodan', 'censys',
        'zoomeye', 'securitytrails', 'threatbook', 'quake'
    }

    @staticmethod
    def _normalize_config(provider, config):
        """Coerce one provider's raw payload into its storage shape."""
        normalized = {'enabled': bool(config.get('enabled', False))}
        if provider == 'fofa':
            normalized['email'] = str(config.get('email', ''))
            normalized['api_key'] = str(config.get('api_key', ''))
        elif provider == 'censys':
            normalized['api_id'] = str(config.get('api_id', ''))
            normalized['api_secret'] = str(config.get('api_secret', ''))
        else:
            normalized['api_key'] = str(config.get('api_key', ''))
        return normalized

    def to_internal_value(self, data):
        """Validate the payload; unknown providers and non-dict entries are dropped."""
        if not isinstance(data, dict):
            raise serializers.ValidationError('Expected a dictionary')
        return {
            provider: self._normalize_config(provider, config)
            for provider, config in data.items()
            if provider in self.VALID_PROVIDERS and isinstance(config, dict)
        }

    def to_representation(self, instance):
        """Emit the database shape (camel-case middleware converts on output)."""
        if isinstance(instance, dict):
            return instance
        return getattr(instance, 'providers', {})

View File

@@ -0,0 +1,138 @@
"""Subfinder Provider 配置文件生成服务
负责生成 subfinder 的 provider-config.yaml 配置文件
"""
import logging
import os
from pathlib import Path
from typing import Optional
import yaml
from ..models import SubfinderProviderSettings
logger = logging.getLogger(__name__)
class SubfinderProviderConfigService:
    """Generates subfinder's provider-config.yaml from stored API-key settings."""

    # Per-provider credential layout:
    # - composite: multiple fields joined via 'format' (e.g. "{email}:{api_key}")
    # - single: one field copied verbatim
    PROVIDER_FORMATS = {
        'fofa': {'type': 'composite', 'format': '{email}:{api_key}'},
        'censys': {'type': 'composite', 'format': '{api_id}:{api_secret}'},
        'hunter': {'type': 'single', 'field': 'api_key'},
        'shodan': {'type': 'single', 'field': 'api_key'},
        'zoomeye': {'type': 'single', 'field': 'api_key'},
        'securitytrails': {'type': 'single', 'field': 'api_key'},
        'threatbook': {'type': 'single', 'field': 'api_key'},
        'quake': {'type': 'single', 'field': 'api_key'},
    }

    def generate(self, output_dir: str) -> Optional[str]:
        """
        Generate the provider-config.yaml file.

        Args:
            output_dir: directory that will contain the config file.

        Returns:
            Path of the generated file, or None when no provider is enabled.
        """
        settings = SubfinderProviderSettings.get_instance()
        config = {}
        has_enabled = False
        for provider, format_info in self.PROVIDER_FORMATS.items():
            provider_config = settings.providers.get(provider, {})
            if not provider_config.get('enabled'):
                config[provider] = []
                continue
            value = self._build_provider_value(provider, provider_config)
            if value:
                config[provider] = [value]  # subfinder expects a list of keys
                has_enabled = True
                logger.debug(f"Provider {provider} 已启用")
            else:
                # Enabled but incomplete credentials: emit an empty list.
                config[provider] = []
        if not has_enabled:
            logger.info("没有启用的 Provider跳过配置文件生成")
            return None
        # Ensure the output directory exists before writing.
        output_path = Path(output_dir) / 'provider-config.yaml'
        output_path.parent.mkdir(parents=True, exist_ok=True)
        # Default (block) YAML list style, matching subfinder's own config format.
        with open(output_path, 'w', encoding='utf-8') as f:
            yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
        # Owner read/write only: the file contains API credentials.
        os.chmod(output_path, 0o600)
        logger.info(f"Provider 配置文件已生成: {output_path}")
        return str(output_path)

    def _build_provider_value(self, provider: str, config: dict) -> Optional[str]:
        """Build one provider's credential string from its format rule.

        Args:
            provider: provider name.
            config: stored provider configuration dict.

        Returns:
            The credential string, or None when the config is incomplete.
        """
        format_info = self.PROVIDER_FORMATS.get(provider)
        if not format_info:
            return None
        if format_info['type'] == 'composite':
            # Local import: re is only needed for composite formats.
            import re
            format_str = format_info['format']
            try:
                # Extract field names, e.g. '{email}:{api_key}' -> ['email', 'api_key'].
                fields = re.findall(r'\{(\w+)\}', format_str)
                values = {}
                for field in fields:
                    # str(... or '') tolerates None / non-string stored values,
                    # which would otherwise raise AttributeError on .strip().
                    value = str(config.get(field) or '').strip()
                    if not value:
                        logger.debug(f"Provider {provider} 缺少字段 {field}")
                        return None
                    values[field] = value
                return format_str.format(**values)
            except (KeyError, ValueError) as e:
                logger.warning(f"构建 {provider} 配置值失败: {e}")
                return None
        # Single-field format.
        field = format_info['field']
        value = str(config.get(field) or '').strip()
        if not value:
            logger.debug(f"Provider {provider} 缺少字段 {field}")
            return None
        return value

    def cleanup(self, config_path: str) -> None:
        """Best-effort removal of a previously generated config file.

        Args:
            config_path: path of the config file to delete.
        """
        try:
            if config_path and Path(config_path).exists():
                Path(config_path).unlink()
                logger.debug(f"已清理配置文件: {config_path}")
        except Exception as e:
            # Cleanup failure must never break the scan flow.
            logger.warning(f"清理配置文件失败: {config_path} - {e}")

View File

@@ -1,6 +1,6 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import ScanViewSet, ScheduledScanViewSet
from .views import ScanViewSet, ScheduledScanViewSet, ScanLogListView, SubfinderProviderSettingsView
from .notifications.views import notification_callback
from apps.asset.views import (
SubdomainSnapshotViewSet, WebsiteSnapshotViewSet, DirectorySnapshotViewSet,
@@ -31,6 +31,10 @@ urlpatterns = [
path('', include(router.urls)),
# Worker 回调 API
path('callbacks/notification/', notification_callback, name='notification-callback'),
# API Key 配置
path('settings/api-keys/', SubfinderProviderSettingsView.as_view(), name='subfinder-provider-settings'),
# 扫描日志 API
path('scans/<int:scan_id>/logs/', ScanLogListView.as_view(), name='scan-logs-list'),
# 嵌套路由:/api/scans/{scan_pk}/xxx/
path('scans/<int:scan_pk>/subdomains/', scan_subdomains_list, name='scan-subdomains-list'),
path('scans/<int:scan_pk>/subdomains/export/', scan_subdomains_export, name='scan-subdomains-export'),

View File

@@ -11,6 +11,7 @@ from .wordlist_helpers import ensure_wordlist_local
from .nuclei_helpers import ensure_nuclei_templates_local
from .performance import FlowPerformanceTracker, CommandPerformanceTracker
from .workspace_utils import setup_scan_workspace, setup_scan_directory
from .user_logger import user_log
from . import config_parser
__all__ = [
@@ -31,6 +32,8 @@ __all__ = [
# 性能监控
'FlowPerformanceTracker', # Flow 性能追踪器(含系统资源采样)
'CommandPerformanceTracker', # 命令性能追踪器
# 扫描日志
'user_log', # 用户可见扫描日志记录
# 配置解析
'config_parser',
]

View File

@@ -48,7 +48,7 @@ ENABLE_COMMAND_LOGGING = getattr(settings, 'ENABLE_COMMAND_LOGGING', True)
# 动态并发控制阈值(可在 Django settings 中覆盖)
SCAN_CPU_HIGH = getattr(settings, 'SCAN_CPU_HIGH', 90.0) # CPU 高水位(百分比)
SCAN_MEM_HIGH = getattr(settings, 'SCAN_MEM_HIGH', 80.0) # 内存高水位(百分比)
SCAN_LOAD_CHECK_INTERVAL = getattr(settings, 'SCAN_LOAD_CHECK_INTERVAL', 30) # 负载检查间隔(秒)
SCAN_LOAD_CHECK_INTERVAL = getattr(settings, 'SCAN_LOAD_CHECK_INTERVAL', 180) # 负载检查间隔(秒)
SCAN_COMMAND_STARTUP_DELAY = getattr(settings, 'SCAN_COMMAND_STARTUP_DELAY', 5) # 命令启动前等待(秒)
_ACTIVE_COMMANDS = 0
@@ -74,7 +74,7 @@ def _wait_for_system_load() -> None:
return
logger.info(
"系统负载较高,暂缓启动: cpu=%.1f%% (阈值 %.1f%%), mem=%.1f%% (阈值 %.1f%%)",
"系统负载较高,任务将排队执行防止oom: cpu=%.1f%% (阈值 %.1f%%), mem=%.1f%% (阈值 %.1f%%)",
cpu,
SCAN_CPU_HIGH,
mem,

View File

@@ -0,0 +1,56 @@
"""
扫描日志记录器
提供统一的日志记录接口,用于在 Flow 中记录用户可见的扫描进度日志。
特性:
- 简单的函数式 API
- 只写入数据库ScanLog 表),不写 Python logging
- 错误容忍(数据库失败不影响扫描执行)
职责分离:
- user_log: 用户可见日志(写数据库,前端展示)
- logger: 开发者日志(写日志文件/控制台,调试用)
使用示例:
from apps.scan.utils import user_log
# 用户日志(写数据库)
user_log(scan_id, "port_scan", "Starting port scan")
user_log(scan_id, "port_scan", "naabu completed: found 120 ports")
# 开发者日志(写日志文件)
logger.info("✓ 工具 %s 执行完成 - 记录数: %d", tool_name, count)
"""
import logging
from django.db import DatabaseError
logger = logging.getLogger(__name__)
def user_log(scan_id: int, stage: str, message: str, level: str = "info"):
    """
    Persist a user-visible scan progress entry (database only).

    Args:
        scan_id: scan task id the entry belongs to.
        stage: stage name, e.g. "port_scan", "site_scan".
        message: progress text to show the user.
        level: log level, "info" by default; also "warning", "error".

    The stored content has the form "[{stage}] {message}".  Database errors
    are logged and swallowed so logging can never abort a running scan.
    """
    entry = f"[{stage}] {message}"
    try:
        from apps.scan.models import ScanLog
        ScanLog.objects.create(scan_id=scan_id, level=level, content=entry)
    except DatabaseError as e:
        logger.error("ScanLog write failed - scan_id=%s, error=%s", scan_id, e)
    except Exception as e:
        logger.error("ScanLog write unexpected error - scan_id=%s, error=%s", scan_id, e)

View File

@@ -2,8 +2,12 @@
from .scan_views import ScanViewSet
from .scheduled_scan_views import ScheduledScanViewSet
from .scan_log_views import ScanLogListView
from .subfinder_provider_settings_views import SubfinderProviderSettingsView
__all__ = [
'ScanViewSet',
'ScheduledScanViewSet',
'ScanLogListView',
'SubfinderProviderSettingsView',
]

View File

@@ -0,0 +1,56 @@
"""
扫描日志 API
提供扫描日志查询接口,支持游标分页用于增量轮询。
"""
from rest_framework.views import APIView
from rest_framework.response import Response
from apps.scan.models import ScanLog
from apps.scan.serializers import ScanLogSerializer
class ScanLogListView(APIView):
    """
    GET /scans/{scan_id}/logs/

    Cursor-paginated log listing for incremental polling.

    Query params:
        afterId: only return logs with id greater than this value
                 (an id cursor avoids duplicates from equal timestamps)
        limit:   max rows to return (default 200, clamped to 1..1000)

    Response:
        results: serialized log entries, ordered by id
        hasMore: whether more logs exist beyond this page
    """
    def get(self, request, scan_id: int):
        after_id = request.query_params.get('afterId')
        try:
            # Clamp to [1, 1000]: a zero or negative limit would otherwise
            # build a negative slice, which querysets do not support.
            limit = max(1, min(int(request.query_params.get('limit', 200)), 1000))
        except (ValueError, TypeError):
            limit = 200
        # Order by auto-increment id to keep the cursor stable.
        queryset = ScanLog.objects.filter(scan_id=scan_id).order_by('id')
        if after_id:
            try:
                queryset = queryset.filter(id__gt=int(after_id))
            except (ValueError, TypeError):
                pass  # ignore a malformed cursor rather than failing the request
        # Fetch one extra row to detect whether another page exists.
        logs = list(queryset[:limit + 1])
        has_more = len(logs) > limit
        if has_more:
            logs = logs[:limit]
        return Response({
            'results': ScanLogSerializer(logs, many=True).data,
            'hasMore': has_more,
        })

View File

@@ -0,0 +1,38 @@
"""Subfinder Provider 配置视图"""
import logging
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from ..models import SubfinderProviderSettings
from ..serializers import SubfinderProviderSettingsSerializer
logger = logging.getLogger(__name__)
class SubfinderProviderSettingsView(APIView):
    """Subfinder provider settings endpoint.

    GET /api/settings/api-keys/ - fetch the stored provider configuration
    PUT /api/settings/api-keys/ - merge-update the provider configuration
    """
    def get(self, request):
        """Return the current provider settings dictionary."""
        settings = SubfinderProviderSettings.get_instance()
        serializer = SubfinderProviderSettingsSerializer(settings.providers)
        return Response(serializer.data)
    def put(self, request):
        """Merge validated provider entries into the stored settings."""
        serializer = SubfinderProviderSettingsSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        settings = SubfinderProviderSettings.get_instance()
        # dict.update: providers absent from the payload keep their old values.
        settings.providers.update(serializer.validated_data)
        settings.save()
        logger.info("Subfinder Provider 配置已更新")
        return Response(SubfinderProviderSettingsSerializer(settings.providers).data)

View File

@@ -102,7 +102,11 @@ RUN pip install uv --break-system-packages && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# 6. 复制后端代码
# 6. 设置 Prefect 配置目录(避免 home 目录不存在的警告)
ENV PREFECT_HOME=/app/.prefect
RUN mkdir -p /app/.prefect
# 7. 复制后端代码
COPY backend /app/backend
ENV PYTHONPATH=/app/backend

View File

@@ -0,0 +1,306 @@
"use client"
import React, { useState, useEffect } from 'react'
import { IconEye, IconEyeOff, IconWorldSearch, IconRadar2 } from '@tabler/icons-react'
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { Switch } from '@/components/ui/switch'
import { Separator } from '@/components/ui/separator'
import { Badge } from '@/components/ui/badge'
import { Skeleton } from '@/components/ui/skeleton'
import { useApiKeySettings, useUpdateApiKeySettings } from '@/hooks/use-api-key-settings'
import type { ApiKeySettings } from '@/types/api-key-settings.types'
// Password input with a show/hide visibility toggle.
function PasswordInput({ value, onChange, placeholder, disabled }: {
  value: string
  onChange: (value: string) => void
  placeholder?: string
  disabled?: boolean
}) {
  const [visible, setVisible] = useState(false)
  const toggleVisibility = () => setVisible(!visible)
  return (
    <div className="relative">
      <Input
        type={visible ? 'text' : 'password'}
        value={value}
        onChange={(e) => onChange(e.target.value)}
        placeholder={placeholder}
        disabled={disabled}
        className="pr-10"
      />
      <button
        type="button"
        onClick={toggleVisibility}
        className="absolute right-3 top-1/2 -translate-y-1/2 text-muted-foreground hover:text-foreground"
      >
        {visible ? <IconEyeOff className="h-4 w-4" /> : <IconEye className="h-4 w-4" />}
      </button>
    </div>
  )
}
// Provider catalogue: per-source display metadata plus the credential fields
// each provider's form renders. Field `name`s are camelCase; the backend
// presumably converts them to snake_case (camel-case middleware) — confirm.
const PROVIDERS = [
  {
    key: 'fofa',
    name: 'FOFA',
    description: '网络空间测绘平台,提供全球互联网资产搜索',
    icon: IconWorldSearch,
    color: 'text-blue-500',
    bgColor: 'bg-blue-500/10',
    fields: [
      { name: 'email', label: '邮箱', type: 'text', placeholder: 'your@email.com' },
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 FOFA API Key' },
    ],
    docUrl: 'https://fofa.info/api',
  },
  {
    key: 'hunter',
    name: 'Hunter (鹰图)',
    description: '奇安信威胁情报平台,提供网络空间资产测绘',
    icon: IconRadar2,
    color: 'text-orange-500',
    bgColor: 'bg-orange-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 Hunter API Key' },
    ],
    docUrl: 'https://hunter.qianxin.com/',
  },
  {
    key: 'shodan',
    name: 'Shodan',
    description: '全球最大的互联网设备搜索引擎',
    icon: IconWorldSearch,
    color: 'text-red-500',
    bgColor: 'bg-red-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 Shodan API Key' },
    ],
    docUrl: 'https://developer.shodan.io/',
  },
  {
    key: 'censys',
    name: 'Censys',
    description: '互联网资产搜索和监控平台',
    icon: IconWorldSearch,
    color: 'text-purple-500',
    bgColor: 'bg-purple-500/10',
    fields: [
      { name: 'apiId', label: 'API ID', type: 'text', placeholder: '输入 Censys API ID' },
      { name: 'apiSecret', label: 'API Secret', type: 'password', placeholder: '输入 Censys API Secret' },
    ],
    docUrl: 'https://search.censys.io/api',
  },
  {
    key: 'zoomeye',
    name: 'ZoomEye (钟馗之眼)',
    description: '知道创宇网络空间搜索引擎',
    icon: IconWorldSearch,
    color: 'text-green-500',
    bgColor: 'bg-green-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 ZoomEye API Key' },
    ],
    docUrl: 'https://www.zoomeye.org/doc',
  },
  {
    key: 'securitytrails',
    name: 'SecurityTrails',
    description: 'DNS 历史记录和子域名数据平台',
    icon: IconWorldSearch,
    color: 'text-cyan-500',
    bgColor: 'bg-cyan-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 SecurityTrails API Key' },
    ],
    docUrl: 'https://securitytrails.com/corp/api',
  },
  {
    key: 'threatbook',
    name: 'ThreatBook (微步在线)',
    description: '威胁情报平台,提供域名和 IP 情报查询',
    icon: IconWorldSearch,
    color: 'text-indigo-500',
    bgColor: 'bg-indigo-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 ThreatBook API Key' },
    ],
    docUrl: 'https://x.threatbook.com/api',
  },
  {
    key: 'quake',
    name: 'Quake (360)',
    description: '360 网络空间测绘系统',
    icon: IconWorldSearch,
    color: 'text-teal-500',
    bgColor: 'bg-teal-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 Quake API Key' },
    ],
    docUrl: 'https://quake.360.net/quake/#/help',
  },
]
// Default (all-disabled) settings used until the server response arrives.
const DEFAULT_SETTINGS: ApiKeySettings = {
  fofa: { enabled: false, email: '', apiKey: '' },
  hunter: { enabled: false, apiKey: '' },
  shodan: { enabled: false, apiKey: '' },
  censys: { enabled: false, apiId: '', apiSecret: '' },
  zoomeye: { enabled: false, apiKey: '' },
  securitytrails: { enabled: false, apiKey: '' },
  threatbook: { enabled: false, apiKey: '' },
  quake: { enabled: false, apiKey: '' },
}
// Settings page for managing subfinder provider API keys.
export default function ApiKeysSettingsPage() {
  const { data: settings, isLoading } = useApiKeySettings()
  const updateMutation = useUpdateApiKeySettings()
  const [formData, setFormData] = useState<ApiKeySettings>(DEFAULT_SETTINGS)
  const [hasChanges, setHasChanges] = useState(false)
  // Sync the form with server data once (whenever) it loads.
  useEffect(() => {
    if (settings) {
      setFormData({ ...DEFAULT_SETTINGS, ...settings })
      setHasChanges(false)
    }
  }, [settings])
  // Update one field of one provider and mark the form dirty.
  const updateProvider = (providerKey: string, field: string, value: any) => {
    setFormData(prev => ({
      ...prev,
      [providerKey]: {
        ...prev[providerKey as keyof ApiKeySettings],
        [field]: value,
      }
    }))
    setHasChanges(true)
  }
  // NOTE(review): hasChanges is cleared optimistically before the mutation
  // settles; if the save fails, the button stays disabled — confirm intended.
  const handleSave = async () => {
    updateMutation.mutate(formData)
    setHasChanges(false)
  }
  const enabledCount = Object.values(formData).filter((p: any) => p?.enabled).length
  // Skeleton placeholders while the settings request is in flight.
  if (isLoading) {
    return (
      <div className="p-4 md:p-6 space-y-6">
        <div>
          <Skeleton className="h-8 w-48" />
          <Skeleton className="h-4 w-96 mt-2" />
        </div>
        <div className="grid gap-4">
          {[1, 2, 3].map((i) => (
            <Skeleton key={i} className="h-24 w-full" />
          ))}
        </div>
      </div>
    )
  }
  return (
    <div className="p-4 md:p-6 space-y-6">
      {/* Page header */}
      <div>
        <div className="flex items-center gap-2">
          <h1 className="text-2xl font-semibold">API </h1>
          {enabledCount > 0 && (
            <Badge variant="secondary">{enabledCount} </Badge>
          )}
        </div>
        <p className="text-muted-foreground mt-1">
          API subfinder 使
        </p>
      </div>
      {/* Provider card list */}
      <div className="grid gap-4">
        {PROVIDERS.map((provider) => {
          const data = formData[provider.key as keyof ApiKeySettings] || {}
          const isEnabled = (data as any)?.enabled || false
          return (
            <Card key={provider.key}>
              <CardHeader className="pb-4">
                <div className="flex items-center justify-between">
                  <div className="flex items-center gap-3">
                    <div className={`flex h-10 w-10 items-center justify-center rounded-lg ${provider.bgColor}`}>
                      <provider.icon className={`h-5 w-5 ${provider.color}`} />
                    </div>
                    <div>
                      <div className="flex items-center gap-2">
                        <CardTitle className="text-base">{provider.name}</CardTitle>
                        {isEnabled && <Badge variant="outline" className="text-xs text-green-600"></Badge>}
                      </div>
                      <CardDescription>{provider.description}</CardDescription>
                    </div>
                  </div>
                  <Switch
                    checked={isEnabled}
                    onCheckedChange={(checked) => updateProvider(provider.key, 'enabled', checked)}
                  />
                </div>
              </CardHeader>
              {/* Expanded credential form (rendered only while enabled) */}
              {isEnabled && (
                <CardContent className="pt-0">
                  <Separator className="mb-4" />
                  <div className="space-y-4">
                    {provider.fields.map((field) => (
                      <div key={field.name} className="space-y-2">
                        <label className="text-sm font-medium">{field.label}</label>
                        {field.type === 'password' ? (
                          <PasswordInput
                            value={(data as any)[field.name] || ''}
                            onChange={(value) => updateProvider(provider.key, field.name, value)}
                            placeholder={field.placeholder}
                          />
                        ) : (
                          <Input
                            type="text"
                            value={(data as any)[field.name] || ''}
                            onChange={(e) => updateProvider(provider.key, field.name, e.target.value)}
                            placeholder={field.placeholder}
                          />
                        )}
                      </div>
                    ))}
                    <p className="text-xs text-muted-foreground">
                      API Key
                      <a
                        href={provider.docUrl}
                        target="_blank"
                        rel="noopener noreferrer"
                        className="text-primary hover:underline ml-1"
                      >
                        {provider.docUrl}
                      </a>
                    </p>
                  </div>
                </CardContent>
              )}
            </Card>
          )
        })}
      </div>
      {/* Save button */}
      <div className="flex justify-end">
        <Button
          onClick={handleSave}
          disabled={updateMutation.isPending || !hasChanges}
        >
          {updateMutation.isPending ? '保存中...' : '保存配置'}
        </Button>
      </div>
    </div>
  )
}

View File

@@ -17,6 +17,7 @@ import {
IconBug, // Vulnerability icon
IconMessageReport, // Feedback icon
IconSearch, // Search icon
IconKey, // API Key icon
} from "@tabler/icons-react"
// Import internationalization hook
import { useTranslations } from 'next-intl'
@@ -168,6 +169,11 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
url: "/settings/notifications/",
icon: IconSettings,
},
{
name: t('apiKeys'),
url: "/settings/api-keys/",
icon: IconKey,
},
]
return (

View File

@@ -0,0 +1,111 @@
"use client"
import { useEffect, useRef, useMemo } from "react"
import type { ScanLog } from "@/services/scan.service"
interface ScanLogListProps {
logs: ScanLog[]
loading?: boolean
}
/**
 * Format an ISO timestamp as HH:mm:ss (24-hour clock, zh-CN locale).
 */
function formatTime(isoString: string): string {
  try {
    const parsed = new Date(isoString)
    const options: Intl.DateTimeFormatOptions = {
      hour: '2-digit',
      minute: '2-digit',
      second: '2-digit',
      hour12: false,
    }
    return parsed.toLocaleTimeString('zh-CN', options)
  } catch {
    // Fall back to the raw input when formatting throws.
    return isoString
  }
}
/**
 * Escape HTML special characters to prevent XSS when injecting log text.
 */
function escapeHtml(text: string): string {
  const entities: Record<string, string> = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&#039;',
  }
  return text.replace(/[&<>"']/g, (ch) => entities[ch])
}
/**
 * Scan log list component.
 *
 * Features:
 * - Pre-renders one HTML string to cut DOM node count for large logs
 * - Colour-codes levels: info = default, warning = yellow, error = red
 * - Auto-scrolls to the bottom only while the user is already near it
 */
export function ScanLogList({ logs, loading }: ScanLogListProps) {
  const containerRef = useRef<HTMLDivElement>(null)
  const isAtBottomRef = useRef(true) // tracks whether the user is scrolled to the bottom
  // Pre-render the whole log list as a single HTML string (memoised on logs).
  const htmlContent = useMemo(() => {
    if (logs.length === 0) return ''
    return logs.map(log => {
      const time = formatTime(log.createdAt)
      const content = escapeHtml(log.content)
      const levelStyle = log.level === 'error'
        ? 'color:#ef4444'
        : log.level === 'warning'
        ? 'color:#eab308'
        : ''
      return `<div style="line-height:1.625;word-break:break-all;${levelStyle}"><span style="color:#6b7280">${time}</span> ${content}</div>`
    }).join('')
  }, [logs])
  // Track scroll position to know whether the user is at the bottom.
  useEffect(() => {
    const container = containerRef.current
    if (!container) return
    const handleScroll = () => {
      const { scrollTop, scrollHeight, clientHeight } = container
      // 30px tolerance counts as "near the bottom".
      isAtBottomRef.current = scrollHeight - scrollTop - clientHeight < 30
    }
    container.addEventListener('scroll', handleScroll)
    return () => container.removeEventListener('scroll', handleScroll)
  }, [])
  // Auto-scroll on new content only when the user was already at the bottom.
  useEffect(() => {
    if (containerRef.current && isAtBottomRef.current) {
      containerRef.current.scrollTop = containerRef.current.scrollHeight
    }
  }, [htmlContent])
  return (
    <div
      ref={containerRef}
      className="h-[400px] overflow-y-auto font-mono text-[11px] p-3 bg-muted/30 rounded-lg"
    >
      {logs.length === 0 && !loading && (
        <div className="text-muted-foreground text-center py-8">
        </div>
      )}
      {/* Content is escaped via escapeHtml above, so this injection is safe. */}
      {htmlContent && (
        <div dangerouslySetInnerHTML={{ __html: htmlContent }} />
      )}
      {loading && logs.length === 0 && (
        <div className="text-muted-foreground text-center py-8">
          ...
        </div>
      )}
    </div>
  )
}

View File

@@ -1,6 +1,7 @@
"use client"
import * as React from "react"
import { useState } from "react"
import {
Dialog,
DialogContent,
@@ -9,6 +10,7 @@ import {
} from "@/components/ui/dialog"
import { Badge } from "@/components/ui/badge"
import { Separator } from "@/components/ui/separator"
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
import {
IconCircleCheck,
IconLoader,
@@ -19,6 +21,8 @@ import {
import { cn } from "@/lib/utils"
import { useTranslations, useLocale } from "next-intl"
import type { ScanStage, ScanRecord, StageProgress, StageStatus } from "@/types/scan.types"
import { useScanLogs } from "@/hooks/use-scan-logs"
import { ScanLogList } from "./scan-log-list"
/**
* Scan stage details
@@ -190,12 +194,26 @@ export function ScanProgressDialog({
}: ScanProgressDialogProps) {
const t = useTranslations("scan.progress")
const locale = useLocale()
const [activeTab, setActiveTab] = useState<'stages' | 'logs'>('stages')
// 判断扫描是否正在运行(用于控制轮询)
const isRunning = data?.status === 'running' || data?.status === 'initiated'
// 日志轮询 Hook
const { logs, loading: logsLoading } = useScanLogs({
scanId: data?.id ?? 0,
enabled: open && activeTab === 'logs' && !!data?.id,
pollingInterval: isRunning ? 3000 : 0, // 运行中时 3s 轮询,否则不轮询
})
if (!data) return null
// 固定宽度,切换 Tab 时不变化
const dialogWidth = 'sm:max-w-[600px] sm:min-w-[550px]'
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-[500px] sm:min-w-[450px]">
<DialogContent className={cn(dialogWidth, "transition-all duration-200")}>
<DialogHeader>
<DialogTitle className="flex items-center gap-2">
<ScanStatusIcon status={data.status} />
@@ -244,37 +262,26 @@ export function ScanProgressDialog({
<Separator />
{/* Total progress */}
<div className="space-y-2">
<div className="flex items-center justify-between text-sm">
<span className="font-medium">{t("totalProgress")}</span>
<span className="font-mono text-muted-foreground">{data.progress}%</span>
{/* Tab 切换 */}
<Tabs value={activeTab} onValueChange={(v) => setActiveTab(v as 'stages' | 'logs')}>
<TabsList className="grid w-full grid-cols-2">
<TabsTrigger value="stages">{t("tab_stages")}</TabsTrigger>
<TabsTrigger value="logs">{t("tab_logs")}</TabsTrigger>
</TabsList>
</Tabs>
{/* Tab 内容 */}
{activeTab === 'stages' ? (
/* Stage list */
<div className="space-y-2 max-h-[300px] overflow-y-auto">
{data.stages.map((stage) => (
<StageRow key={stage.stage} stage={stage} t={t} />
))}
</div>
<div className="h-2 bg-primary/10 rounded-full overflow-hidden border border-border">
<div
className={`h-full transition-all ${
data.status === "completed" ? "bg-[#238636]/80" :
data.status === "failed" ? "bg-[#da3633]/80" :
data.status === "running" ? "bg-[#d29922]/80 progress-striped" :
data.status === "cancelled" ? "bg-[#848d97]/80" :
data.status === "cancelling" ? "bg-[#d29922]/80 progress-striped" :
data.status === "initiated" ? "bg-[#d29922]/80 progress-striped" :
"bg-muted-foreground/80"
}`}
style={{ width: `${data.status === "completed" ? 100 : data.progress}%` }}
/>
</div>
</div>
<Separator />
{/* Stage list */}
<div className="space-y-2 max-h-[300px] overflow-y-auto">
{data.stages.map((stage) => (
<StageRow key={stage.stage} stage={stage} t={t} />
))}
</div>
) : (
/* Log list */
<ScanLogList logs={logs} loading={logsLoading} />
)}
</DialogContent>
</Dialog>
)

View File

@@ -36,6 +36,7 @@ const converter = new AnsiToHtml({
export function AnsiLogViewer({ content, className }: AnsiLogViewerProps) {
const containerRef = useRef<HTMLPreElement>(null)
const isAtBottomRef = useRef(true) // 跟踪用户是否在底部
// 将 ANSI 转换为 HTML
const htmlContent = useMemo(() => {
@@ -43,9 +44,24 @@ export function AnsiLogViewer({ content, className }: AnsiLogViewerProps) {
return converter.toHtml(content)
}, [content])
// 自动滚动到底部
// 监听滚动事件,检测用户是否在底部
useEffect(() => {
if (containerRef.current) {
const container = containerRef.current
if (!container) return
const handleScroll = () => {
const { scrollTop, scrollHeight, clientHeight } = container
// 允许 30px 的容差,认为在底部附近
isAtBottomRef.current = scrollHeight - scrollTop - clientHeight < 30
}
container.addEventListener('scroll', handleScroll)
return () => container.removeEventListener('scroll', handleScroll)
}, [])
// 只有用户在底部时才自动滚动
useEffect(() => {
if (containerRef.current && isAtBottomRef.current) {
containerRef.current.scrollTop = containerRef.current.scrollHeight
}
}, [htmlContent])

View File

@@ -0,0 +1,29 @@
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import { ApiKeySettingsService } from '@/services/api-key-settings.service'
import type { ApiKeySettings } from '@/types/api-key-settings.types'
import { useToastMessages } from '@/lib/toast-helpers'
import { getErrorCode } from '@/lib/response-parser'
/**
 * Query hook that loads the current API key settings.
 *
 * Cached under the `['api-key-settings']` key; invalidated by
 * `useUpdateApiKeySettings` after a successful save.
 */
export function useApiKeySettings() {
  const fetchSettings = () => ApiKeySettingsService.getSettings()
  return useQuery({ queryKey: ['api-key-settings'], queryFn: fetchSettings })
}
export function useUpdateApiKeySettings() {
const qc = useQueryClient()
const toastMessages = useToastMessages()
return useMutation({
mutationFn: (data: Partial<ApiKeySettings>) =>
ApiKeySettingsService.updateSettings(data),
onSuccess: () => {
qc.invalidateQueries({ queryKey: ['api-key-settings'] })
toastMessages.success('toast.apiKeys.settings.success')
},
onError: (error: any) => {
toastMessages.errorFromCode(getErrorCode(error?.response?.data), 'toast.apiKeys.settings.error')
},
})
}

View File

@@ -0,0 +1,106 @@
/**
* 扫描日志轮询 Hook
*
* 功能:
* - 初始加载获取全部日志
* - 增量轮询获取新日志3s 间隔)
* - 扫描结束后停止轮询
*/
import { useState, useEffect, useCallback, useRef } from 'react'
import { getScanLogs, type ScanLog } from '@/services/scan.service'
/** Options accepted by the useScanLogs hook. */
interface UseScanLogsOptions {
  scanId: number           // ID of the scan whose logs are fetched
  enabled?: boolean        // set false to pause all fetching (default true)
  pollingInterval?: number // poll period in ms, default 3000
}

/** Value returned by the useScanLogs hook. */
interface UseScanLogsReturn {
  logs: ScanLog[]      // accumulated log lines, oldest first
  loading: boolean     // true while a fetch is in flight
  refetch: () => void  // clear state and reload from scratch
}
/**
 * Poll the logs of a scan.
 *
 * - Initial load fetches up to 200 lines.
 * - While enabled and pollingInterval > 0, fetches increments every
 *   `pollingInterval` ms using the highest seen log ID as a cursor.
 * - Pass pollingInterval <= 0 (e.g. when the scan has finished) to stop
 *   polling entirely.
 */
export function useScanLogs({
  scanId,
  enabled = true,
  pollingInterval = 3000,
}: UseScanLogsOptions): UseScanLogsReturn {
  const [logs, setLogs] = useState<ScanLog[]>([])
  const [loading, setLoading] = useState(false)
  // Cursor: highest log ID seen so far. IDs are unique and increasing, so
  // they make a safer cursor than timestamps (which can collide).
  const lastLogId = useRef<number | null>(null)
  // Guards against setState after unmount (a fetch may resolve late).
  const isMounted = useRef(true)

  const fetchLogs = useCallback(async (incremental = false) => {
    if (!enabled || !isMounted.current) return
    setLoading(true)
    try {
      const params: { limit: number; afterId?: number } = { limit: 200 }
      if (incremental && lastLogId.current !== null) {
        params.afterId = lastLogId.current
      }
      const response = await getScanLogs(scanId, params)
      const newLogs = response.results
      if (!isMounted.current) return
      if (newLogs.length > 0) {
        lastLogId.current = newLogs[newLogs.length - 1].id
        if (incremental) {
          // De-duplicate by ID to survive React Strict Mode double-invocation
          // and races between overlapping polls.
          setLogs(prev => {
            const existingIds = new Set(prev.map(l => l.id))
            const uniqueNewLogs = newLogs.filter(l => !existingIds.has(l.id))
            return uniqueNewLogs.length > 0 ? [...prev, ...uniqueNewLogs] : prev
          })
        } else {
          setLogs(newLogs)
        }
      }
    } catch (error) {
      console.error('Failed to fetch scan logs:', error)
    } finally {
      if (isMounted.current) {
        setLoading(false)
      }
    }
  }, [scanId, enabled])

  // Initial full load; reset state whenever the scan or enabled flag changes.
  // (fetchLogs only changes identity with scanId/enabled, so including it in
  // the deps does not add extra triggers.)
  useEffect(() => {
    isMounted.current = true
    if (enabled) {
      setLogs([])
      lastLogId.current = null
      fetchLogs(false)
    }
    return () => {
      isMounted.current = false
    }
  }, [scanId, enabled, fetchLogs])

  // Incremental polling. A non-positive interval means "do not poll":
  // previously setInterval(fn, 0) fired continuously when callers passed 0
  // to disable polling for finished scans.
  useEffect(() => {
    if (!enabled || pollingInterval <= 0) return
    const interval = setInterval(() => {
      fetchLogs(true)
    }, pollingInterval)
    return () => clearInterval(interval)
  }, [enabled, pollingInterval, fetchLogs])

  // Clear everything and reload from scratch.
  const refetch = useCallback(() => {
    setLogs([])
    lastLogId.current = null
    fetchLogs(false)
  }, [fetchLogs])

  return { logs, loading, refetch }
}

View File

@@ -319,6 +319,7 @@
"workers": "Workers",
"systemLogs": "System Logs",
"notifications": "Notifications",
"apiKeys": "API Keys",
"help": "Get Help",
"feedback": "Feedback"
},
@@ -737,6 +738,8 @@
"status": "Status",
"errorReason": "Error Reason",
"totalProgress": "Total Progress",
"tab_stages": "Stages",
"tab_logs": "Logs",
"status_running": "Scanning",
"status_cancelled": "Cancelled",
"status_completed": "Completed",
@@ -1688,6 +1691,12 @@
"error": "Notification connection error: {message}"
}
},
"apiKeys": {
"settings": {
"success": "API key settings saved",
"error": "Failed to save API key settings"
}
},
"tool": {
"create": {
"success": "Tool created successfully",

View File

@@ -319,6 +319,7 @@
"workers": "扫描节点",
"systemLogs": "系统日志",
"notifications": "通知设置",
"apiKeys": "API 密钥",
"help": "获取帮助",
"feedback": "反馈建议"
},
@@ -737,6 +738,8 @@
"status": "状态",
"errorReason": "错误原因",
"totalProgress": "总进度",
"tab_stages": "阶段",
"tab_logs": "日志",
"status_running": "扫描中",
"status_cancelled": "已取消",
"status_completed": "已完成",
@@ -1688,6 +1691,12 @@
"error": "通知连接错误: {message}"
}
},
"apiKeys": {
"settings": {
"success": "API 密钥配置已保存",
"error": "保存 API 密钥配置失败"
}
},
"tool": {
"create": {
"success": "工具创建成功",

View File

@@ -0,0 +1,187 @@
import type { Directory, DirectoryListResponse } from '@/types/directory.types'
// Mock directory-bruteforce results for the demo targets
// (acme.com / api.acme.com / techstart.io). `words`/`lines` are null for
// binary responses (see the backup.zip entry).
export const mockDirectories: Directory[] = [
  {
    id: 1,
    url: 'https://acme.com/admin',
    status: 200,
    contentLength: 12345,
    words: 1234,
    lines: 89,
    contentType: 'text/html',
    duration: 0.234,
    websiteUrl: 'https://acme.com',
    createdAt: '2024-12-28T10:00:00Z',
  },
  {
    id: 2,
    url: 'https://acme.com/api',
    status: 301,
    contentLength: 0,
    words: 0,
    lines: 0,
    contentType: 'text/html',
    duration: 0.056,
    websiteUrl: 'https://acme.com',
    createdAt: '2024-12-28T10:01:00Z',
  },
  {
    id: 3,
    url: 'https://acme.com/login',
    status: 200,
    contentLength: 8765,
    words: 567,
    lines: 45,
    contentType: 'text/html',
    duration: 0.189,
    websiteUrl: 'https://acme.com',
    createdAt: '2024-12-28T10:02:00Z',
  },
  {
    id: 4,
    url: 'https://acme.com/dashboard',
    status: 302,
    contentLength: 0,
    words: 0,
    lines: 0,
    contentType: 'text/html',
    duration: 0.078,
    websiteUrl: 'https://acme.com',
    createdAt: '2024-12-28T10:03:00Z',
  },
  {
    id: 5,
    url: 'https://acme.com/static/js/app.js',
    status: 200,
    contentLength: 456789,
    words: 12345,
    lines: 5678,
    contentType: 'application/javascript',
    duration: 0.345,
    websiteUrl: 'https://acme.com',
    createdAt: '2024-12-28T10:04:00Z',
  },
  {
    id: 6,
    url: 'https://acme.com/.git/config',
    status: 200,
    contentLength: 234,
    words: 45,
    lines: 12,
    contentType: 'text/plain',
    duration: 0.023,
    websiteUrl: 'https://acme.com',
    createdAt: '2024-12-28T10:05:00Z',
  },
  {
    id: 7,
    url: 'https://acme.com/backup.zip',
    status: 200,
    contentLength: 12345678,
    words: null,
    lines: null,
    contentType: 'application/zip',
    duration: 1.234,
    websiteUrl: 'https://acme.com',
    createdAt: '2024-12-28T10:06:00Z',
  },
  {
    id: 8,
    url: 'https://acme.com/robots.txt',
    status: 200,
    contentLength: 567,
    words: 89,
    lines: 23,
    contentType: 'text/plain',
    duration: 0.034,
    websiteUrl: 'https://acme.com',
    createdAt: '2024-12-28T10:07:00Z',
  },
  {
    id: 9,
    url: 'https://api.acme.com/v1/health',
    status: 200,
    contentLength: 45,
    words: 5,
    lines: 1,
    contentType: 'application/json',
    duration: 0.012,
    websiteUrl: 'https://api.acme.com',
    createdAt: '2024-12-28T10:08:00Z',
  },
  {
    id: 10,
    url: 'https://api.acme.com/swagger-ui.html',
    status: 200,
    contentLength: 23456,
    words: 1234,
    lines: 234,
    contentType: 'text/html',
    duration: 0.267,
    websiteUrl: 'https://api.acme.com',
    createdAt: '2024-12-28T10:09:00Z',
  },
  {
    id: 11,
    url: 'https://techstart.io/wp-admin',
    status: 302,
    contentLength: 0,
    words: 0,
    lines: 0,
    contentType: 'text/html',
    duration: 0.089,
    websiteUrl: 'https://techstart.io',
    createdAt: '2024-12-26T08:45:00Z',
  },
  {
    id: 12,
    url: 'https://techstart.io/wp-login.php',
    status: 200,
    contentLength: 4567,
    words: 234,
    lines: 78,
    contentType: 'text/html',
    duration: 0.156,
    websiteUrl: 'https://techstart.io',
    createdAt: '2024-12-26T08:46:00Z',
  },
]
export function getMockDirectories(params?: {
page?: number
pageSize?: number
filter?: string
targetId?: number
scanId?: number
}): DirectoryListResponse {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const filter = params?.filter?.toLowerCase() || ''
let filtered = mockDirectories
if (filter) {
filtered = filtered.filter(
d =>
d.url.toLowerCase().includes(filter) ||
d.contentType.toLowerCase().includes(filter)
)
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const results = filtered.slice(start, start + pageSize)
return {
results,
total,
page,
pageSize,
totalPages,
}
}
/** Look up a single mock directory entry by ID; undefined when absent. */
export function getMockDirectoryById(id: number): Directory | undefined {
  for (const dir of mockDirectories) {
    if (dir.id === id) return dir
  }
  return undefined
}

View File

@@ -0,0 +1,593 @@
import type {
EholeFingerprint,
GobyFingerprint,
WappalyzerFingerprint,
FingersFingerprint,
FingerPrintHubFingerprint,
ARLFingerprint,
FingerprintStats,
} from '@/types/fingerprint.types'
import type { PaginatedResponse } from '@/types/api-response.types'
// ==================== EHole fingerprints (real-data samples) ====================
export const mockEholeFingerprints: EholeFingerprint[] = [
  {
    id: 1,
    cms: '致远OA',
    method: 'keyword',
    location: 'body',
    keyword: ['/seeyon/USER-DATA/IMAGES/LOGIN/login.gif'],
    isImportant: true,
    type: 'oa',
    createdAt: '2024-12-20T10:00:00Z',
  },
  {
    id: 2,
    cms: '通达OA',
    method: 'keyword',
    location: 'body',
    keyword: ['/static/images/tongda.ico'],
    isImportant: true,
    type: 'oa',
    createdAt: '2024-12-20T10:01:00Z',
  },
  {
    id: 3,
    cms: 'Nexus Repository Manager',
    method: 'keyword',
    location: 'title',
    keyword: ['Nexus Repository Manager'],
    isImportant: true,
    type: 'cloud',
    createdAt: '2024-12-20T10:02:00Z',
  },
  {
    id: 4,
    cms: '禅道 zentao',
    method: 'keyword',
    location: 'title',
    keyword: ['Welcome to use zentao'],
    isImportant: true,
    type: 'oa',
    createdAt: '2024-12-20T10:03:00Z',
  },
  {
    id: 5,
    cms: 'Kibana',
    method: 'keyword',
    location: 'title',
    keyword: ['Kibana'],
    isImportant: true,
    type: 'cloud',
    createdAt: '2024-12-20T10:04:00Z',
  },
  {
    id: 6,
    cms: 'Spring env',
    method: 'keyword',
    location: 'body',
    keyword: ['Whitelabel Error Page'],
    isImportant: true,
    type: 'framework',
    createdAt: '2024-12-20T10:05:00Z',
  },
  {
    id: 7,
    cms: '泛微OA',
    method: 'keyword',
    location: 'header',
    keyword: ['ecology_JSessionid'],
    isImportant: true,
    type: 'oa',
    createdAt: '2024-12-20T10:06:00Z',
  },
  {
    id: 8,
    cms: '用友NC',
    method: 'keyword',
    location: 'body',
    keyword: ['UFIDA', '/nc/servlet/nc.ui.iufo.login.Index'],
    isImportant: true,
    type: 'oa',
    createdAt: '2024-12-20T10:07:00Z',
  },
]
// ==================== Goby fingerprints (real-data samples) ====================
// `logic` is a boolean expression over the rule labels; `is_equal: false`
// means the feature must NOT be present.
export const mockGobyFingerprints: GobyFingerprint[] = [
  {
    id: 1,
    name: 'WebSphere-App-Server',
    logic: '((a||b) &&c&&d) || (e&&f&&g)',
    rule: [
      { label: 'a', feature: 'Server: WebSphere Application Server', is_equal: true },
      { label: 'b', feature: 'IBM WebSphere Application Server', is_equal: true },
      { label: 'c', feature: 'couchdb', is_equal: false },
      { label: 'd', feature: 'drupal', is_equal: false },
      { label: 'e', feature: 'Server: WebSphere Application Server', is_equal: true },
      { label: 'f', feature: 'couchdb', is_equal: false },
      { label: 'g', feature: 'drupal', is_equal: false },
    ],
    createdAt: '2024-12-20T10:00:00Z',
  },
  {
    id: 2,
    name: 'Wing-FTP-Server',
    logic: 'a||b||c||d',
    rule: [
      { label: 'a', feature: 'Server: Wing FTP Server', is_equal: true },
      { label: 'b', feature: 'Server: Wing FTP Server', is_equal: true },
      { label: 'c', feature: '/help_javascript.htm', is_equal: true },
      { label: 'd', feature: 'Wing FTP Server', is_equal: true },
    ],
    createdAt: '2024-12-20T10:01:00Z',
  },
  {
    id: 3,
    name: 'Fortinet-sslvpn',
    logic: 'a&&b',
    rule: [
      { label: 'a', feature: 'fgt_lang', is_equal: true },
      { label: 'b', feature: '/sslvpn/portal.html', is_equal: true },
    ],
    createdAt: '2024-12-20T10:02:00Z',
  },
  {
    id: 4,
    name: 'D-link-DSL-2640B',
    logic: 'a||b',
    rule: [
      { label: 'a', feature: 'Product : DSL-2640B', is_equal: true },
      { label: 'b', feature: 'D-Link DSL-2640B', is_equal: true },
    ],
    createdAt: '2024-12-20T10:03:00Z',
  },
  {
    id: 5,
    name: 'Kedacom-NVR',
    logic: 'a|| (b&&c) ||d',
    rule: [
      { label: 'a', feature: 'NVR Station Web', is_equal: true },
      { label: 'b', feature: 'location="index_cn.htm";', is_equal: true },
      { label: 'c', feature: 'if(syslan == "zh-cn"', is_equal: true },
      { label: 'd', feature: 'WMS browse NVR', is_equal: true },
    ],
    createdAt: '2024-12-20T10:04:00Z',
  },
]
// ==================== Wappalyzer fingerprints (real-data samples) ====================
// Pattern strings follow Wappalyzer's syntax (regex with `\;version:\N`
// capture-group annotations).
export const mockWappalyzerFingerprints: WappalyzerFingerprint[] = [
  {
    id: 1,
    name: '1C-Bitrix',
    cats: [1, 6],
    cookies: { bitrix_sm_guest_id: '', bitrix_sm_last_ip: '', bitrix_sm_sale_uid: '' },
    headers: { 'set-cookie': 'bitrix_', 'x-powered-cms': 'bitrix site manager' },
    scriptSrc: ['bitrix(?:\\.info/|/js/main/core)'],
    js: [],
    implies: ['PHP'],
    meta: {},
    html: [],
    description: '1C-Bitrix is a system of web project management.',
    website: 'https://www.1c-bitrix.ru',
    cpe: '',
    createdAt: '2024-12-20T10:00:00Z',
  },
  {
    id: 2,
    name: 'React',
    cats: [12],
    cookies: {},
    headers: {},
    scriptSrc: ['react(?:-dom)?(?:\\.min)?\\.js'],
    js: ['React.version'],
    implies: [],
    meta: {},
    html: ['data-reactroot'],
    description: 'React is a JavaScript library for building user interfaces.',
    website: 'https://reactjs.org',
    cpe: 'cpe:/a:facebook:react',
    createdAt: '2024-12-20T10:01:00Z',
  },
  {
    id: 3,
    name: 'Vue.js',
    cats: [12],
    cookies: {},
    headers: {},
    scriptSrc: ['vue(?:\\.min)?\\.js'],
    js: ['Vue.version'],
    implies: [],
    meta: {},
    html: ['data-v-'],
    description: 'Vue.js is a progressive JavaScript framework.',
    website: 'https://vuejs.org',
    cpe: 'cpe:/a:vuejs:vue',
    createdAt: '2024-12-20T10:02:00Z',
  },
  {
    id: 4,
    name: 'nginx',
    cats: [22],
    cookies: {},
    headers: { server: 'nginx(?:/([\\d.]+))?\\;version:\\1' },
    scriptSrc: [],
    js: [],
    implies: [],
    meta: {},
    html: [],
    description: 'nginx is a web server.',
    website: 'http://nginx.org/en',
    cpe: 'cpe:/a:nginx:nginx',
    createdAt: '2024-12-20T10:03:00Z',
  },
  {
    id: 5,
    name: 'WordPress',
    cats: [1, 11],
    cookies: {},
    headers: { 'x-pingback': '/xmlrpc\\.php$' },
    scriptSrc: ['/wp-(?:content|includes)/'],
    js: [],
    implies: ['PHP', 'MySQL'],
    meta: { generator: ['WordPress(?: ([\\d.]+))?\\;version:\\1'] },
    html: ['<link rel=["\']stylesheet["\'] [^>]+/wp-(?:content|includes)/'],
    description: 'WordPress is a free and open-source CMS.',
    website: 'https://wordpress.org',
    cpe: 'cpe:/a:wordpress:wordpress',
    createdAt: '2024-12-20T10:04:00Z',
  },
]
// ==================== Fingers fingerprints (real-data samples) ====================
export const mockFingersFingerprints: FingersFingerprint[] = [
  {
    id: 1,
    name: 'jenkins',
    link: '',
    rule: [
      {
        favicon_hash: ['81586312'],
        body: 'Jenkins',
        header: 'X-Jenkins',
      },
    ],
    tag: ['cloud'],
    focus: true,
    defaultPort: [8080],
    createdAt: '2024-12-20T10:00:00Z',
  },
  {
    id: 2,
    name: 'gitlab',
    link: '',
    rule: [
      {
        favicon_hash: ['516963061', '1278323681'],
        body: 'GitLab',
        header: '_gitlab_session',
      },
    ],
    tag: ['cloud'],
    focus: true,
    defaultPort: [80, 443],
    createdAt: '2024-12-20T10:01:00Z',
  },
  {
    id: 3,
    name: 'nacos',
    link: '',
    rule: [
      {
        body: '<title>Nacos</title>',
        send_data: '/nacos/',
      },
    ],
    tag: ['cloud'],
    focus: true,
    defaultPort: [8848],
    createdAt: '2024-12-20T10:02:00Z',
  },
  {
    id: 4,
    name: 'elasticsearch',
    link: '',
    rule: [
      {
        body: '"cluster_name" : "elasticsearch"',
        vuln: 'elasticsearch_unauth',
      },
    ],
    tag: ['cloud'],
    focus: true,
    defaultPort: [9200],
    createdAt: '2024-12-20T10:03:00Z',
  },
  {
    id: 5,
    name: 'zabbix',
    link: '',
    rule: [
      {
        favicon_hash: ['892542951'],
        body: 'images/general/zabbix.ico',
        header: 'zbx_sessionid',
        send_data: '/zabbix',
      },
    ],
    tag: ['cloud'],
    focus: true,
    defaultPort: [80, 443],
    createdAt: '2024-12-20T10:04:00Z',
  },
]
// ==================== FingerPrintHub fingerprints (real-data samples) ====================
// Shaped after nuclei technology-detection templates.
export const mockFingerPrintHubFingerprints: FingerPrintHubFingerprint[] = [
  {
    id: 1,
    fpId: 'apache-tomcat',
    name: 'Apache Tomcat',
    author: 'pdteam',
    tags: 'tech,apache,tomcat',
    severity: 'info',
    metadata: {
      product: 'tomcat',
      vendor: 'apache',
      verified: true,
      shodan_query: 'http.favicon.hash:"-297069493"',
      fofa_query: 'app="Apache-Tomcat"',
    },
    http: [
      {
        method: 'GET',
        path: '/',
        matchers: [
          { type: 'word', part: 'body', words: ['Apache Tomcat'] },
          { type: 'status', status: [200] },
        ],
      },
    ],
    sourceFile: 'http/technologies/apache/apache-tomcat.yaml',
    createdAt: '2024-12-20T10:00:00Z',
  },
  {
    id: 2,
    fpId: 'nginx-detect',
    name: 'Nginx Server',
    author: 'pdteam',
    tags: 'tech,nginx',
    severity: 'info',
    metadata: {
      product: 'nginx',
      vendor: 'nginx',
      verified: true,
    },
    http: [
      {
        method: 'GET',
        path: '/',
        matchers: [
          { type: 'regex', part: 'header', regex: ['[Nn]ginx'] },
        ],
        extractors: [
          { type: 'regex', part: 'header', regex: ['nginx/([\\d.]+)'], group: 1 },
        ],
      },
    ],
    sourceFile: 'http/technologies/nginx/nginx-version.yaml',
    createdAt: '2024-12-20T10:01:00Z',
  },
  {
    id: 3,
    fpId: 'spring-boot-detect',
    name: 'Spring Boot',
    author: 'pdteam',
    tags: 'tech,spring,java',
    severity: 'info',
    metadata: {
      product: 'spring-boot',
      vendor: 'vmware',
      verified: true,
    },
    http: [
      {
        method: 'GET',
        path: '/',
        matchers: [
          { type: 'word', part: 'body', words: ['Whitelabel Error Page'] },
        ],
      },
    ],
    sourceFile: 'http/technologies/spring/spring-boot.yaml',
    createdAt: '2024-12-20T10:02:00Z',
  },
]
// ==================== ARL fingerprints (real-data samples) ====================
// `rule` uses ARL's simple match expression language.
export const mockARLFingerprints: ARLFingerprint[] = [
  {
    id: 1,
    name: 'Shiro',
    rule: 'header="rememberMe="',
    createdAt: '2024-12-20T10:00:00Z',
  },
  {
    id: 2,
    name: 'ThinkPHP',
    rule: 'body="ThinkPHP" || header="ThinkPHP"',
    createdAt: '2024-12-20T10:01:00Z',
  },
  {
    id: 3,
    name: 'Fastjson',
    rule: 'body="fastjson" || body="com.alibaba.fastjson"',
    createdAt: '2024-12-20T10:02:00Z',
  },
  {
    id: 4,
    name: 'Weblogic',
    rule: 'body="WebLogic" || header="WebLogic" || body="bea_wls_internal"',
    createdAt: '2024-12-20T10:03:00Z',
  },
  {
    id: 5,
    name: 'JBoss',
    rule: 'body="JBoss" || header="JBoss" || body="jboss.css"',
    createdAt: '2024-12-20T10:04:00Z',
  },
  {
    id: 6,
    name: 'Struts2',
    rule: 'body=".action" || body="struts"',
    createdAt: '2024-12-20T10:05:00Z',
  },
]
// ==================== Aggregate counts ====================
// Fake totals per fingerprint source, shown on the stats dashboard.
export const mockFingerprintStats: FingerprintStats = {
  ehole: 1892,
  goby: 4567,
  wappalyzer: 3456,
  fingers: 2345,
  fingerprinthub: 8901,
  arl: 1234,
}
// ==================== 查询函数 ====================
export function getMockEholeFingerprints(params?: {
page?: number
pageSize?: number
filter?: string
}): PaginatedResponse<EholeFingerprint> {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const filter = params?.filter?.toLowerCase() || ''
let filtered = mockEholeFingerprints
if (filter) {
filtered = filtered.filter(f => f.cms.toLowerCase().includes(filter))
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const results = filtered.slice(start, start + pageSize)
return { results, total, page, pageSize, totalPages }
}
export function getMockGobyFingerprints(params?: {
page?: number
pageSize?: number
filter?: string
}): PaginatedResponse<GobyFingerprint> {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const filter = params?.filter?.toLowerCase() || ''
let filtered = mockGobyFingerprints
if (filter) {
filtered = filtered.filter(f => f.name.toLowerCase().includes(filter))
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const results = filtered.slice(start, start + pageSize)
return { results, total, page, pageSize, totalPages }
}
export function getMockWappalyzerFingerprints(params?: {
page?: number
pageSize?: number
filter?: string
}): PaginatedResponse<WappalyzerFingerprint> {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const filter = params?.filter?.toLowerCase() || ''
let filtered = mockWappalyzerFingerprints
if (filter) {
filtered = filtered.filter(f => f.name.toLowerCase().includes(filter))
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const results = filtered.slice(start, start + pageSize)
return { results, total, page, pageSize, totalPages }
}
export function getMockFingersFingerprints(params?: {
page?: number
pageSize?: number
filter?: string
}): PaginatedResponse<FingersFingerprint> {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const filter = params?.filter?.toLowerCase() || ''
let filtered = mockFingersFingerprints
if (filter) {
filtered = filtered.filter(f => f.name.toLowerCase().includes(filter))
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const results = filtered.slice(start, start + pageSize)
return { results, total, page, pageSize, totalPages }
}
export function getMockFingerPrintHubFingerprints(params?: {
page?: number
pageSize?: number
filter?: string
}): PaginatedResponse<FingerPrintHubFingerprint> {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const filter = params?.filter?.toLowerCase() || ''
let filtered = mockFingerPrintHubFingerprints
if (filter) {
filtered = filtered.filter(f => f.name.toLowerCase().includes(filter))
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const results = filtered.slice(start, start + pageSize)
return { results, total, page, pageSize, totalPages }
}
export function getMockARLFingerprints(params?: {
page?: number
pageSize?: number
filter?: string
}): PaginatedResponse<ARLFingerprint> {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const filter = params?.filter?.toLowerCase() || ''
let filtered = mockARLFingerprints
if (filter) {
filtered = filtered.filter(f => f.name.toLowerCase().includes(filter))
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const results = filtered.slice(start, start + pageSize)
return { results, total, page, pageSize, totalPages }
}
export function getMockFingerprintStats(): FingerprintStats {
return mockFingerprintStats
}

View File

@@ -0,0 +1,118 @@
import type { IPAddress, GetIPAddressesResponse } from '@/types/ip-address.types'
// Small helper to build dotted-quad IP strings; all sample addresses come
// from the documentation ranges (RFC 5737: 192.0.2.0/24, 198.51.100.0/24,
// 203.0.113.0/24).
const ip = (a: number, b: number, c: number, d: number) => `${a}.${b}.${c}.${d}`
export const mockIPAddresses: IPAddress[] = [
  {
    ip: ip(192, 0, 2, 1),
    hosts: ['router.local', 'gateway.lan'],
    ports: [80, 443, 22, 53],
    createdAt: '2024-12-28T10:00:00Z',
  },
  {
    ip: ip(192, 0, 2, 10),
    hosts: ['api.acme.com', 'backend.acme.com'],
    ports: [80, 443, 8080, 3306],
    createdAt: '2024-12-28T10:01:00Z',
  },
  {
    ip: ip(192, 0, 2, 11),
    hosts: ['web.acme.com', 'www.acme.com'],
    ports: [80, 443],
    createdAt: '2024-12-28T10:02:00Z',
  },
  {
    ip: ip(198, 51, 100, 50),
    hosts: ['db.internal.acme.com'],
    ports: [3306, 5432, 27017],
    createdAt: '2024-12-28T10:03:00Z',
  },
  {
    ip: ip(203, 0, 113, 50),
    hosts: ['cdn.acme.com'],
    ports: [80, 443],
    createdAt: '2024-12-28T10:04:00Z',
  },
  {
    ip: ip(198, 51, 100, 10),
    hosts: ['mail.acme.com', 'smtp.acme.com'],
    ports: [25, 465, 587, 993, 995],
    createdAt: '2024-12-28T10:05:00Z',
  },
  {
    ip: ip(192, 0, 2, 100),
    hosts: ['jenkins.acme.com'],
    ports: [8080, 50000],
    createdAt: '2024-12-28T10:06:00Z',
  },
  {
    ip: ip(192, 0, 2, 101),
    hosts: ['gitlab.acme.com'],
    ports: [80, 443, 22],
    createdAt: '2024-12-28T10:07:00Z',
  },
  {
    ip: ip(192, 0, 2, 102),
    hosts: ['k8s.acme.com', 'kubernetes.acme.com'],
    ports: [6443, 10250, 10251, 10252],
    createdAt: '2024-12-28T10:08:00Z',
  },
  {
    ip: ip(192, 0, 2, 103),
    hosts: ['elastic.acme.com'],
    ports: [9200, 9300, 5601],
    createdAt: '2024-12-28T10:09:00Z',
  },
  {
    ip: ip(192, 0, 2, 104),
    hosts: ['redis.acme.com'],
    ports: [6379],
    createdAt: '2024-12-28T10:10:00Z',
  },
  {
    ip: ip(192, 0, 2, 105),
    hosts: ['mq.acme.com', 'rabbitmq.acme.com'],
    ports: [5672, 15672],
    createdAt: '2024-12-28T10:11:00Z',
  },
]
export function getMockIPAddresses(params?: {
page?: number
pageSize?: number
filter?: string
targetId?: number
scanId?: number
}): GetIPAddressesResponse {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const filter = params?.filter?.toLowerCase() || ''
let filtered = mockIPAddresses
if (filter) {
filtered = filtered.filter(
ipAddr =>
ipAddr.ip.toLowerCase().includes(filter) ||
ipAddr.hosts.some(h => h.toLowerCase().includes(filter))
)
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const results = filtered.slice(start, start + pageSize)
return {
results,
total,
page,
pageSize,
totalPages,
}
}
/** Look up a mock entry by its exact IP string; undefined when absent. */
export function getMockIPAddressByIP(ipStr: string): IPAddress | undefined {
  for (const entry of mockIPAddresses) {
    if (entry.ip === ipStr) return entry
  }
  return undefined
}

View File

@@ -0,0 +1,35 @@
import type {
NotificationSettings,
GetNotificationSettingsResponse,
UpdateNotificationSettingsResponse,
} from '@/types/notification-settings.types'
// Shared in-memory settings store; mutated in place by
// updateMockNotificationSettings, so reads reflect prior mock updates.
export const mockNotificationSettings: NotificationSettings = {
  discord: {
    enabled: true,
    webhookUrl: 'https://discord.com/api/webhooks/1234567890/abcdefghijklmnop',
  },
  categories: {
    scan: true,
    vulnerability: true,
    asset: true,
    system: false,
  },
}
/** Return the current (possibly previously mutated) mock settings. */
export function getMockNotificationSettings(): GetNotificationSettingsResponse {
  return mockNotificationSettings
}
/**
 * Simulate saving notification settings.
 *
 * Merges the incoming object into the shared mock store (shallow, like the
 * real API) and echoes the stored values back in the response shape.
 */
export function updateMockNotificationSettings(
  settings: NotificationSettings
): UpdateNotificationSettingsResponse {
  Object.assign(mockNotificationSettings, settings)
  const { discord, categories } = mockNotificationSettings
  return {
    message: 'Notification settings updated successfully',
    discord,
    categories,
  }
}

View File

@@ -0,0 +1,240 @@
import type {
NucleiTemplateTreeNode,
NucleiTemplateTreeResponse,
NucleiTemplateContent,
} from '@/types/nuclei.types'
// Mock nuclei-templates directory tree. Folders carry `children`; files
// carry template metadata (templateId / severity / tags).
export const mockNucleiTemplateTree: NucleiTemplateTreeNode[] = [
  {
    type: 'folder',
    name: 'cves',
    path: 'cves',
    children: [
      {
        type: 'folder',
        name: '2024',
        path: 'cves/2024',
        children: [
          {
            type: 'file',
            name: 'CVE-2024-1234.yaml',
            path: 'cves/2024/CVE-2024-1234.yaml',
            templateId: 'CVE-2024-1234',
            severity: 'critical',
            tags: ['cve', 'rce'],
          },
          {
            type: 'file',
            name: 'CVE-2024-5678.yaml',
            path: 'cves/2024/CVE-2024-5678.yaml',
            templateId: 'CVE-2024-5678',
            severity: 'high',
            tags: ['cve', 'sqli'],
          },
        ],
      },
      {
        type: 'folder',
        name: '2023',
        path: 'cves/2023',
        children: [
          {
            type: 'file',
            name: 'CVE-2023-9876.yaml',
            path: 'cves/2023/CVE-2023-9876.yaml',
            templateId: 'CVE-2023-9876',
            severity: 'high',
            tags: ['cve', 'auth-bypass'],
          },
        ],
      },
    ],
  },
  {
    type: 'folder',
    name: 'vulnerabilities',
    path: 'vulnerabilities',
    children: [
      {
        type: 'folder',
        name: 'generic',
        path: 'vulnerabilities/generic',
        children: [
          {
            type: 'file',
            name: 'sqli-error-based.yaml',
            path: 'vulnerabilities/generic/sqli-error-based.yaml',
            templateId: 'sqli-error-based',
            severity: 'high',
            tags: ['sqli', 'generic'],
          },
          {
            type: 'file',
            name: 'xss-reflected.yaml',
            path: 'vulnerabilities/generic/xss-reflected.yaml',
            templateId: 'xss-reflected',
            severity: 'medium',
            tags: ['xss', 'generic'],
          },
        ],
      },
    ],
  },
  {
    type: 'folder',
    name: 'technologies',
    path: 'technologies',
    children: [
      {
        type: 'file',
        name: 'nginx-version.yaml',
        path: 'technologies/nginx-version.yaml',
        templateId: 'nginx-version',
        severity: 'info',
        tags: ['tech', 'nginx'],
      },
      {
        type: 'file',
        name: 'apache-detect.yaml',
        path: 'technologies/apache-detect.yaml',
        templateId: 'apache-detect',
        severity: 'info',
        tags: ['tech', 'apache'],
      },
    ],
  },
  {
    type: 'folder',
    name: 'exposures',
    path: 'exposures',
    children: [
      {
        type: 'folder',
        name: 'configs',
        path: 'exposures/configs',
        children: [
          {
            type: 'file',
            name: 'git-config.yaml',
            path: 'exposures/configs/git-config.yaml',
            templateId: 'git-config',
            severity: 'medium',
            tags: ['exposure', 'git'],
          },
          {
            type: 'file',
            name: 'env-file.yaml',
            path: 'exposures/configs/env-file.yaml',
            templateId: 'env-file',
            severity: 'high',
            tags: ['exposure', 'env'],
          },
        ],
      },
    ],
  },
]
// Raw template bodies keyed by repo-relative path. Only a subset of the
// tree above has content; `content` holds the full YAML template text.
export const mockNucleiTemplateContent: Record<string, NucleiTemplateContent> = {
  'cves/2024/CVE-2024-1234.yaml': {
    path: 'cves/2024/CVE-2024-1234.yaml',
    name: 'CVE-2024-1234.yaml',
    templateId: 'CVE-2024-1234',
    severity: 'critical',
    tags: ['cve', 'rce'],
    content: `id: CVE-2024-1234

info:
  name: Example RCE Vulnerability
  author: pdteam
  severity: critical
  description: |
    Example remote code execution vulnerability.
  reference:
    - https://example.com/cve-2024-1234
  tags: cve,cve2024,rce

http:
  - method: POST
    path:
      - "{{BaseURL}}/api/execute"
    headers:
      Content-Type: application/json
    body: '{"cmd": "id"}'
    matchers:
      - type: word
        words:
          - "uid="
          - "gid="
        condition: and
`,
  },
  'vulnerabilities/generic/sqli-error-based.yaml': {
    path: 'vulnerabilities/generic/sqli-error-based.yaml',
    name: 'sqli-error-based.yaml',
    templateId: 'sqli-error-based',
    severity: 'high',
    tags: ['sqli', 'generic'],
    content: `id: sqli-error-based

info:
  name: Error Based SQL Injection
  author: pdteam
  severity: high
  tags: sqli,generic

http:
  - method: GET
    path:
      - "{{BaseURL}}/?id=1'"
    matchers:
      - type: word
        words:
          - "SQL syntax"
          - "mysql_fetch"
          - "You have an error"
        condition: or
`,
  },
  'technologies/nginx-version.yaml': {
    path: 'technologies/nginx-version.yaml',
    name: 'nginx-version.yaml',
    templateId: 'nginx-version',
    severity: 'info',
    tags: ['tech', 'nginx'],
    content: `id: nginx-version

info:
  name: Nginx Version Detection
  author: pdteam
  severity: info
  tags: tech,nginx

http:
  - method: GET
    path:
      - "{{BaseURL}}/"
    matchers:
      - type: regex
        part: header
        regex:
          - "nginx/([\\d.]+)"
    extractors:
      - type: regex
        part: header
        group: 1
        regex:
          - "nginx/([\\d.]+)"
`,
  },
}
/** Build the mock template-tree response consumed by the nuclei templates view. */
export function getMockNucleiTemplateTree(): NucleiTemplateTreeResponse {
  const roots = mockNucleiTemplateTree
  return { roots }
}
/** Look up mock template content by repository-relative path; undefined when absent. */
export function getMockNucleiTemplateContent(path: string): NucleiTemplateContent | undefined {
  const entry = mockNucleiTemplateContent[path]
  return entry
}

View File

@@ -0,0 +1,154 @@
import type {
SearchResponse,
WebsiteSearchResult,
EndpointSearchResult,
AssetType,
} from '@/types/search.types'
import { mockWebsites } from './websites'
import { mockEndpoints } from './endpoints'
// Adapt a mock Website record to the WebsiteSearchResult shape used by search.
// Nullable fields are normalized: tech -> [], responseBody -> '', target -> 1.
function websiteToSearchResult(website: typeof mockWebsites[0]): WebsiteSearchResult {
  const { id, url, host, title, statusCode, contentLength, contentType, webserver, location, vhost, createdAt } = website
  return {
    id,
    url,
    host,
    title,
    technologies: website.tech || [],
    statusCode,
    contentLength,
    contentType,
    webserver,
    location,
    vhost,
    responseHeaders: {},
    responseBody: website.responseBody || '',
    createdAt,
    // Mock websites may lack a target link; fall back to target 1.
    targetId: website.target ?? 1,
    vulnerabilities: [],
  }
}
// Adapt a mock Endpoint record to the EndpointSearchResult search shape.
// Missing endpoint fields are defaulted: strings -> '', createdAt -> null.
function endpointToSearchResult(endpoint: typeof mockEndpoints[0]): EndpointSearchResult {
  const result: EndpointSearchResult = {
    id: endpoint.id,
    url: endpoint.url,
    host: endpoint.host || '',
    title: endpoint.title,
    technologies: endpoint.tech || [],
    statusCode: endpoint.statusCode,
    contentLength: endpoint.contentLength,
    contentType: endpoint.contentType || '',
    webserver: endpoint.webserver || '',
    location: endpoint.location || '',
    vhost: null, // endpoints carry no vhost information
    responseHeaders: {},
    responseBody: '',
    createdAt: endpoint.createdAt ?? null,
    targetId: 1, // mock endpoints have no target link; pin to target 1
    matchedGfPatterns: endpoint.gfPatterns || [],
  }
  return result
}
// Parse a search expression into conditions.
// Recognizes field="value", field=="value" and field!="value" tokens;
// anything else in the query is ignored.
function parseSearchQuery(query: string): { field: string; operator: string; value: string }[] {
  const pattern = /(\w+)(==|!=|=)"([^"]+)"/g
  return Array.from(query.matchAll(pattern), m => ({
    field: m[1],
    operator: m[2],
    value: m[3],
  }))
}
// Return true when the record satisfies every parsed condition.
// Operators: '=' substring match, '==' exact match, '!=' negated substring
// match — all case-insensitive. Unknown fields and operators are ignored
// (treated as matching); a null field value never matches.
function matchesConditions(
  record: WebsiteSearchResult | EndpointSearchResult,
  conditions: { field: string; operator: string; value: string }[]
): boolean {
  if (!conditions.length) return true

  // Resolve a condition field to the record's value; undefined = unknown field.
  const fieldOf = (field: string): string | number | null | undefined => {
    switch (field) {
      case 'host': return record.host
      case 'url': return record.url
      case 'title': return record.title
      case 'tech': return record.technologies.join(',')
      case 'status': return String(record.statusCode)
      default: return undefined
    }
  }

  return conditions.every(cond => {
    const raw = fieldOf(cond.field)
    if (raw === undefined) return true // unknown field: ignore this condition
    if (raw === null) return false
    const haystack = String(raw).toLowerCase()
    const needle = cond.value.toLowerCase()
    if (cond.operator === '==') return haystack === needle
    if (cond.operator === '!=') return !haystack.includes(needle)
    if (cond.operator === '=') return haystack.includes(needle)
    return true // unknown operator: ignore this condition
  })
}
/**
 * Filter and paginate mock assets into a SearchResponse.
 * The query `q` is parsed into field conditions (see parseSearchQuery);
 * asset_type selects between website and endpoint datasets.
 */
export function getMockSearchResults(params: {
  q?: string
  asset_type?: AssetType
  page?: number
  pageSize?: number
}): SearchResponse {
  const { q = '', asset_type = 'website', page = 1, pageSize = 10 } = params
  const conditions = parseSearchQuery(q)

  const mapped: (WebsiteSearchResult | EndpointSearchResult)[] =
    asset_type === 'website'
      ? mockWebsites.map(websiteToSearchResult)
      : mockEndpoints.map(endpointToSearchResult)
  const results = mapped.filter(r => matchesConditions(r, conditions))

  const total = results.length
  const offset = (page - 1) * pageSize
  return {
    results: results.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
    assetType: asset_type,
  }
}

View File

@@ -0,0 +1,100 @@
import type { SystemLogResponse, LogFilesResponse, LogFile } from '@/types/system-log.types'
// Mock log files surfaced by the system-log browser.
// `size` is bytes; `category` ('system' | 'error') drives the UI filter and
// which mock transcript getMockSystemLogs serves.
export const mockLogFiles: LogFile[] = [
  {
    filename: 'xingrin.log',
    category: 'system',
    size: 1234567,
    modifiedAt: '2024-12-28T10:00:00Z',
  },
  {
    filename: 'xingrin-error.log',
    category: 'error',
    size: 45678,
    modifiedAt: '2024-12-28T09:30:00Z',
  },
  {
    filename: 'worker.log',
    category: 'system',
    size: 234567,
    modifiedAt: '2024-12-28T10:00:00Z',
  },
  {
    filename: 'celery.log',
    category: 'system',
    size: 567890,
    modifiedAt: '2024-12-28T09:45:00Z',
  },
  {
    filename: 'nginx-access.log',
    category: 'system',
    size: 12345678,
    modifiedAt: '2024-12-28T10:00:00Z',
  },
  {
    filename: 'nginx-error.log',
    category: 'error',
    size: 23456,
    modifiedAt: '2024-12-28T08:00:00Z',
  },
]
// Sample "happy path" transcript served for regular (non-error) log files.
export const mockSystemLogContent = `[2024-12-28 10:00:00] INFO: Server started on port 8000
[2024-12-28 10:00:01] INFO: Database connection established
[2024-12-28 10:00:02] INFO: Redis connection established
[2024-12-28 10:00:03] INFO: Worker node registered: local-worker-1
[2024-12-28 10:00:05] INFO: Celery worker started with 4 concurrent tasks
[2024-12-28 10:01:00] INFO: New scan task created: scan-001
[2024-12-28 10:01:01] INFO: Task scan-001 assigned to worker local-worker-1
[2024-12-28 10:01:05] INFO: Subdomain enumeration started for target: acme.com
[2024-12-28 10:02:30] INFO: Found 45 subdomains for acme.com
[2024-12-28 10:02:31] INFO: Port scanning started for 45 hosts
[2024-12-28 10:05:00] INFO: Port scanning completed, found 123 open ports
[2024-12-28 10:05:01] INFO: HTTP probing started for 123 endpoints
[2024-12-28 10:08:00] INFO: HTTP probing completed, found 89 live websites
[2024-12-28 10:08:01] INFO: Fingerprint detection started
[2024-12-28 10:10:00] INFO: Fingerprint detection completed
[2024-12-28 10:10:01] INFO: Vulnerability scanning started with nuclei
[2024-12-28 10:15:00] INFO: Vulnerability scanning completed, found 5 vulnerabilities
[2024-12-28 10:15:01] INFO: Scan task scan-001 completed successfully
[2024-12-28 10:15:02] INFO: Results saved to database
[2024-12-28 10:15:03] INFO: Notification sent to Discord webhook`
// Transcript with ERROR/WARNING entries, served for filenames containing "error".
export const mockErrorLogContent = `[2024-12-28 08:30:00] ERROR: Connection refused: Redis server not responding
[2024-12-28 08:30:01] ERROR: Retrying Redis connection in 5 seconds...
[2024-12-28 08:30:06] INFO: Redis connection recovered
[2024-12-28 09:15:00] WARNING: High memory usage detected (85%)
[2024-12-28 09:15:01] INFO: Running garbage collection
[2024-12-28 09:15:05] INFO: Memory usage reduced to 62%
[2024-12-28 09:30:00] ERROR: Worker node disconnected: remote-worker-2
[2024-12-28 09:30:01] WARNING: Reassigning 3 tasks from remote-worker-2
[2024-12-28 09:30:05] INFO: Tasks reassigned successfully`
/** Wrap the mock log-file listing in the LogFilesResponse envelope. */
export function getMockLogFiles(): LogFilesResponse {
  const files = mockLogFiles
  return { files }
}
/**
 * Return mock log content for the requested file, trimmed to the last
 * `lines` lines. Filenames containing "error" are served the error
 * transcript; everything else gets the system transcript.
 */
export function getMockSystemLogs(params?: {
  file?: string
  lines?: number
}): SystemLogResponse {
  const filename = params?.file || 'xingrin.log'
  const lines = params?.lines || 100
  const source = filename.includes('error') ? mockErrorLogContent : mockSystemLogContent
  // Keep only the trailing `lines` lines to emulate a tail read.
  const tail = source.split('\n').slice(-lines).join('\n')
  return { content: tail }
}

149
frontend/mock/data/tools.ts Normal file
View File

@@ -0,0 +1,149 @@
import type { Tool, GetToolsResponse } from '@/types/tool.types'
// Mock installed-tool inventory for the tools management page.
// `type` is 'opensource' (installed from a public repo via the listed
// commands) or 'custom' (user-provided; repo/commands left empty).
export const mockTools: Tool[] = [
  {
    id: 1,
    name: 'subfinder',
    type: 'opensource',
    repoUrl: 'https://github.com/projectdiscovery/subfinder',
    version: 'v2.6.3',
    description: 'Fast passive subdomain enumeration tool.',
    categoryNames: ['subdomain', 'recon'],
    directory: '/opt/tools/subfinder',
    installCommand: 'go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest',
    updateCommand: 'go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest',
    versionCommand: 'subfinder -version',
    createdAt: '2024-12-20T10:00:00Z',
    updatedAt: '2024-12-28T10:00:00Z',
  },
  {
    id: 2,
    name: 'httpx',
    type: 'opensource',
    repoUrl: 'https://github.com/projectdiscovery/httpx',
    version: 'v1.6.0',
    description: 'Fast and multi-purpose HTTP toolkit.',
    categoryNames: ['http', 'recon'],
    directory: '/opt/tools/httpx',
    installCommand: 'go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest',
    updateCommand: 'go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest',
    versionCommand: 'httpx -version',
    createdAt: '2024-12-20T10:01:00Z',
    updatedAt: '2024-12-28T10:01:00Z',
  },
  {
    id: 3,
    name: 'nuclei',
    type: 'opensource',
    repoUrl: 'https://github.com/projectdiscovery/nuclei',
    version: 'v3.1.0',
    description: 'Fast and customizable vulnerability scanner.',
    categoryNames: ['vulnerability'],
    directory: '/opt/tools/nuclei',
    installCommand: 'go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest',
    updateCommand: 'go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest',
    versionCommand: 'nuclei -version',
    createdAt: '2024-12-20T10:02:00Z',
    updatedAt: '2024-12-28T10:02:00Z',
  },
  {
    id: 4,
    name: 'naabu',
    type: 'opensource',
    repoUrl: 'https://github.com/projectdiscovery/naabu',
    version: 'v2.2.1',
    description: 'Fast port scanner written in go.',
    categoryNames: ['port', 'network'],
    directory: '/opt/tools/naabu',
    installCommand: 'go install -v github.com/projectdiscovery/naabu/v2/cmd/naabu@latest',
    updateCommand: 'go install -v github.com/projectdiscovery/naabu/v2/cmd/naabu@latest',
    versionCommand: 'naabu -version',
    createdAt: '2024-12-20T10:03:00Z',
    updatedAt: '2024-12-28T10:03:00Z',
  },
  {
    id: 5,
    name: 'katana',
    type: 'opensource',
    repoUrl: 'https://github.com/projectdiscovery/katana',
    version: 'v1.0.4',
    description: 'Next-generation crawling and spidering framework.',
    categoryNames: ['crawler', 'recon'],
    directory: '/opt/tools/katana',
    installCommand: 'go install github.com/projectdiscovery/katana/cmd/katana@latest',
    updateCommand: 'go install github.com/projectdiscovery/katana/cmd/katana@latest',
    versionCommand: 'katana -version',
    createdAt: '2024-12-20T10:04:00Z',
    updatedAt: '2024-12-28T10:04:00Z',
  },
  {
    id: 6,
    name: 'ffuf',
    type: 'opensource',
    repoUrl: 'https://github.com/ffuf/ffuf',
    version: 'v2.1.0',
    description: 'Fast web fuzzer written in Go.',
    categoryNames: ['directory', 'fuzzer'],
    directory: '/opt/tools/ffuf',
    installCommand: 'go install github.com/ffuf/ffuf/v2@latest',
    updateCommand: 'go install github.com/ffuf/ffuf/v2@latest',
    versionCommand: 'ffuf -V',
    createdAt: '2024-12-20T10:05:00Z',
    updatedAt: '2024-12-28T10:05:00Z',
  },
  {
    id: 7,
    name: 'amass',
    type: 'opensource',
    repoUrl: 'https://github.com/owasp-amass/amass',
    version: 'v4.2.0',
    description: 'In-depth attack surface mapping and asset discovery.',
    categoryNames: ['subdomain', 'recon'],
    directory: '/opt/tools/amass',
    installCommand: 'go install -v github.com/owasp-amass/amass/v4/...@master',
    updateCommand: 'go install -v github.com/owasp-amass/amass/v4/...@master',
    versionCommand: 'amass -version',
    createdAt: '2024-12-20T10:06:00Z',
    updatedAt: '2024-12-28T10:06:00Z',
  },
  // Custom (locally developed) tool: no repo URL or install/update commands.
  {
    id: 8,
    name: 'xingfinger',
    type: 'custom',
    repoUrl: '',
    version: '1.0.0',
    description: '自定义指纹识别工具',
    categoryNames: ['recon'],
    directory: '/opt/tools/xingfinger',
    installCommand: '',
    updateCommand: '',
    versionCommand: '',
    createdAt: '2024-12-20T10:07:00Z',
    updatedAt: '2024-12-28T10:07:00Z',
  },
]
/**
 * Paginate the mock tool inventory into a GetToolsResponse.
 * Falsy page/pageSize values fall back to 1 / 10 (matching the `||` defaults).
 */
export function getMockTools(params?: {
  page?: number
  pageSize?: number
}): GetToolsResponse {
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10
  const total = mockTools.length
  const offset = (page - 1) * pageSize
  return {
    tools: mockTools.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}
/** Find a mock tool by primary key; undefined when no tool matches. */
export function getMockToolById(id: number): Tool | undefined {
  for (const tool of mockTools) {
    if (tool.id === id) return tool
  }
  return undefined
}

View File

@@ -0,0 +1,119 @@
import type { Wordlist, GetWordlistsResponse } from '@/types/wordlist.types'
// Mock wordlist metadata for the wordlist management page.
// `fileSize` is bytes, `lineCount` the number of entries; `fileHash` is a
// placeholder content hash used for dedupe/display in the UI.
export const mockWordlists: Wordlist[] = [
  {
    id: 1,
    name: 'common-dirs.txt',
    description: '常用目录字典',
    fileSize: 45678,
    lineCount: 4567,
    fileHash: 'a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6',
    createdAt: '2024-12-20T10:00:00Z',
    updatedAt: '2024-12-28T10:00:00Z',
  },
  {
    id: 2,
    name: 'subdomains-top1million.txt',
    description: 'Top 100万子域名字典',
    fileSize: 12345678,
    lineCount: 1000000,
    fileHash: 'b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7',
    createdAt: '2024-12-20T10:01:00Z',
    updatedAt: '2024-12-28T10:01:00Z',
  },
  {
    id: 3,
    name: 'api-endpoints.txt',
    description: 'API 端点字典',
    fileSize: 23456,
    lineCount: 2345,
    fileHash: 'c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7r8',
    createdAt: '2024-12-20T10:02:00Z',
    updatedAt: '2024-12-28T10:02:00Z',
  },
  {
    id: 4,
    name: 'params.txt',
    description: '常用参数名字典',
    fileSize: 8901,
    lineCount: 890,
    fileHash: 'd4e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9',
    createdAt: '2024-12-20T10:03:00Z',
    updatedAt: '2024-12-28T10:03:00Z',
  },
  {
    id: 5,
    name: 'sensitive-files.txt',
    description: '敏感文件字典',
    fileSize: 5678,
    lineCount: 567,
    fileHash: 'e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9t0',
    createdAt: '2024-12-20T10:04:00Z',
    updatedAt: '2024-12-28T10:04:00Z',
  },
  {
    id: 6,
    name: 'raft-large-directories.txt',
    description: 'RAFT 大型目录字典',
    fileSize: 987654,
    lineCount: 98765,
    fileHash: 'f6g7h8i9j0k1l2m3n4o5p6q7r8s9t0u1',
    createdAt: '2024-12-20T10:05:00Z',
    updatedAt: '2024-12-28T10:05:00Z',
  },
]
// Shared sample wordlist body; getMockWordlistContent returns this same
// text for every mock wordlist regardless of id.
export const mockWordlistContent = `admin
api
backup
config
dashboard
debug
dev
docs
download
files
images
js
login
logs
manager
private
public
static
test
upload
users
v1
v2
wp-admin
wp-content`
/**
 * Paginate the mock wordlists into a GetWordlistsResponse.
 * Falsy page/pageSize values fall back to 1 / 10 (matching the `||` defaults).
 */
export function getMockWordlists(params?: {
  page?: number
  pageSize?: number
}): GetWordlistsResponse {
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10
  const total = mockWordlists.length
  const offset = (page - 1) * pageSize
  return {
    results: mockWordlists.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}
/** Find a mock wordlist by primary key; undefined when no wordlist matches. */
export function getMockWordlistById(id: number): Wordlist | undefined {
  for (const wordlist of mockWordlists) {
    if (wordlist.id === id) return wordlist
  }
  return undefined
}
/** Return the shared mock wordlist body (identical for every wordlist id). */
export function getMockWordlistContent(): string {
  const content = mockWordlistContent
  return content
}

View File

@@ -105,3 +105,80 @@ export {
getMockScheduledScans,
getMockScheduledScanById,
} from './data/scheduled-scans'
// ---------------------------------------------------------------------------
// Barrel re-exports: mock datasets and their accessor helpers, grouped by
// domain. Each group mirrors one module under ./data/.
// ---------------------------------------------------------------------------
// Directories
export {
  mockDirectories,
  getMockDirectories,
  getMockDirectoryById,
} from './data/directories'
// Fingerprints
export {
  mockEholeFingerprints,
  mockGobyFingerprints,
  mockWappalyzerFingerprints,
  mockFingersFingerprints,
  mockFingerPrintHubFingerprints,
  mockARLFingerprints,
  mockFingerprintStats,
  getMockEholeFingerprints,
  getMockGobyFingerprints,
  getMockWappalyzerFingerprints,
  getMockFingersFingerprints,
  getMockFingerPrintHubFingerprints,
  getMockARLFingerprints,
  getMockFingerprintStats,
} from './data/fingerprints'
// IP Addresses
export {
  mockIPAddresses,
  getMockIPAddresses,
  getMockIPAddressByIP,
} from './data/ip-addresses'
// Search
export {
  getMockSearchResults,
} from './data/search'
// Tools
export {
  mockTools,
  getMockTools,
  getMockToolById,
} from './data/tools'
// Wordlists
export {
  mockWordlists,
  mockWordlistContent,
  getMockWordlists,
  getMockWordlistById,
  getMockWordlistContent,
} from './data/wordlists'
// Nuclei Templates
export {
  mockNucleiTemplateTree,
  mockNucleiTemplateContent,
  getMockNucleiTemplateTree,
  getMockNucleiTemplateContent,
} from './data/nuclei-templates'
// System Logs
export {
  mockLogFiles,
  mockSystemLogContent,
  mockErrorLogContent,
  getMockLogFiles,
  getMockSystemLogs,
} from './data/system-logs'
// Notification Settings
export {
  mockNotificationSettings,
  getMockNotificationSettings,
  updateMockNotificationSettings,
} from './data/notification-settings'

View File

@@ -0,0 +1,14 @@
import { api } from '@/lib/api-client'
import type { ApiKeySettings } from '@/types/api-key-settings.types'
/**
 * REST client for the subfinder provider API-key settings endpoints,
 * backed by the shared `api` HTTP client.
 */
export class ApiKeySettingsService {
  /** Fetch the current API-key settings from the backend. */
  static async getSettings(): Promise<ApiKeySettings> {
    const { data } = await api.get<ApiKeySettings>('/settings/api-keys/')
    return data
  }

  /** Persist a (possibly partial) settings update and return the saved state. */
  static async updateSettings(data: Partial<ApiKeySettings>): Promise<ApiKeySettings> {
    const { data: saved } = await api.put<ApiKeySettings>('/settings/api-keys/', data)
    return saved
  }
}

View File

@@ -113,3 +113,40 @@ export async function getScanStatistics(): Promise<ScanStatistics> {
const res = await api.get<ScanStatistics>('/scans/statistics/')
return res.data
}
/**
 * Scan log entry type
 */
export interface ScanLog {
  id: number
  // Log severity of this entry.
  level: 'info' | 'warning' | 'error'
  // Raw log line text.
  content: string
  // ISO-8601 timestamp when the entry was recorded.
  createdAt: string
}
/**
 * Get scan logs response type
 */
export interface GetScanLogsResponse {
  results: ScanLog[]
  // True when more entries exist past the last returned id (cursor paging).
  hasMore: boolean
}
/**
 * Get scan logs params type
 */
export interface GetScanLogsParams {
  // Cursor: only return entries with id greater than this value.
  afterId?: number
  // Maximum number of entries to return.
  limit?: number
}
/**
 * Get scan logs
 * @param scanId - Scan ID
 * @param params - Query parameters (afterId for cursor, limit for max results)
 * @returns Scan logs with hasMore indicator
 */
export async function getScanLogs(scanId: number, params?: GetScanLogsParams): Promise<GetScanLogsResponse> {
  const { data } = await api.get<GetScanLogsResponse>(`/scans/${scanId}/logs/`, { params })
  return data
}

View File

@@ -0,0 +1,42 @@
/**
 * API key configuration type definitions.
 * Used for configuring subfinder third-party data sources.
 */
// Single-field provider config: hunter, shodan, zoomeye, securitytrails, threatbook, quake
export interface SingleFieldProviderConfig {
  enabled: boolean
  apiKey: string
}
// FOFA provider config: email + apiKey
export interface FofaProviderConfig {
  enabled: boolean
  email: string
  apiKey: string
}
// Censys provider config: apiId + apiSecret
export interface CensysProviderConfig {
  enabled: boolean
  apiId: string
  apiSecret: string
}
// Complete API key settings: one entry per supported provider.
export interface ApiKeySettings {
  fofa: FofaProviderConfig
  hunter: SingleFieldProviderConfig
  shodan: SingleFieldProviderConfig
  censys: CensysProviderConfig
  zoomeye: SingleFieldProviderConfig
  securitytrails: SingleFieldProviderConfig
  threatbook: SingleFieldProviderConfig
  quake: SingleFieldProviderConfig
}
// Provider identifier (a key of ApiKeySettings).
export type ProviderKey = keyof ApiKeySettings
// Union of all provider config shapes.
export type ProviderConfig = FofaProviderConfig | CensysProviderConfig | SingleFieldProviderConfig