mirror of
https://github.com/yyhuni/xingrin.git
synced 2026-02-02 04:33:10 +08:00
Compare commits
67 Commits
v1.1.8
...
v1.2.1-dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6caf707072 | ||
|
|
2627b1fc40 | ||
|
|
ec6712b9b4 | ||
|
|
9d5e4d5408 | ||
|
|
c5d5b24c8f | ||
|
|
671cb56b62 | ||
|
|
51025f69a8 | ||
|
|
b2403b29c4 | ||
|
|
18ef01a47b | ||
|
|
0bf8108fb3 | ||
|
|
837ad19131 | ||
|
|
d7de9a7129 | ||
|
|
22b4e51b42 | ||
|
|
d03628ee45 | ||
|
|
0baabe0753 | ||
|
|
e1191d7abf | ||
|
|
82a2e9a0e7 | ||
|
|
1ccd1bc338 | ||
|
|
b4d42f5372 | ||
|
|
2c66450756 | ||
|
|
119d82dc89 | ||
|
|
fba7f7c508 | ||
|
|
99d384ce29 | ||
|
|
07f36718ab | ||
|
|
7e3f69c208 | ||
|
|
5f90473c3c | ||
|
|
e2a815b96a | ||
|
|
f86a1a9d47 | ||
|
|
d5945679aa | ||
|
|
51e2c51748 | ||
|
|
e2cbf98dda | ||
|
|
cd72bdf7c3 | ||
|
|
35abcf7e39 | ||
|
|
09f2d343a4 | ||
|
|
54d1f86bde | ||
|
|
a3997c9676 | ||
|
|
c90a55f85e | ||
|
|
2eab88b452 | ||
|
|
1baf0eb5e1 | ||
|
|
b61e73f7be | ||
|
|
e896734dfc | ||
|
|
cd83f52f35 | ||
|
|
3e29554c36 | ||
|
|
18e02b536e | ||
|
|
4c1c6f70ab | ||
|
|
a72e7675f5 | ||
|
|
93c2163764 | ||
|
|
de72c91561 | ||
|
|
3e6d060b75 | ||
|
|
766f045904 | ||
|
|
8acfe1cc33 | ||
|
|
7aec3eabb2 | ||
|
|
b1f11c36a4 | ||
|
|
d97fb5245a | ||
|
|
ddf9a1f5a4 | ||
|
|
47f9f96a4b | ||
|
|
6f43e73162 | ||
|
|
9b7d496f3e | ||
|
|
6390849d52 | ||
|
|
7a6d2054f6 | ||
|
|
73ebaab232 | ||
|
|
11899b29c2 | ||
|
|
877d2a56d1 | ||
|
|
dc1e94f038 | ||
|
|
9c3833d13d | ||
|
|
92f3b722ef | ||
|
|
9ef503c666 |
7
.github/workflows/docker-build.yml
vendored
7
.github/workflows/docker-build.yml
vendored
@@ -106,16 +106,17 @@ jobs:
|
||||
${{ steps.version.outputs.IS_RELEASE == 'true' && format('{0}/{1}:latest', env.IMAGE_PREFIX, matrix.image) || '' }}
|
||||
build-args: |
|
||||
IMAGE_TAG=${{ steps.version.outputs.VERSION }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
cache-from: type=gha,scope=${{ matrix.image }}
|
||||
cache-to: type=gha,mode=max,scope=${{ matrix.image }}
|
||||
provenance: false
|
||||
sbom: false
|
||||
|
||||
# 所有镜像构建成功后,更新 VERSION 文件
|
||||
# 只有正式版本(不含 -dev, -alpha, -beta, -rc 等后缀)才更新
|
||||
update-version:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
if: startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-')
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
13
README.md
13
README.md
@@ -177,11 +177,19 @@ cd xingrin
|
||||
|
||||
# 安装并启动(生产模式)
|
||||
sudo ./install.sh
|
||||
|
||||
# 🇨🇳 中国大陆用户推荐使用镜像加速
|
||||
sudo ./install.sh --mirror
|
||||
```
|
||||
|
||||
> **💡 --mirror 参数说明**
|
||||
> - 自动配置 Docker 镜像加速(国内镜像源)
|
||||
> - 加速 Git 仓库克隆(Nuclei 模板等)
|
||||
> - 大幅提升安装速度,避免网络超时
|
||||
|
||||
### 访问服务
|
||||
|
||||
- **Web 界面**: `https://localhost`
|
||||
- **Web 界面**: `https://ip:8083`
|
||||
|
||||
### 常用命令
|
||||
|
||||
@@ -197,9 +205,6 @@ sudo ./restart.sh
|
||||
|
||||
# 卸载
|
||||
sudo ./uninstall.sh
|
||||
|
||||
# 更新
|
||||
sudo ./update.sh
|
||||
```
|
||||
|
||||
## 🤝 反馈与贡献
|
||||
|
||||
@@ -134,8 +134,8 @@ class VulnerabilitySnapshotSerializer(serializers.ModelSerializer):
|
||||
class EndpointListSerializer(serializers.ModelSerializer):
|
||||
"""端点列表序列化器(用于目标端点列表页)"""
|
||||
|
||||
# 将 GF 匹配模式映射为前端使用的 tags 字段
|
||||
tags = serializers.ListField(
|
||||
# GF 匹配模式(gf-patterns 工具匹配的敏感 URL 模式)
|
||||
gfPatterns = serializers.ListField(
|
||||
child=serializers.CharField(),
|
||||
source='matched_gf_patterns',
|
||||
read_only=True,
|
||||
@@ -155,7 +155,7 @@ class EndpointListSerializer(serializers.ModelSerializer):
|
||||
'body_preview',
|
||||
'tech',
|
||||
'vhost',
|
||||
'tags',
|
||||
'gfPatterns',
|
||||
'created_at',
|
||||
]
|
||||
read_only_fields = fields
|
||||
@@ -258,8 +258,8 @@ class DirectorySnapshotSerializer(serializers.ModelSerializer):
|
||||
class EndpointSnapshotSerializer(serializers.ModelSerializer):
|
||||
"""端点快照序列化器(用于扫描历史)"""
|
||||
|
||||
# 将 GF 匹配模式映射为前端使用的 tags 字段
|
||||
tags = serializers.ListField(
|
||||
# GF 匹配模式(gf-patterns 工具匹配的敏感 URL 模式)
|
||||
gfPatterns = serializers.ListField(
|
||||
child=serializers.CharField(),
|
||||
source='matched_gf_patterns',
|
||||
read_only=True,
|
||||
@@ -280,7 +280,7 @@ class EndpointSnapshotSerializer(serializers.ModelSerializer):
|
||||
'body_preview',
|
||||
'tech',
|
||||
'vhost',
|
||||
'tags',
|
||||
'gfPatterns',
|
||||
'created_at',
|
||||
]
|
||||
read_only_fields = fields
|
||||
|
||||
@@ -66,12 +66,19 @@ def fetch_config_and_setup_django():
|
||||
os.environ.setdefault("ENABLE_COMMAND_LOGGING", str(config['logging']['enableCommandLogging']).lower())
|
||||
os.environ.setdefault("DEBUG", str(config['debug']))
|
||||
|
||||
# Git 加速配置(用于 Git clone 加速)
|
||||
git_mirror = config.get('gitMirror', '')
|
||||
if git_mirror:
|
||||
os.environ.setdefault("GIT_MIRROR", git_mirror)
|
||||
|
||||
print(f"[CONFIG] ✓ 配置获取成功")
|
||||
print(f"[CONFIG] DB_HOST: {db_host}")
|
||||
print(f"[CONFIG] DB_PORT: {db_port}")
|
||||
print(f"[CONFIG] DB_NAME: {db_name}")
|
||||
print(f"[CONFIG] DB_USER: {db_user}")
|
||||
print(f"[CONFIG] REDIS_URL: {config['redisUrl']}")
|
||||
if git_mirror:
|
||||
print(f"[CONFIG] GIT_MIRROR: {git_mirror}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"[ERROR] 获取配置失败: {config_url} - {e}", file=sys.stderr)
|
||||
|
||||
@@ -21,8 +21,8 @@ class SystemLogService:
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
# 日志文件路径(容器内路径,通过 volume 挂载到宿主机 /opt/xingrin/logs)
|
||||
self.log_file = "/app/backend/logs/xingrin.log"
|
||||
# 日志文件路径(统一使用 /opt/xingrin/logs)
|
||||
self.log_file = "/opt/xingrin/logs/xingrin.log"
|
||||
self.default_lines = 200 # 默认返回行数
|
||||
self.max_lines = 10000 # 最大返回行数限制
|
||||
self.timeout_seconds = 3 # tail 命令超时时间
|
||||
|
||||
@@ -13,6 +13,7 @@ from .csv_utils import (
|
||||
format_datetime,
|
||||
UTF8_BOM,
|
||||
)
|
||||
from .git_proxy import get_git_proxy_url
|
||||
|
||||
__all__ = [
|
||||
'deduplicate_for_bulk',
|
||||
@@ -25,4 +26,5 @@ __all__ = [
|
||||
'format_list_field',
|
||||
'format_datetime',
|
||||
'UTF8_BOM',
|
||||
'get_git_proxy_url',
|
||||
]
|
||||
|
||||
@@ -132,7 +132,8 @@ class QueryBuilder:
|
||||
cls,
|
||||
queryset: QuerySet,
|
||||
filter_groups: List[FilterGroup],
|
||||
field_mapping: Dict[str, str]
|
||||
field_mapping: Dict[str, str],
|
||||
json_array_fields: List[str] = None
|
||||
) -> QuerySet:
|
||||
"""构建 Django ORM 查询
|
||||
|
||||
@@ -140,6 +141,7 @@ class QueryBuilder:
|
||||
queryset: Django QuerySet
|
||||
filter_groups: 解析后的过滤条件组列表
|
||||
field_mapping: 字段映射
|
||||
json_array_fields: JSON 数组字段列表(使用 __contains 查询)
|
||||
|
||||
Returns:
|
||||
过滤后的 QuerySet
|
||||
@@ -147,6 +149,8 @@ class QueryBuilder:
|
||||
if not filter_groups:
|
||||
return queryset
|
||||
|
||||
json_array_fields = json_array_fields or []
|
||||
|
||||
# 构建 Q 对象
|
||||
combined_q = None
|
||||
|
||||
@@ -159,8 +163,11 @@ class QueryBuilder:
|
||||
logger.debug(f"忽略未知字段: {f.field}")
|
||||
continue
|
||||
|
||||
# 判断是否为 JSON 数组字段
|
||||
is_json_array = db_field in json_array_fields
|
||||
|
||||
# 构建单个条件的 Q 对象
|
||||
q = cls._build_single_q(db_field, f.operator, f.value)
|
||||
q = cls._build_single_q(db_field, f.operator, f.value, is_json_array)
|
||||
if q is None:
|
||||
continue
|
||||
|
||||
@@ -177,8 +184,12 @@ class QueryBuilder:
|
||||
return queryset
|
||||
|
||||
@classmethod
|
||||
def _build_single_q(cls, field: str, operator: str, value: str) -> Optional[Q]:
|
||||
def _build_single_q(cls, field: str, operator: str, value: str, is_json_array: bool = False) -> Optional[Q]:
|
||||
"""构建单个条件的 Q 对象"""
|
||||
if is_json_array:
|
||||
# JSON 数组字段使用 __contains 查询
|
||||
return Q(**{f'{field}__contains': [value]})
|
||||
|
||||
if operator == '!=':
|
||||
return cls._build_not_equal_q(field, value)
|
||||
elif operator == '==':
|
||||
@@ -219,7 +230,8 @@ class QueryBuilder:
|
||||
def apply_filters(
|
||||
queryset: QuerySet,
|
||||
query_string: str,
|
||||
field_mapping: Dict[str, str]
|
||||
field_mapping: Dict[str, str],
|
||||
json_array_fields: List[str] = None
|
||||
) -> QuerySet:
|
||||
"""应用过滤条件到 QuerySet
|
||||
|
||||
@@ -227,6 +239,7 @@ def apply_filters(
|
||||
queryset: Django QuerySet
|
||||
query_string: 查询语法字符串
|
||||
field_mapping: 字段映射
|
||||
json_array_fields: JSON 数组字段列表(使用 __contains 查询)
|
||||
|
||||
Returns:
|
||||
过滤后的 QuerySet
|
||||
@@ -242,6 +255,9 @@ def apply_filters(
|
||||
|
||||
# 混合查询
|
||||
apply_filters(qs, 'type="xss" || type="sqli" && severity="high"', mapping)
|
||||
|
||||
# JSON 数组字段查询
|
||||
apply_filters(qs, 'implies="PHP"', mapping, json_array_fields=['implies'])
|
||||
"""
|
||||
if not query_string or not query_string.strip():
|
||||
return queryset
|
||||
@@ -253,7 +269,12 @@ def apply_filters(
|
||||
return queryset
|
||||
|
||||
logger.debug(f"解析过滤条件: {filter_groups}")
|
||||
return QueryBuilder.build_query(queryset, filter_groups, field_mapping)
|
||||
return QueryBuilder.build_query(
|
||||
queryset,
|
||||
filter_groups,
|
||||
field_mapping,
|
||||
json_array_fields=json_array_fields
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"过滤解析错误: {e}, query: {query_string}")
|
||||
|
||||
39
backend/apps/common/utils/git_proxy.py
Normal file
39
backend/apps/common/utils/git_proxy.py
Normal file
@@ -0,0 +1,39 @@
|
||||
"""Git proxy utilities for URL acceleration."""
|
||||
|
||||
import os
|
||||
from urllib.parse import urlparse
|
||||
|
||||
|
||||
def get_git_proxy_url(original_url: str) -> str:
|
||||
"""
|
||||
Convert Git repository URL to proxy format for acceleration.
|
||||
|
||||
Supports multiple mirror services (standard format):
|
||||
- gh-proxy.org: https://gh-proxy.org/https://github.com/user/repo.git
|
||||
- ghproxy.com: https://ghproxy.com/https://github.com/user/repo.git
|
||||
- mirror.ghproxy.com: https://mirror.ghproxy.com/https://github.com/user/repo.git
|
||||
- ghps.cc: https://ghps.cc/https://github.com/user/repo.git
|
||||
|
||||
Args:
|
||||
original_url: Original repository URL, e.g., https://github.com/user/repo.git
|
||||
|
||||
Returns:
|
||||
Converted URL based on GIT_MIRROR setting.
|
||||
If GIT_MIRROR is not set, returns the original URL unchanged.
|
||||
"""
|
||||
git_mirror = os.getenv("GIT_MIRROR", "").strip()
|
||||
if not git_mirror:
|
||||
return original_url
|
||||
|
||||
# Remove trailing slash from mirror URL if present
|
||||
git_mirror = git_mirror.rstrip("/")
|
||||
|
||||
parsed = urlparse(original_url)
|
||||
host = parsed.netloc.lower()
|
||||
|
||||
# Only support GitHub for now
|
||||
if "github.com" not in host:
|
||||
return original_url
|
||||
|
||||
# Standard format: https://mirror.example.com/https://github.com/user/repo.git
|
||||
return f"{git_mirror}/{original_url}"
|
||||
@@ -242,8 +242,9 @@ class WorkerDeployConsumer(AsyncWebsocketConsumer):
|
||||
return
|
||||
|
||||
# 远程 Worker 通过 nginx HTTPS 访问(nginx 反代到后端 8888)
|
||||
# 使用 https://{PUBLIC_HOST} 而不是直连 8888 端口
|
||||
heartbeat_api_url = f"https://{public_host}" # 基础 URL,agent 会加 /api/...
|
||||
# 使用 https://{PUBLIC_HOST}:{PUBLIC_PORT} 而不是直连 8888 端口
|
||||
public_port = getattr(settings, 'PUBLIC_PORT', '8083')
|
||||
heartbeat_api_url = f"https://{public_host}:{public_port}"
|
||||
|
||||
session_name = f'xingrin_deploy_{self.worker_id}'
|
||||
remote_script_path = '/tmp/xingrin_deploy.sh'
|
||||
|
||||
160
backend/apps/engine/management/commands/init_fingerprints.py
Normal file
160
backend/apps/engine/management/commands/init_fingerprints.py
Normal file
@@ -0,0 +1,160 @@
|
||||
"""初始化内置指纹库
|
||||
|
||||
- EHole 指纹: ehole.json -> 导入到数据库
|
||||
- Goby 指纹: goby.json -> 导入到数据库
|
||||
- Wappalyzer 指纹: wappalyzer.json -> 导入到数据库
|
||||
|
||||
可重复执行:如果数据库已有数据则跳过,只在空库时导入。
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
from apps.engine.models import EholeFingerprint, GobyFingerprint, WappalyzerFingerprint
|
||||
from apps.engine.services.fingerprints import (
|
||||
EholeFingerprintService,
|
||||
GobyFingerprintService,
|
||||
WappalyzerFingerprintService,
|
||||
)
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# 内置指纹配置
|
||||
DEFAULT_FINGERPRINTS = [
|
||||
{
|
||||
"type": "ehole",
|
||||
"filename": "ehole.json",
|
||||
"model": EholeFingerprint,
|
||||
"service": EholeFingerprintService,
|
||||
"data_key": "fingerprint", # JSON 中指纹数组的 key
|
||||
},
|
||||
{
|
||||
"type": "goby",
|
||||
"filename": "goby.json",
|
||||
"model": GobyFingerprint,
|
||||
"service": GobyFingerprintService,
|
||||
"data_key": None, # Goby 是数组格式,直接使用整个 JSON
|
||||
},
|
||||
{
|
||||
"type": "wappalyzer",
|
||||
"filename": "wappalyzer.json",
|
||||
"model": WappalyzerFingerprint,
|
||||
"service": WappalyzerFingerprintService,
|
||||
"data_key": "apps", # Wappalyzer 使用 apps 对象
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "初始化内置指纹库"
|
||||
|
||||
def handle(self, *args, **options):
|
||||
project_base = Path(settings.BASE_DIR).parent # /app/backend -> /app
|
||||
fingerprints_dir = project_base / "backend" / "fingerprints"
|
||||
|
||||
initialized = 0
|
||||
skipped = 0
|
||||
failed = 0
|
||||
|
||||
for item in DEFAULT_FINGERPRINTS:
|
||||
fp_type = item["type"]
|
||||
filename = item["filename"]
|
||||
model = item["model"]
|
||||
service_class = item["service"]
|
||||
data_key = item["data_key"]
|
||||
|
||||
# 检查数据库是否已有数据
|
||||
existing_count = model.objects.count()
|
||||
if existing_count > 0:
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f"[{fp_type}] 数据库已有 {existing_count} 条记录,跳过初始化"
|
||||
))
|
||||
skipped += 1
|
||||
continue
|
||||
|
||||
# 查找源文件
|
||||
src_path = fingerprints_dir / filename
|
||||
if not src_path.exists():
|
||||
self.stdout.write(self.style.WARNING(
|
||||
f"[{fp_type}] 未找到内置指纹文件: {src_path},跳过"
|
||||
))
|
||||
failed += 1
|
||||
continue
|
||||
|
||||
# 读取并解析 JSON
|
||||
try:
|
||||
with open(src_path, "r", encoding="utf-8") as f:
|
||||
json_data = json.load(f)
|
||||
except (json.JSONDecodeError, OSError) as exc:
|
||||
self.stdout.write(self.style.ERROR(
|
||||
f"[{fp_type}] 读取指纹文件失败: {exc}"
|
||||
))
|
||||
failed += 1
|
||||
continue
|
||||
|
||||
# 提取指纹数据(根据不同格式处理)
|
||||
fingerprints = self._extract_fingerprints(json_data, data_key, fp_type)
|
||||
if not fingerprints:
|
||||
self.stdout.write(self.style.WARNING(
|
||||
f"[{fp_type}] 指纹文件中没有有效数据,跳过"
|
||||
))
|
||||
failed += 1
|
||||
continue
|
||||
|
||||
# 使用 Service 批量导入
|
||||
try:
|
||||
service = service_class()
|
||||
result = service.batch_create_fingerprints(fingerprints)
|
||||
created = result.get("created", 0)
|
||||
failed_count = result.get("failed", 0)
|
||||
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f"[{fp_type}] 导入成功: 创建 {created} 条,失败 {failed_count} 条"
|
||||
))
|
||||
initialized += 1
|
||||
except Exception as exc:
|
||||
self.stdout.write(self.style.ERROR(
|
||||
f"[{fp_type}] 导入失败: {exc}"
|
||||
))
|
||||
failed += 1
|
||||
continue
|
||||
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f"指纹初始化完成: 成功 {initialized}, 已存在跳过 {skipped}, 失败 {failed}"
|
||||
))
|
||||
|
||||
def _extract_fingerprints(self, json_data, data_key, fp_type):
|
||||
"""
|
||||
根据不同格式提取指纹数据,兼容数组和对象两种格式
|
||||
|
||||
支持的格式:
|
||||
- 数组格式: [...] 或 {"key": [...]}
|
||||
- 对象格式: {...} 或 {"key": {...}} -> 转换为 [{"name": k, ...v}]
|
||||
"""
|
||||
# 获取目标数据
|
||||
if data_key is None:
|
||||
# 直接使用整个 JSON
|
||||
target = json_data
|
||||
else:
|
||||
# 从指定 key 获取,支持多个可能的 key(如 apps/technologies)
|
||||
if data_key == "apps":
|
||||
target = json_data.get("apps") or json_data.get("technologies") or {}
|
||||
else:
|
||||
target = json_data.get(data_key, [])
|
||||
|
||||
# 根据数据类型处理
|
||||
if isinstance(target, list):
|
||||
# 已经是数组格式,直接返回
|
||||
return target
|
||||
elif isinstance(target, dict):
|
||||
# 对象格式,转换为数组 [{"name": key, ...value}]
|
||||
return [{"name": name, **data} if isinstance(data, dict) else {"name": name}
|
||||
for name, data in target.items()]
|
||||
|
||||
return []
|
||||
@@ -3,12 +3,17 @@
|
||||
项目安装后执行此命令,自动创建官方模板仓库记录。
|
||||
|
||||
使用方式:
|
||||
python manage.py init_nuclei_templates # 只创建记录
|
||||
python manage.py init_nuclei_templates # 只创建记录(检测本地已有仓库)
|
||||
python manage.py init_nuclei_templates --sync # 创建并同步(git clone)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.utils import timezone
|
||||
|
||||
from apps.engine.models import NucleiTemplateRepo
|
||||
from apps.engine.services import NucleiTemplateRepoService
|
||||
@@ -26,6 +31,20 @@ DEFAULT_REPOS = [
|
||||
]
|
||||
|
||||
|
||||
def get_local_commit_hash(local_path: Path) -> str:
|
||||
"""获取本地 Git 仓库的 commit hash"""
|
||||
if not (local_path / ".git").is_dir():
|
||||
return ""
|
||||
result = subprocess.run(
|
||||
["git", "-C", str(local_path), "rev-parse", "HEAD"],
|
||||
check=False,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
return result.stdout.strip() if result.returncode == 0 else ""
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "初始化 Nuclei 模板仓库(创建官方模板仓库记录)"
|
||||
|
||||
@@ -46,6 +65,8 @@ class Command(BaseCommand):
|
||||
force = options.get("force", False)
|
||||
|
||||
service = NucleiTemplateRepoService()
|
||||
base_dir = Path(getattr(settings, "NUCLEI_TEMPLATES_REPOS_BASE_DIR", "/opt/xingrin/nuclei-repos"))
|
||||
|
||||
created = 0
|
||||
skipped = 0
|
||||
synced = 0
|
||||
@@ -87,20 +108,30 @@ class Command(BaseCommand):
|
||||
|
||||
# 创建新仓库记录
|
||||
try:
|
||||
# 检查本地是否已有仓库(由 install.sh 预下载)
|
||||
local_path = base_dir / name
|
||||
local_commit = get_local_commit_hash(local_path)
|
||||
|
||||
repo = NucleiTemplateRepo.objects.create(
|
||||
name=name,
|
||||
repo_url=repo_url,
|
||||
local_path=str(local_path) if local_commit else "",
|
||||
commit_hash=local_commit,
|
||||
last_synced_at=timezone.now() if local_commit else None,
|
||||
)
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f"[{name}] 创建成功: id={repo.id}"
|
||||
))
|
||||
|
||||
if local_commit:
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f"[{name}] 创建成功(检测到本地仓库): commit={local_commit[:8]}"
|
||||
))
|
||||
else:
|
||||
self.stdout.write(self.style.SUCCESS(
|
||||
f"[{name}] 创建成功: id={repo.id}"
|
||||
))
|
||||
created += 1
|
||||
|
||||
# 初始化本地路径
|
||||
service.ensure_local_path(repo)
|
||||
|
||||
# 如果需要同步
|
||||
if do_sync:
|
||||
# 如果本地没有仓库且需要同步
|
||||
if not local_commit and do_sync:
|
||||
try:
|
||||
self.stdout.write(self.style.WARNING(
|
||||
f"[{name}] 正在同步(首次可能需要几分钟)..."
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
"""初始化所有内置字典 Wordlist 记录
|
||||
|
||||
- 目录扫描默认字典: dir_default.txt -> /app/backend/wordlist/dir_default.txt
|
||||
- 子域名爆破默认字典: subdomains-top1million-110000.txt -> /app/backend/wordlist/subdomains-top1million-110000.txt
|
||||
内置字典从镜像内 /app/backend/wordlist/ 复制到运行时目录 /opt/xingrin/wordlists/:
|
||||
- 目录扫描默认字典: dir_default.txt
|
||||
- 子域名爆破默认字典: subdomains-top1million-110000.txt
|
||||
|
||||
可重复执行:如果已存在同名记录且文件有效则跳过,只在缺失或文件丢失时创建/修复。
|
||||
"""
|
||||
|
||||
19
backend/apps/engine/models/__init__.py
Normal file
19
backend/apps/engine/models/__init__.py
Normal file
@@ -0,0 +1,19 @@
|
||||
"""Engine Models
|
||||
|
||||
导出所有 Engine 模块的 Models
|
||||
"""
|
||||
|
||||
from .engine import WorkerNode, ScanEngine, Wordlist, NucleiTemplateRepo
|
||||
from .fingerprints import EholeFingerprint, GobyFingerprint, WappalyzerFingerprint
|
||||
|
||||
__all__ = [
|
||||
# 核心 Models
|
||||
"WorkerNode",
|
||||
"ScanEngine",
|
||||
"Wordlist",
|
||||
"NucleiTemplateRepo",
|
||||
# 指纹 Models
|
||||
"EholeFingerprint",
|
||||
"GobyFingerprint",
|
||||
"WappalyzerFingerprint",
|
||||
]
|
||||
@@ -1,3 +1,8 @@
|
||||
"""Engine 模块核心 Models
|
||||
|
||||
包含 WorkerNode, ScanEngine, Wordlist, NucleiTemplateRepo
|
||||
"""
|
||||
|
||||
from django.db import models
|
||||
|
||||
|
||||
@@ -78,6 +83,7 @@ class ScanEngine(models.Model):
|
||||
indexes = [
|
||||
models.Index(fields=['-created_at']),
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
return str(self.name or f'ScanEngine {self.id}')
|
||||
|
||||
108
backend/apps/engine/models/fingerprints.py
Normal file
108
backend/apps/engine/models/fingerprints.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""指纹相关 Models
|
||||
|
||||
包含 EHole、Goby、Wappalyzer 等指纹格式的数据模型
|
||||
"""
|
||||
|
||||
from django.db import models
|
||||
|
||||
|
||||
class GobyFingerprint(models.Model):
|
||||
"""Goby 格式指纹规则
|
||||
|
||||
Goby 使用逻辑表达式和规则数组进行匹配:
|
||||
- logic: 逻辑表达式,如 "a||b", "(a&&b)||c"
|
||||
- rule: 规则数组,每条规则包含 label, feature, is_equal
|
||||
"""
|
||||
|
||||
name = models.CharField(max_length=300, unique=True, help_text='产品名称')
|
||||
logic = models.CharField(max_length=500, help_text='逻辑表达式')
|
||||
rule = models.JSONField(default=list, help_text='规则数组')
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
|
||||
class Meta:
|
||||
db_table = 'goby_fingerprint'
|
||||
verbose_name = 'Goby 指纹'
|
||||
verbose_name_plural = 'Goby 指纹'
|
||||
ordering = ['-created_at']
|
||||
indexes = [
|
||||
models.Index(fields=['name']),
|
||||
models.Index(fields=['logic']),
|
||||
models.Index(fields=['-created_at']),
|
||||
]
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"{self.name} ({self.logic})"
|
||||
|
||||
|
||||
class EholeFingerprint(models.Model):
|
||||
"""EHole 格式指纹规则(字段与 ehole.json 一致)"""
|
||||
|
||||
cms = models.CharField(max_length=200, help_text='产品/CMS名称')
|
||||
method = models.CharField(max_length=200, default='keyword', help_text='匹配方式')
|
||||
location = models.CharField(max_length=200, default='body', help_text='匹配位置')
|
||||
keyword = models.JSONField(default=list, help_text='关键词列表')
|
||||
is_important = models.BooleanField(default=False, help_text='是否重点资产')
|
||||
type = models.CharField(max_length=100, blank=True, default='-', help_text='分类')
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
|
||||
class Meta:
|
||||
db_table = 'ehole_fingerprint'
|
||||
verbose_name = 'EHole 指纹'
|
||||
verbose_name_plural = 'EHole 指纹'
|
||||
ordering = ['-created_at']
|
||||
indexes = [
|
||||
# 搜索过滤字段索引
|
||||
models.Index(fields=['cms']),
|
||||
models.Index(fields=['method']),
|
||||
models.Index(fields=['location']),
|
||||
models.Index(fields=['type']),
|
||||
models.Index(fields=['is_important']),
|
||||
# 排序字段索引
|
||||
models.Index(fields=['-created_at']),
|
||||
]
|
||||
constraints = [
|
||||
# 唯一约束:cms + method + location 组合不能重复
|
||||
models.UniqueConstraint(
|
||||
fields=['cms', 'method', 'location'],
|
||||
name='unique_ehole_fingerprint'
|
||||
),
|
||||
]
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"{self.cms} ({self.method}@{self.location})"
|
||||
|
||||
|
||||
class WappalyzerFingerprint(models.Model):
|
||||
"""Wappalyzer 格式指纹规则
|
||||
|
||||
Wappalyzer 支持多种检测方式:cookies, headers, scriptSrc, js, meta, html 等
|
||||
"""
|
||||
|
||||
name = models.CharField(max_length=300, unique=True, help_text='应用名称')
|
||||
cats = models.JSONField(default=list, help_text='分类 ID 数组')
|
||||
cookies = models.JSONField(default=dict, blank=True, help_text='Cookie 检测规则')
|
||||
headers = models.JSONField(default=dict, blank=True, help_text='HTTP Header 检测规则')
|
||||
script_src = models.JSONField(default=list, blank=True, help_text='脚本 URL 正则数组')
|
||||
js = models.JSONField(default=list, blank=True, help_text='JavaScript 变量检测规则')
|
||||
implies = models.JSONField(default=list, blank=True, help_text='依赖关系数组')
|
||||
meta = models.JSONField(default=dict, blank=True, help_text='HTML meta 标签检测规则')
|
||||
html = models.JSONField(default=list, blank=True, help_text='HTML 内容正则数组')
|
||||
description = models.TextField(blank=True, default='', help_text='应用描述')
|
||||
website = models.URLField(max_length=500, blank=True, default='', help_text='官网链接')
|
||||
cpe = models.CharField(max_length=300, blank=True, default='', help_text='CPE 标识符')
|
||||
created_at = models.DateTimeField(auto_now_add=True)
|
||||
|
||||
class Meta:
|
||||
db_table = 'wappalyzer_fingerprint'
|
||||
verbose_name = 'Wappalyzer 指纹'
|
||||
verbose_name_plural = 'Wappalyzer 指纹'
|
||||
ordering = ['-created_at']
|
||||
indexes = [
|
||||
models.Index(fields=['name']),
|
||||
models.Index(fields=['website']),
|
||||
models.Index(fields=['cpe']),
|
||||
models.Index(fields=['-created_at']),
|
||||
]
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"{self.name}"
|
||||
14
backend/apps/engine/serializers/fingerprints/__init__.py
Normal file
14
backend/apps/engine/serializers/fingerprints/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
||||
"""指纹管理 Serializers
|
||||
|
||||
导出所有指纹相关的 Serializer 类
|
||||
"""
|
||||
|
||||
from .ehole import EholeFingerprintSerializer
|
||||
from .goby import GobyFingerprintSerializer
|
||||
from .wappalyzer import WappalyzerFingerprintSerializer
|
||||
|
||||
__all__ = [
|
||||
"EholeFingerprintSerializer",
|
||||
"GobyFingerprintSerializer",
|
||||
"WappalyzerFingerprintSerializer",
|
||||
]
|
||||
27
backend/apps/engine/serializers/fingerprints/ehole.py
Normal file
27
backend/apps/engine/serializers/fingerprints/ehole.py
Normal file
@@ -0,0 +1,27 @@
|
||||
"""EHole 指纹 Serializer"""
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from apps.engine.models import EholeFingerprint
|
||||
|
||||
|
||||
class EholeFingerprintSerializer(serializers.ModelSerializer):
|
||||
"""EHole 指纹序列化器"""
|
||||
|
||||
class Meta:
|
||||
model = EholeFingerprint
|
||||
fields = ['id', 'cms', 'method', 'location', 'keyword',
|
||||
'is_important', 'type', 'created_at']
|
||||
read_only_fields = ['id', 'created_at']
|
||||
|
||||
def validate_cms(self, value):
|
||||
"""校验 cms 字段"""
|
||||
if not value or not value.strip():
|
||||
raise serializers.ValidationError("cms 字段不能为空")
|
||||
return value.strip()
|
||||
|
||||
def validate_keyword(self, value):
|
||||
"""校验 keyword 字段"""
|
||||
if not isinstance(value, list):
|
||||
raise serializers.ValidationError("keyword 必须是数组")
|
||||
return value
|
||||
26
backend/apps/engine/serializers/fingerprints/goby.py
Normal file
26
backend/apps/engine/serializers/fingerprints/goby.py
Normal file
@@ -0,0 +1,26 @@
|
||||
"""Goby 指纹 Serializer"""
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from apps.engine.models import GobyFingerprint
|
||||
|
||||
|
||||
class GobyFingerprintSerializer(serializers.ModelSerializer):
|
||||
"""Goby 指纹序列化器"""
|
||||
|
||||
class Meta:
|
||||
model = GobyFingerprint
|
||||
fields = ['id', 'name', 'logic', 'rule', 'created_at']
|
||||
read_only_fields = ['id', 'created_at']
|
||||
|
||||
def validate_name(self, value):
|
||||
"""校验 name 字段"""
|
||||
if not value or not value.strip():
|
||||
raise serializers.ValidationError("name 字段不能为空")
|
||||
return value.strip()
|
||||
|
||||
def validate_rule(self, value):
|
||||
"""校验 rule 字段"""
|
||||
if not isinstance(value, list):
|
||||
raise serializers.ValidationError("rule 必须是数组")
|
||||
return value
|
||||
24
backend/apps/engine/serializers/fingerprints/wappalyzer.py
Normal file
24
backend/apps/engine/serializers/fingerprints/wappalyzer.py
Normal file
@@ -0,0 +1,24 @@
|
||||
"""Wappalyzer 指纹 Serializer"""
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from apps.engine.models import WappalyzerFingerprint
|
||||
|
||||
|
||||
class WappalyzerFingerprintSerializer(serializers.ModelSerializer):
|
||||
"""Wappalyzer 指纹序列化器"""
|
||||
|
||||
class Meta:
|
||||
model = WappalyzerFingerprint
|
||||
fields = [
|
||||
'id', 'name', 'cats', 'cookies', 'headers', 'script_src',
|
||||
'js', 'implies', 'meta', 'html', 'description', 'website',
|
||||
'cpe', 'created_at'
|
||||
]
|
||||
read_only_fields = ['id', 'created_at']
|
||||
|
||||
def validate_name(self, value):
|
||||
"""校验 name 字段"""
|
||||
if not value or not value.strip():
|
||||
raise serializers.ValidationError("name 字段不能为空")
|
||||
return value.strip()
|
||||
16
backend/apps/engine/services/fingerprints/__init__.py
Normal file
16
backend/apps/engine/services/fingerprints/__init__.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""指纹管理 Services
|
||||
|
||||
导出所有指纹相关的 Service 类
|
||||
"""
|
||||
|
||||
from .base import BaseFingerprintService
|
||||
from .ehole import EholeFingerprintService
|
||||
from .goby import GobyFingerprintService
|
||||
from .wappalyzer import WappalyzerFingerprintService
|
||||
|
||||
__all__ = [
|
||||
"BaseFingerprintService",
|
||||
"EholeFingerprintService",
|
||||
"GobyFingerprintService",
|
||||
"WappalyzerFingerprintService",
|
||||
]
|
||||
144
backend/apps/engine/services/fingerprints/base.py
Normal file
144
backend/apps/engine/services/fingerprints/base.py
Normal file
@@ -0,0 +1,144 @@
|
||||
"""指纹管理基类 Service
|
||||
|
||||
提供通用的批量操作和缓存逻辑,供 EHole/Goby/Wappalyzer 等子类继承
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseFingerprintService:
|
||||
"""指纹管理基类 Service,提供通用的批量操作和缓存逻辑"""
|
||||
|
||||
model = None # 子类必须指定
|
||||
BATCH_SIZE = 1000 # 每批处理数量
|
||||
|
||||
def validate_fingerprint(self, item: dict) -> bool:
|
||||
"""
|
||||
校验单条指纹,子类必须实现
|
||||
|
||||
Args:
|
||||
item: 单条指纹数据
|
||||
|
||||
Returns:
|
||||
bool: 是否有效
|
||||
"""
|
||||
raise NotImplementedError("子类必须实现 validate_fingerprint 方法")
|
||||
|
||||
def validate_fingerprints(self, raw_data: list) -> tuple[list, list]:
|
||||
"""
|
||||
批量校验指纹数据
|
||||
|
||||
Args:
|
||||
raw_data: 原始指纹数据列表
|
||||
|
||||
Returns:
|
||||
tuple: (valid_items, invalid_items)
|
||||
"""
|
||||
valid, invalid = [], []
|
||||
for item in raw_data:
|
||||
if self.validate_fingerprint(item):
|
||||
valid.append(item)
|
||||
else:
|
||||
invalid.append(item)
|
||||
return valid, invalid
|
||||
|
||||
def to_model_data(self, item: dict) -> dict:
|
||||
"""
|
||||
转换为 Model 字段,子类必须实现
|
||||
|
||||
Args:
|
||||
item: 原始指纹数据
|
||||
|
||||
Returns:
|
||||
dict: Model 字段数据
|
||||
"""
|
||||
raise NotImplementedError("子类必须实现 to_model_data 方法")
|
||||
|
||||
def bulk_create(self, fingerprints: list) -> int:
|
||||
"""
|
||||
批量创建指纹记录(已校验的数据)
|
||||
|
||||
Args:
|
||||
fingerprints: 已校验的指纹数据列表
|
||||
|
||||
Returns:
|
||||
int: 成功创建数量
|
||||
"""
|
||||
if not fingerprints:
|
||||
return 0
|
||||
|
||||
objects = [self.model(**self.to_model_data(item)) for item in fingerprints]
|
||||
created = self.model.objects.bulk_create(objects, ignore_conflicts=True)
|
||||
return len(created)
|
||||
|
||||
def batch_create_fingerprints(self, raw_data: list) -> dict:
|
||||
"""
|
||||
完整流程:分批校验 + 批量创建
|
||||
|
||||
Args:
|
||||
raw_data: 原始指纹数据列表
|
||||
|
||||
Returns:
|
||||
dict: {'created': int, 'failed': int}
|
||||
"""
|
||||
total_created = 0
|
||||
total_failed = 0
|
||||
|
||||
for i in range(0, len(raw_data), self.BATCH_SIZE):
|
||||
batch = raw_data[i:i + self.BATCH_SIZE]
|
||||
valid, invalid = self.validate_fingerprints(batch)
|
||||
total_created += self.bulk_create(valid)
|
||||
total_failed += len(invalid)
|
||||
|
||||
logger.info(
|
||||
"批量创建指纹完成: created=%d, failed=%d, total=%d",
|
||||
total_created, total_failed, len(raw_data)
|
||||
)
|
||||
return {'created': total_created, 'failed': total_failed}
|
||||
|
||||
def get_export_data(self) -> dict:
|
||||
"""
|
||||
获取导出数据,子类必须实现
|
||||
|
||||
Returns:
|
||||
dict: 导出的 JSON 数据
|
||||
"""
|
||||
raise NotImplementedError("子类必须实现 get_export_data 方法")
|
||||
|
||||
def export_to_file(self, output_path: str) -> int:
|
||||
"""
|
||||
导出所有指纹到 JSON 文件
|
||||
|
||||
Args:
|
||||
output_path: 输出文件路径
|
||||
|
||||
Returns:
|
||||
int: 导出的指纹数量
|
||||
"""
|
||||
data = self.get_export_data()
|
||||
with open(output_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(data, f, ensure_ascii=False)
|
||||
count = len(data.get('fingerprint', []))
|
||||
logger.info("导出指纹文件: %s, 数量: %d", output_path, count)
|
||||
return count
|
||||
|
||||
def get_fingerprint_version(self) -> str:
|
||||
"""
|
||||
获取指纹库版本标识(用于缓存校验)
|
||||
|
||||
Returns:
|
||||
str: 版本标识,格式 "{count}_{latest_timestamp}"
|
||||
|
||||
版本变化场景:
|
||||
- 新增记录 → count 变化
|
||||
- 删除记录 → count 变化
|
||||
- 清空全部 → count 变为 0
|
||||
"""
|
||||
count = self.model.objects.count()
|
||||
latest = self.model.objects.order_by('-created_at').first()
|
||||
latest_ts = int(latest.created_at.timestamp()) if latest else 0
|
||||
return f"{count}_{latest_ts}"
|
||||
84
backend/apps/engine/services/fingerprints/ehole.py
Normal file
84
backend/apps/engine/services/fingerprints/ehole.py
Normal file
@@ -0,0 +1,84 @@
|
||||
"""EHole 指纹管理 Service
|
||||
|
||||
实现 EHole 格式指纹的校验、转换和导出逻辑
|
||||
"""
|
||||
|
||||
from apps.engine.models import EholeFingerprint
|
||||
from .base import BaseFingerprintService
|
||||
|
||||
|
||||
class EholeFingerprintService(BaseFingerprintService):
|
||||
"""EHole 指纹管理服务(继承基类,实现 EHole 特定逻辑)"""
|
||||
|
||||
model = EholeFingerprint
|
||||
|
||||
def validate_fingerprint(self, item: dict) -> bool:
|
||||
"""
|
||||
校验单条 EHole 指纹
|
||||
|
||||
校验规则:
|
||||
- cms 字段必须存在且非空
|
||||
- keyword 字段必须是数组
|
||||
|
||||
Args:
|
||||
item: 单条指纹数据
|
||||
|
||||
Returns:
|
||||
bool: 是否有效
|
||||
"""
|
||||
cms = item.get('cms', '')
|
||||
keyword = item.get('keyword')
|
||||
return bool(cms and str(cms).strip()) and isinstance(keyword, list)
|
||||
|
||||
def to_model_data(self, item: dict) -> dict:
|
||||
"""
|
||||
转换 EHole JSON 格式为 Model 字段
|
||||
|
||||
字段映射:
|
||||
- isImportant (JSON) → is_important (Model)
|
||||
|
||||
Args:
|
||||
item: 原始 EHole JSON 数据
|
||||
|
||||
Returns:
|
||||
dict: Model 字段数据
|
||||
"""
|
||||
return {
|
||||
'cms': str(item.get('cms', '')).strip(),
|
||||
'method': item.get('method', 'keyword'),
|
||||
'location': item.get('location', 'body'),
|
||||
'keyword': item.get('keyword', []),
|
||||
'is_important': item.get('isImportant', False),
|
||||
'type': item.get('type', '-'),
|
||||
}
|
||||
|
||||
def get_export_data(self) -> dict:
|
||||
"""
|
||||
获取导出数据(EHole JSON 格式)
|
||||
|
||||
Returns:
|
||||
dict: EHole 格式的 JSON 数据
|
||||
{
|
||||
"fingerprint": [
|
||||
{"cms": "...", "method": "...", "location": "...",
|
||||
"keyword": [...], "isImportant": false, "type": "..."},
|
||||
...
|
||||
],
|
||||
"version": "1000_1703836800"
|
||||
}
|
||||
"""
|
||||
fingerprints = self.model.objects.all()
|
||||
data = []
|
||||
for fp in fingerprints:
|
||||
data.append({
|
||||
'cms': fp.cms,
|
||||
'method': fp.method,
|
||||
'location': fp.location,
|
||||
'keyword': fp.keyword,
|
||||
'isImportant': fp.is_important, # 转回 JSON 格式
|
||||
'type': fp.type,
|
||||
})
|
||||
return {
|
||||
'fingerprint': data,
|
||||
'version': self.get_fingerprint_version(),
|
||||
}
|
||||
70
backend/apps/engine/services/fingerprints/goby.py
Normal file
70
backend/apps/engine/services/fingerprints/goby.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""Goby 指纹管理 Service
|
||||
|
||||
实现 Goby 格式指纹的校验、转换和导出逻辑
|
||||
"""
|
||||
|
||||
from apps.engine.models import GobyFingerprint
|
||||
from .base import BaseFingerprintService
|
||||
|
||||
|
||||
class GobyFingerprintService(BaseFingerprintService):
|
||||
"""Goby 指纹管理服务(继承基类,实现 Goby 特定逻辑)"""
|
||||
|
||||
model = GobyFingerprint
|
||||
|
||||
def validate_fingerprint(self, item: dict) -> bool:
|
||||
"""
|
||||
校验单条 Goby 指纹
|
||||
|
||||
校验规则:
|
||||
- name 字段必须存在且非空
|
||||
- logic 字段必须存在
|
||||
- rule 字段必须是数组
|
||||
|
||||
Args:
|
||||
item: 单条指纹数据
|
||||
|
||||
Returns:
|
||||
bool: 是否有效
|
||||
"""
|
||||
name = item.get('name', '')
|
||||
logic = item.get('logic', '')
|
||||
rule = item.get('rule')
|
||||
return bool(name and str(name).strip()) and bool(logic) and isinstance(rule, list)
|
||||
|
||||
def to_model_data(self, item: dict) -> dict:
|
||||
"""
|
||||
转换 Goby JSON 格式为 Model 字段
|
||||
|
||||
Args:
|
||||
item: 原始 Goby JSON 数据
|
||||
|
||||
Returns:
|
||||
dict: Model 字段数据
|
||||
"""
|
||||
return {
|
||||
'name': str(item.get('name', '')).strip(),
|
||||
'logic': item.get('logic', ''),
|
||||
'rule': item.get('rule', []),
|
||||
}
|
||||
|
||||
def get_export_data(self) -> list:
|
||||
"""
|
||||
获取导出数据(Goby JSON 格式 - 数组)
|
||||
|
||||
Returns:
|
||||
list: Goby 格式的 JSON 数据(数组格式)
|
||||
[
|
||||
{"name": "...", "logic": "...", "rule": [...]},
|
||||
...
|
||||
]
|
||||
"""
|
||||
fingerprints = self.model.objects.all()
|
||||
return [
|
||||
{
|
||||
'name': fp.name,
|
||||
'logic': fp.logic,
|
||||
'rule': fp.rule,
|
||||
}
|
||||
for fp in fingerprints
|
||||
]
|
||||
99
backend/apps/engine/services/fingerprints/wappalyzer.py
Normal file
99
backend/apps/engine/services/fingerprints/wappalyzer.py
Normal file
@@ -0,0 +1,99 @@
|
||||
"""Wappalyzer 指纹管理 Service
|
||||
|
||||
实现 Wappalyzer 格式指纹的校验、转换和导出逻辑
|
||||
"""
|
||||
|
||||
from apps.engine.models import WappalyzerFingerprint
|
||||
from .base import BaseFingerprintService
|
||||
|
||||
|
||||
class WappalyzerFingerprintService(BaseFingerprintService):
|
||||
"""Wappalyzer 指纹管理服务(继承基类,实现 Wappalyzer 特定逻辑)"""
|
||||
|
||||
model = WappalyzerFingerprint
|
||||
|
||||
def validate_fingerprint(self, item: dict) -> bool:
|
||||
"""
|
||||
校验单条 Wappalyzer 指纹
|
||||
|
||||
校验规则:
|
||||
- name 字段必须存在且非空(从 apps 对象的 key 传入)
|
||||
|
||||
Args:
|
||||
item: 单条指纹数据
|
||||
|
||||
Returns:
|
||||
bool: 是否有效
|
||||
"""
|
||||
name = item.get('name', '')
|
||||
return bool(name and str(name).strip())
|
||||
|
||||
def to_model_data(self, item: dict) -> dict:
|
||||
"""
|
||||
转换 Wappalyzer JSON 格式为 Model 字段
|
||||
|
||||
字段映射:
|
||||
- scriptSrc (JSON) → script_src (Model)
|
||||
|
||||
Args:
|
||||
item: 原始 Wappalyzer JSON 数据
|
||||
|
||||
Returns:
|
||||
dict: Model 字段数据
|
||||
"""
|
||||
return {
|
||||
'name': str(item.get('name', '')).strip(),
|
||||
'cats': item.get('cats', []),
|
||||
'cookies': item.get('cookies', {}),
|
||||
'headers': item.get('headers', {}),
|
||||
'script_src': item.get('scriptSrc', []), # JSON: scriptSrc -> Model: script_src
|
||||
'js': item.get('js', []),
|
||||
'implies': item.get('implies', []),
|
||||
'meta': item.get('meta', {}),
|
||||
'html': item.get('html', []),
|
||||
'description': item.get('description', ''),
|
||||
'website': item.get('website', ''),
|
||||
'cpe': item.get('cpe', ''),
|
||||
}
|
||||
|
||||
def get_export_data(self) -> dict:
|
||||
"""
|
||||
获取导出数据(Wappalyzer JSON 格式)
|
||||
|
||||
Returns:
|
||||
dict: Wappalyzer 格式的 JSON 数据
|
||||
{
|
||||
"apps": {
|
||||
"AppName": {"cats": [...], "cookies": {...}, ...},
|
||||
...
|
||||
}
|
||||
}
|
||||
"""
|
||||
fingerprints = self.model.objects.all()
|
||||
apps = {}
|
||||
for fp in fingerprints:
|
||||
app_data = {}
|
||||
if fp.cats:
|
||||
app_data['cats'] = fp.cats
|
||||
if fp.cookies:
|
||||
app_data['cookies'] = fp.cookies
|
||||
if fp.headers:
|
||||
app_data['headers'] = fp.headers
|
||||
if fp.script_src:
|
||||
app_data['scriptSrc'] = fp.script_src # Model: script_src -> JSON: scriptSrc
|
||||
if fp.js:
|
||||
app_data['js'] = fp.js
|
||||
if fp.implies:
|
||||
app_data['implies'] = fp.implies
|
||||
if fp.meta:
|
||||
app_data['meta'] = fp.meta
|
||||
if fp.html:
|
||||
app_data['html'] = fp.html
|
||||
if fp.description:
|
||||
app_data['description'] = fp.description
|
||||
if fp.website:
|
||||
app_data['website'] = fp.website
|
||||
if fp.cpe:
|
||||
app_data['cpe'] = fp.cpe
|
||||
apps[fp.name] = app_data
|
||||
return {'apps': apps}
|
||||
@@ -186,6 +186,7 @@ class NucleiTemplateRepoService:
|
||||
RuntimeError: Git 命令执行失败
|
||||
"""
|
||||
import subprocess
|
||||
from apps.common.utils.git_proxy import get_git_proxy_url
|
||||
|
||||
obj = self._get_repo_obj(repo_id)
|
||||
|
||||
@@ -196,9 +197,14 @@ class NucleiTemplateRepoService:
|
||||
cmd: List[str]
|
||||
action: str
|
||||
|
||||
# 获取代理后的 URL(如果启用了 Git 加速)
|
||||
proxied_url = get_git_proxy_url(obj.repo_url)
|
||||
if proxied_url != obj.repo_url:
|
||||
logger.info("使用 Git 加速: %s -> %s", obj.repo_url, proxied_url)
|
||||
|
||||
# 判断是 clone 还是 pull
|
||||
if git_dir.is_dir():
|
||||
# 检查远程地址是否变化
|
||||
# 检查远程地址是否变化(比较原始 URL,不是代理 URL)
|
||||
current_remote = subprocess.run(
|
||||
["git", "-C", str(local_path), "remote", "get-url", "origin"],
|
||||
check=False,
|
||||
@@ -208,12 +214,13 @@ class NucleiTemplateRepoService:
|
||||
)
|
||||
current_url = current_remote.stdout.strip() if current_remote.returncode == 0 else ""
|
||||
|
||||
if current_url != obj.repo_url:
|
||||
# 检查是否需要重新 clone(原始 URL 或代理 URL 变化都需要)
|
||||
if current_url not in [obj.repo_url, proxied_url]:
|
||||
# 远程地址变化,删除旧目录重新 clone
|
||||
logger.info("nuclei 模板仓库 %s 远程地址变化,重新 clone: %s -> %s", obj.id, current_url, obj.repo_url)
|
||||
shutil.rmtree(local_path)
|
||||
local_path.mkdir(parents=True, exist_ok=True)
|
||||
cmd = ["git", "clone", "--depth", "1", obj.repo_url, str(local_path)]
|
||||
cmd = ["git", "clone", "--depth", "1", proxied_url, str(local_path)]
|
||||
action = "clone"
|
||||
else:
|
||||
# 已有仓库且地址未变,执行 pull
|
||||
@@ -224,7 +231,7 @@ class NucleiTemplateRepoService:
|
||||
if local_path.exists() and not local_path.is_dir():
|
||||
raise RuntimeError(f"本地路径已存在且不是目录: {local_path}")
|
||||
# --depth 1 浅克隆,只获取最新提交,节省空间和时间
|
||||
cmd = ["git", "clone", "--depth", "1", obj.repo_url, str(local_path)]
|
||||
cmd = ["git", "clone", "--depth", "1", proxied_url, str(local_path)]
|
||||
action = "clone"
|
||||
|
||||
# 执行 Git 命令
|
||||
|
||||
@@ -76,8 +76,8 @@ class TaskDistributor:
|
||||
self.docker_image = settings.TASK_EXECUTOR_IMAGE
|
||||
if not self.docker_image:
|
||||
raise ValueError("TASK_EXECUTOR_IMAGE 未配置,请确保 IMAGE_TAG 环境变量已设置")
|
||||
self.results_mount = getattr(settings, 'CONTAINER_RESULTS_MOUNT', '/app/backend/results')
|
||||
self.logs_mount = getattr(settings, 'CONTAINER_LOGS_MOUNT', '/app/backend/logs')
|
||||
# 统一使用 /opt/xingrin 下的路径
|
||||
self.logs_mount = "/opt/xingrin/logs"
|
||||
self.submit_interval = getattr(settings, 'TASK_SUBMIT_INTERVAL', 5)
|
||||
|
||||
def get_online_workers(self) -> list[WorkerNode]:
|
||||
@@ -153,30 +153,68 @@ class TaskDistributor:
|
||||
else:
|
||||
scored_workers.append((worker, score, cpu, mem))
|
||||
|
||||
# 降级策略:如果没有正常负载的,等待后重新选择
|
||||
# 降级策略:如果没有正常负载的,循环等待后重新检测
|
||||
if not scored_workers:
|
||||
if high_load_workers:
|
||||
# 高负载时先等待,给系统喘息时间(默认 60 秒)
|
||||
# 高负载等待参数(默认每 60 秒检测一次,最多 10 次)
|
||||
high_load_wait = getattr(settings, 'HIGH_LOAD_WAIT_SECONDS', 60)
|
||||
logger.warning("所有 Worker 高负载,等待 %d 秒后重试...", high_load_wait)
|
||||
time.sleep(high_load_wait)
|
||||
high_load_max_retries = getattr(settings, 'HIGH_LOAD_MAX_RETRIES', 10)
|
||||
|
||||
# 重新选择(递归调用,可能负载已降下来)
|
||||
# 为避免无限递归,这里直接使用高负载中最低的
|
||||
# 开始等待前发送高负载通知
|
||||
high_load_workers.sort(key=lambda x: x[1])
|
||||
best_worker, _, cpu, mem = high_load_workers[0]
|
||||
|
||||
# 发送高负载通知
|
||||
_, _, first_cpu, first_mem = high_load_workers[0]
|
||||
from apps.common.signals import all_workers_high_load
|
||||
all_workers_high_load.send(
|
||||
sender=self.__class__,
|
||||
worker_name=best_worker.name,
|
||||
cpu=cpu,
|
||||
mem=mem
|
||||
worker_name="所有节点",
|
||||
cpu=first_cpu,
|
||||
mem=first_mem
|
||||
)
|
||||
|
||||
logger.info("选择 Worker: %s (CPU: %.1f%%, MEM: %.1f%%)", best_worker.name, cpu, mem)
|
||||
return best_worker
|
||||
for retry in range(high_load_max_retries):
|
||||
logger.warning(
|
||||
"所有 Worker 高负载,等待 %d 秒后重试... (%d/%d)",
|
||||
high_load_wait, retry + 1, high_load_max_retries
|
||||
)
|
||||
time.sleep(high_load_wait)
|
||||
|
||||
# 重新获取负载数据
|
||||
loads = worker_load_service.get_all_loads(worker_ids)
|
||||
|
||||
# 重新评估
|
||||
scored_workers = []
|
||||
high_load_workers = []
|
||||
|
||||
for worker in workers:
|
||||
load = loads.get(worker.id)
|
||||
if not load:
|
||||
continue
|
||||
|
||||
cpu = load.get('cpu', 0)
|
||||
mem = load.get('mem', 0)
|
||||
score = cpu * 0.7 + mem * 0.3
|
||||
|
||||
if cpu > 85 or mem > 85:
|
||||
high_load_workers.append((worker, score, cpu, mem))
|
||||
else:
|
||||
scored_workers.append((worker, score, cpu, mem))
|
||||
|
||||
# 如果有正常负载的 Worker,跳出循环
|
||||
if scored_workers:
|
||||
logger.info("检测到正常负载 Worker,结束等待")
|
||||
break
|
||||
|
||||
# 超时或仍然高负载,选择负载最低的
|
||||
if not scored_workers and high_load_workers:
|
||||
high_load_workers.sort(key=lambda x: x[1])
|
||||
best_worker, _, cpu, mem = high_load_workers[0]
|
||||
|
||||
logger.warning(
|
||||
"等待超时,强制分发到高负载 Worker: %s (CPU: %.1f%%, MEM: %.1f%%)",
|
||||
best_worker.name, cpu, mem
|
||||
)
|
||||
return best_worker
|
||||
return best_worker
|
||||
else:
|
||||
logger.warning("没有可用的 Worker")
|
||||
return None
|
||||
@@ -234,11 +272,10 @@ class TaskDistributor:
|
||||
else:
|
||||
# 远程:通过 Nginx 反向代理访问(HTTPS,不直连 8888 端口)
|
||||
network_arg = ""
|
||||
server_url = f"https://{settings.PUBLIC_HOST}"
|
||||
server_url = f"https://{settings.PUBLIC_HOST}:{settings.PUBLIC_PORT}"
|
||||
|
||||
# 挂载路径(所有节点统一使用固定路径)
|
||||
host_results_dir = settings.HOST_RESULTS_DIR # /opt/xingrin/results
|
||||
host_logs_dir = settings.HOST_LOGS_DIR # /opt/xingrin/logs
|
||||
# 挂载路径(统一挂载 /opt/xingrin)
|
||||
host_xingrin_dir = "/opt/xingrin"
|
||||
|
||||
# 环境变量:SERVER_URL + IS_LOCAL,其他配置容器启动时从配置中心获取
|
||||
# IS_LOCAL 用于 Worker 向配置中心声明身份,决定返回的数据库地址
|
||||
@@ -251,15 +288,12 @@ class TaskDistributor:
|
||||
"-e PREFECT_SERVER_EPHEMERAL_ENABLED=true", # 启用 ephemeral server(本地临时服务器)
|
||||
"-e PREFECT_SERVER_EPHEMERAL_STARTUP_TIMEOUT_SECONDS=120", # 增加启动超时时间
|
||||
"-e PREFECT_SERVER_DATABASE_CONNECTION_URL=sqlite+aiosqlite:////tmp/.prefect/prefect.db", # 使用 /tmp 下的 SQLite
|
||||
"-e PREFECT_LOGGING_LEVEL=DEBUG", # 启用 DEBUG 级别日志
|
||||
"-e PREFECT_LOGGING_SERVER_LEVEL=DEBUG", # Server 日志级别
|
||||
"-e PREFECT_DEBUG_MODE=true", # 启用调试模式
|
||||
"-e PREFECT_LOGGING_LEVEL=WARNING", # 日志级别(减少 DEBUG 噪音)
|
||||
]
|
||||
|
||||
# 挂载卷
|
||||
# 挂载卷(统一挂载整个 /opt/xingrin 目录)
|
||||
volumes = [
|
||||
f"-v {host_results_dir}:{self.results_mount}",
|
||||
f"-v {host_logs_dir}:{self.logs_mount}",
|
||||
f"-v {host_xingrin_dir}:{host_xingrin_dir}",
|
||||
]
|
||||
|
||||
# 构建命令行参数
|
||||
@@ -520,7 +554,7 @@ class TaskDistributor:
|
||||
try:
|
||||
# 构建 docker run 命令(清理过期扫描结果目录)
|
||||
script_args = {
|
||||
'results_dir': '/app/backend/results',
|
||||
'results_dir': '/opt/xingrin/results',
|
||||
'retention_days': retention_days,
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,11 @@ from .views import (
|
||||
WordlistViewSet,
|
||||
NucleiTemplateRepoViewSet,
|
||||
)
|
||||
from .views.fingerprints import (
|
||||
EholeFingerprintViewSet,
|
||||
GobyFingerprintViewSet,
|
||||
WappalyzerFingerprintViewSet,
|
||||
)
|
||||
|
||||
|
||||
# 创建路由器
|
||||
@@ -15,6 +20,10 @@ router.register(r"engines", ScanEngineViewSet, basename="engine")
|
||||
router.register(r"workers", WorkerNodeViewSet, basename="worker")
|
||||
router.register(r"wordlists", WordlistViewSet, basename="wordlist")
|
||||
router.register(r"nuclei/repos", NucleiTemplateRepoViewSet, basename="nuclei-repos")
|
||||
# 指纹管理
|
||||
router.register(r"fingerprints/ehole", EholeFingerprintViewSet, basename="ehole-fingerprint")
|
||||
router.register(r"fingerprints/goby", GobyFingerprintViewSet, basename="goby-fingerprint")
|
||||
router.register(r"fingerprints/wappalyzer", WappalyzerFingerprintViewSet, basename="wappalyzer-fingerprint")
|
||||
|
||||
urlpatterns = [
|
||||
path("", include(router.urls)),
|
||||
|
||||
16
backend/apps/engine/views/fingerprints/__init__.py
Normal file
16
backend/apps/engine/views/fingerprints/__init__.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""指纹管理 ViewSets
|
||||
|
||||
导出所有指纹相关的 ViewSet 类
|
||||
"""
|
||||
|
||||
from .base import BaseFingerprintViewSet
|
||||
from .ehole import EholeFingerprintViewSet
|
||||
from .goby import GobyFingerprintViewSet
|
||||
from .wappalyzer import WappalyzerFingerprintViewSet
|
||||
|
||||
__all__ = [
|
||||
"BaseFingerprintViewSet",
|
||||
"EholeFingerprintViewSet",
|
||||
"GobyFingerprintViewSet",
|
||||
"WappalyzerFingerprintViewSet",
|
||||
]
|
||||
202
backend/apps/engine/views/fingerprints/base.py
Normal file
202
backend/apps/engine/views/fingerprints/base.py
Normal file
@@ -0,0 +1,202 @@
|
||||
"""指纹管理基类 ViewSet
|
||||
|
||||
提供通用的 CRUD 和批量操作,供 EHole/Goby/Wappalyzer 等子类继承
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
from django.http import HttpResponse
|
||||
from rest_framework import viewsets, status, filters
|
||||
from rest_framework.decorators import action
|
||||
from rest_framework.response import Response
|
||||
from rest_framework.exceptions import ValidationError
|
||||
|
||||
from apps.common.pagination import BasePagination
|
||||
from apps.common.utils.filter_utils import apply_filters
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseFingerprintViewSet(viewsets.ModelViewSet):
|
||||
"""指纹管理基类 ViewSet,供 EHole/Goby/Wappalyzer 等子类继承
|
||||
|
||||
提供的 API:
|
||||
|
||||
标准 CRUD(继承自 ModelViewSet):
|
||||
- GET / 列表查询(分页 + 智能过滤)
|
||||
- POST / 创建单条
|
||||
- GET /{id}/ 获取详情
|
||||
- PUT /{id}/ 更新
|
||||
- DELETE /{id}/ 删除
|
||||
|
||||
批量操作(本类实现):
|
||||
- POST /batch_create/ 批量创建(JSON body)
|
||||
- POST /import_file/ 文件导入(multipart/form-data,适合 10MB+ 大文件)
|
||||
- POST /bulk-delete/ 批量删除
|
||||
- POST /delete-all/ 删除所有
|
||||
- GET /export/ 导出下载
|
||||
|
||||
智能过滤语法(filter 参数):
|
||||
- field="value" 模糊匹配(包含)
|
||||
- field=="value" 精确匹配
|
||||
- 多条件空格分隔 AND 关系
|
||||
- || 或 or OR 关系
|
||||
|
||||
子类必须实现:
|
||||
- service_class Service 类
|
||||
- parse_import_data 解析导入数据格式
|
||||
- get_export_filename 导出文件名
|
||||
"""
|
||||
|
||||
pagination_class = BasePagination
|
||||
filter_backends = [filters.OrderingFilter]
|
||||
ordering = ['-created_at']
|
||||
|
||||
# 子类必须指定
|
||||
service_class = None # Service 类
|
||||
|
||||
# 智能过滤字段映射,子类必须覆盖
|
||||
FILTER_FIELD_MAPPING = {}
|
||||
|
||||
# JSON 数组字段列表(使用 __contains 查询),子类可覆盖
|
||||
JSON_ARRAY_FIELDS = []
|
||||
|
||||
def get_queryset(self):
|
||||
"""支持智能过滤语法"""
|
||||
queryset = super().get_queryset()
|
||||
filter_query = self.request.query_params.get('filter', None)
|
||||
if filter_query:
|
||||
queryset = apply_filters(
|
||||
queryset,
|
||||
filter_query,
|
||||
self.FILTER_FIELD_MAPPING,
|
||||
json_array_fields=getattr(self, 'JSON_ARRAY_FIELDS', [])
|
||||
)
|
||||
return queryset
|
||||
|
||||
def get_service(self):
|
||||
"""获取 Service 实例"""
|
||||
if self.service_class is None:
|
||||
raise NotImplementedError("子类必须指定 service_class")
|
||||
return self.service_class()
|
||||
|
||||
def parse_import_data(self, json_data: dict) -> list:
|
||||
"""
|
||||
解析导入数据,子类必须实现
|
||||
|
||||
Args:
|
||||
json_data: 解析后的 JSON 数据
|
||||
|
||||
Returns:
|
||||
list: 指纹数据列表
|
||||
"""
|
||||
raise NotImplementedError("子类必须实现 parse_import_data 方法")
|
||||
|
||||
def get_export_filename(self) -> str:
|
||||
"""
|
||||
导出文件名,子类必须实现
|
||||
|
||||
Returns:
|
||||
str: 文件名
|
||||
"""
|
||||
raise NotImplementedError("子类必须实现 get_export_filename 方法")
|
||||
|
||||
@action(detail=False, methods=['post'])
|
||||
def batch_create(self, request):
|
||||
"""
|
||||
批量创建指纹规则
|
||||
POST /api/engine/fingerprints/{type}/batch_create/
|
||||
|
||||
请求格式:
|
||||
{
|
||||
"fingerprints": [
|
||||
{"cms": "WordPress", "method": "keyword", ...},
|
||||
...
|
||||
]
|
||||
}
|
||||
|
||||
返回:
|
||||
{
|
||||
"created": 2,
|
||||
"failed": 0
|
||||
}
|
||||
"""
|
||||
fingerprints = request.data.get('fingerprints', [])
|
||||
if not fingerprints:
|
||||
raise ValidationError('fingerprints 不能为空')
|
||||
if not isinstance(fingerprints, list):
|
||||
raise ValidationError('fingerprints 必须是数组')
|
||||
|
||||
result = self.get_service().batch_create_fingerprints(fingerprints)
|
||||
return Response(result, status=status.HTTP_201_CREATED)
|
||||
|
||||
@action(detail=False, methods=['post'])
|
||||
def import_file(self, request):
|
||||
"""
|
||||
文件导入(适合大文件,10MB+)
|
||||
POST /api/engine/fingerprints/{type}/import_file/
|
||||
|
||||
请求格式:multipart/form-data
|
||||
- file: JSON 文件
|
||||
|
||||
返回:同 batch_create
|
||||
"""
|
||||
file = request.FILES.get('file')
|
||||
if not file:
|
||||
raise ValidationError('缺少文件')
|
||||
|
||||
try:
|
||||
json_data = json.load(file)
|
||||
except json.JSONDecodeError as e:
|
||||
raise ValidationError(f'无效的 JSON 格式: {e}')
|
||||
|
||||
fingerprints = self.parse_import_data(json_data)
|
||||
if not fingerprints:
|
||||
raise ValidationError('文件中没有有效的指纹数据')
|
||||
|
||||
result = self.get_service().batch_create_fingerprints(fingerprints)
|
||||
return Response(result, status=status.HTTP_201_CREATED)
|
||||
|
||||
@action(detail=False, methods=['post'], url_path='bulk-delete')
|
||||
def bulk_delete(self, request):
|
||||
"""
|
||||
批量删除
|
||||
POST /api/engine/fingerprints/{type}/bulk-delete/
|
||||
|
||||
请求格式:{"ids": [1, 2, 3]}
|
||||
返回:{"deleted": 3}
|
||||
"""
|
||||
ids = request.data.get('ids', [])
|
||||
if not ids:
|
||||
raise ValidationError('ids 不能为空')
|
||||
if not isinstance(ids, list):
|
||||
raise ValidationError('ids 必须是数组')
|
||||
|
||||
deleted_count = self.queryset.model.objects.filter(id__in=ids).delete()[0]
|
||||
return Response({'deleted': deleted_count})
|
||||
|
||||
@action(detail=False, methods=['post'], url_path='delete-all')
|
||||
def delete_all(self, request):
|
||||
"""
|
||||
删除所有指纹
|
||||
POST /api/engine/fingerprints/{type}/delete-all/
|
||||
|
||||
返回:{"deleted": 1000}
|
||||
"""
|
||||
deleted_count = self.queryset.model.objects.all().delete()[0]
|
||||
return Response({'deleted': deleted_count})
|
||||
|
||||
@action(detail=False, methods=['get'])
|
||||
def export(self, request):
|
||||
"""
|
||||
导出指纹(前端下载)
|
||||
GET /api/engine/fingerprints/{type}/export/
|
||||
|
||||
返回:JSON 文件下载
|
||||
"""
|
||||
data = self.get_service().get_export_data()
|
||||
content = json.dumps(data, ensure_ascii=False, indent=2)
|
||||
response = HttpResponse(content, content_type='application/json')
|
||||
response['Content-Disposition'] = f'attachment; filename="{self.get_export_filename()}"'
|
||||
return response
|
||||
67
backend/apps/engine/views/fingerprints/ehole.py
Normal file
67
backend/apps/engine/views/fingerprints/ehole.py
Normal file
@@ -0,0 +1,67 @@
|
||||
"""EHole 指纹管理 ViewSet"""
|
||||
|
||||
from apps.common.pagination import BasePagination
|
||||
from apps.engine.models import EholeFingerprint
|
||||
from apps.engine.serializers.fingerprints import EholeFingerprintSerializer
|
||||
from apps.engine.services.fingerprints import EholeFingerprintService
|
||||
|
||||
from .base import BaseFingerprintViewSet
|
||||
|
||||
|
||||
class EholeFingerprintViewSet(BaseFingerprintViewSet):
|
||||
"""EHole 指纹管理 ViewSet
|
||||
|
||||
继承自 BaseFingerprintViewSet,提供以下 API:
|
||||
|
||||
标准 CRUD(ModelViewSet):
|
||||
- GET / 列表查询(分页)
|
||||
- POST / 创建单条
|
||||
- GET /{id}/ 获取详情
|
||||
- PUT /{id}/ 更新
|
||||
- DELETE /{id}/ 删除
|
||||
|
||||
批量操作(继承自基类):
|
||||
- POST /batch_create/ 批量创建(JSON body)
|
||||
- POST /import_file/ 文件导入(multipart/form-data)
|
||||
- POST /bulk-delete/ 批量删除
|
||||
- POST /delete-all/ 删除所有
|
||||
- GET /export/ 导出下载
|
||||
|
||||
智能过滤语法(filter 参数):
|
||||
- cms="word" 模糊匹配 cms 字段
|
||||
- cms=="WordPress" 精确匹配
|
||||
- type="CMS" 按类型筛选
|
||||
- method="keyword" 按匹配方式筛选
|
||||
- location="body" 按匹配位置筛选
|
||||
"""
|
||||
|
||||
queryset = EholeFingerprint.objects.all()
|
||||
serializer_class = EholeFingerprintSerializer
|
||||
pagination_class = BasePagination
|
||||
service_class = EholeFingerprintService
|
||||
|
||||
# 排序配置
|
||||
ordering_fields = ['created_at', 'cms']
|
||||
ordering = ['-created_at']
|
||||
|
||||
# EHole 过滤字段映射
|
||||
FILTER_FIELD_MAPPING = {
|
||||
'cms': 'cms',
|
||||
'method': 'method',
|
||||
'location': 'location',
|
||||
'type': 'type',
|
||||
'isImportant': 'is_important',
|
||||
}
|
||||
|
||||
def parse_import_data(self, json_data: dict) -> list:
|
||||
"""
|
||||
解析 EHole JSON 格式的导入数据
|
||||
|
||||
输入格式:{"fingerprint": [...]}
|
||||
返回:指纹列表
|
||||
"""
|
||||
return json_data.get('fingerprint', [])
|
||||
|
||||
def get_export_filename(self) -> str:
|
||||
"""导出文件名"""
|
||||
return 'ehole.json'
|
||||
65
backend/apps/engine/views/fingerprints/goby.py
Normal file
65
backend/apps/engine/views/fingerprints/goby.py
Normal file
@@ -0,0 +1,65 @@
|
||||
"""Goby 指纹管理 ViewSet"""
|
||||
|
||||
from apps.common.pagination import BasePagination
|
||||
from apps.engine.models import GobyFingerprint
|
||||
from apps.engine.serializers.fingerprints import GobyFingerprintSerializer
|
||||
from apps.engine.services.fingerprints import GobyFingerprintService
|
||||
|
||||
from .base import BaseFingerprintViewSet
|
||||
|
||||
|
||||
class GobyFingerprintViewSet(BaseFingerprintViewSet):
|
||||
"""Goby 指纹管理 ViewSet
|
||||
|
||||
继承自 BaseFingerprintViewSet,提供以下 API:
|
||||
|
||||
标准 CRUD(ModelViewSet):
|
||||
- GET / 列表查询(分页)
|
||||
- POST / 创建单条
|
||||
- GET /{id}/ 获取详情
|
||||
- PUT /{id}/ 更新
|
||||
- DELETE /{id}/ 删除
|
||||
|
||||
批量操作(继承自基类):
|
||||
- POST /batch_create/ 批量创建(JSON body)
|
||||
- POST /import_file/ 文件导入(multipart/form-data)
|
||||
- POST /bulk-delete/ 批量删除
|
||||
- POST /delete-all/ 删除所有
|
||||
- GET /export/ 导出下载
|
||||
|
||||
智能过滤语法(filter 参数):
|
||||
- name="word" 模糊匹配 name 字段
|
||||
- name=="ProductName" 精确匹配
|
||||
"""
|
||||
|
||||
queryset = GobyFingerprint.objects.all()
|
||||
serializer_class = GobyFingerprintSerializer
|
||||
pagination_class = BasePagination
|
||||
service_class = GobyFingerprintService
|
||||
|
||||
# 排序配置
|
||||
ordering_fields = ['created_at', 'name']
|
||||
ordering = ['-created_at']
|
||||
|
||||
# Goby 过滤字段映射
|
||||
FILTER_FIELD_MAPPING = {
|
||||
'name': 'name',
|
||||
'logic': 'logic',
|
||||
}
|
||||
|
||||
def parse_import_data(self, json_data) -> list:
|
||||
"""
|
||||
解析 Goby JSON 格式的导入数据
|
||||
|
||||
Goby 格式是数组格式:[{...}, {...}, ...]
|
||||
|
||||
输入格式:[{"name": "...", "logic": "...", "rule": [...]}, ...]
|
||||
返回:指纹列表
|
||||
"""
|
||||
if isinstance(json_data, list):
|
||||
return json_data
|
||||
return []
|
||||
|
||||
def get_export_filename(self) -> str:
|
||||
"""导出文件名"""
|
||||
return 'goby.json'
|
||||
75
backend/apps/engine/views/fingerprints/wappalyzer.py
Normal file
75
backend/apps/engine/views/fingerprints/wappalyzer.py
Normal file
@@ -0,0 +1,75 @@
|
||||
"""Wappalyzer 指纹管理 ViewSet"""
|
||||
|
||||
from apps.common.pagination import BasePagination
|
||||
from apps.engine.models import WappalyzerFingerprint
|
||||
from apps.engine.serializers.fingerprints import WappalyzerFingerprintSerializer
|
||||
from apps.engine.services.fingerprints import WappalyzerFingerprintService
|
||||
|
||||
from .base import BaseFingerprintViewSet
|
||||
|
||||
|
||||
class WappalyzerFingerprintViewSet(BaseFingerprintViewSet):
|
||||
"""Wappalyzer 指纹管理 ViewSet
|
||||
|
||||
继承自 BaseFingerprintViewSet,提供以下 API:
|
||||
|
||||
标准 CRUD(ModelViewSet):
|
||||
- GET / 列表查询(分页)
|
||||
- POST / 创建单条
|
||||
- GET /{id}/ 获取详情
|
||||
- PUT /{id}/ 更新
|
||||
- DELETE /{id}/ 删除
|
||||
|
||||
批量操作(继承自基类):
|
||||
- POST /batch_create/ 批量创建(JSON body)
|
||||
- POST /import_file/ 文件导入(multipart/form-data)
|
||||
- POST /bulk-delete/ 批量删除
|
||||
- POST /delete-all/ 删除所有
|
||||
- GET /export/ 导出下载
|
||||
|
||||
智能过滤语法(filter 参数):
|
||||
- name="word" 模糊匹配 name 字段
|
||||
- name=="AppName" 精确匹配
|
||||
"""
|
||||
|
||||
queryset = WappalyzerFingerprint.objects.all()
|
||||
serializer_class = WappalyzerFingerprintSerializer
|
||||
pagination_class = BasePagination
|
||||
service_class = WappalyzerFingerprintService
|
||||
|
||||
# 排序配置
|
||||
ordering_fields = ['created_at', 'name']
|
||||
ordering = ['-created_at']
|
||||
|
||||
# Wappalyzer 过滤字段映射
|
||||
# 注意:implies 是 JSON 数组字段,使用 __contains 查询
|
||||
FILTER_FIELD_MAPPING = {
|
||||
'name': 'name',
|
||||
'description': 'description',
|
||||
'website': 'website',
|
||||
'cpe': 'cpe',
|
||||
'implies': 'implies', # JSON 数组字段
|
||||
}
|
||||
|
||||
# JSON 数组字段列表(使用 __contains 查询)
|
||||
JSON_ARRAY_FIELDS = ['implies']
|
||||
|
||||
def parse_import_data(self, json_data: dict) -> list:
|
||||
"""
|
||||
解析 Wappalyzer JSON 格式的导入数据
|
||||
|
||||
Wappalyzer 格式是 apps 对象格式:{"apps": {"AppName": {...}, ...}}
|
||||
|
||||
输入格式:{"apps": {"1C-Bitrix": {"cats": [...], ...}, ...}}
|
||||
返回:指纹列表(每个 app 转换为带 name 字段的 dict)
|
||||
"""
|
||||
apps = json_data.get('apps', {})
|
||||
fingerprints = []
|
||||
for name, data in apps.items():
|
||||
item = {'name': name, **data}
|
||||
fingerprints.append(item)
|
||||
return fingerprints
|
||||
|
||||
def get_export_filename(self) -> str:
|
||||
"""导出文件名"""
|
||||
return 'wappalyzer.json'
|
||||
@@ -238,7 +238,7 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
|
||||
docker run -d --pull=always \
|
||||
--name xingrin-agent \
|
||||
--restart always \
|
||||
-e HEARTBEAT_API_URL="https://{django_settings.PUBLIC_HOST}" \
|
||||
-e HEARTBEAT_API_URL="https://{django_settings.PUBLIC_HOST}:{getattr(django_settings, 'PUBLIC_PORT', '8083')}" \
|
||||
-e WORKER_ID="{worker_id}" \
|
||||
-e IMAGE_TAG="{target_version}" \
|
||||
-v /proc:/host/proc:ro \
|
||||
@@ -390,12 +390,14 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
|
||||
},
|
||||
'redisUrl': worker_redis_url,
|
||||
'paths': {
|
||||
'results': getattr(settings, 'CONTAINER_RESULTS_MOUNT', '/app/backend/results'),
|
||||
'logs': getattr(settings, 'CONTAINER_LOGS_MOUNT', '/app/backend/logs'),
|
||||
'results': getattr(settings, 'CONTAINER_RESULTS_MOUNT', '/opt/xingrin/results'),
|
||||
'logs': getattr(settings, 'CONTAINER_LOGS_MOUNT', '/opt/xingrin/logs'),
|
||||
},
|
||||
'logging': {
|
||||
'level': os.getenv('LOG_LEVEL', 'INFO'),
|
||||
'enableCommandLogging': os.getenv('ENABLE_COMMAND_LOGGING', 'true').lower() == 'true',
|
||||
},
|
||||
'debug': settings.DEBUG
|
||||
'debug': settings.DEBUG,
|
||||
# Git 加速配置(用于 Git clone 加速,如 Nuclei 模板仓库)
|
||||
'gitMirror': os.getenv('GIT_MIRROR', ''),
|
||||
})
|
||||
|
||||
@@ -225,12 +225,32 @@ VULN_SCAN_COMMANDS = {
|
||||
}
|
||||
|
||||
|
||||
# ==================== 指纹识别 ====================
|
||||
|
||||
FINGERPRINT_DETECT_COMMANDS = {
|
||||
'xingfinger': {
|
||||
# 流式输出模式(不使用 -o,输出到 stdout)
|
||||
# -l: URL 列表文件输入
|
||||
# -s: 静默模式,只输出命中结果
|
||||
# --json: JSON 格式输出(每行一条)
|
||||
'base': "xingfinger -l '{urls_file}' -s --json",
|
||||
'optional': {
|
||||
# 自定义指纹库路径
|
||||
'ehole': '--ehole {ehole}',
|
||||
'goby': '--goby {goby}',
|
||||
'wappalyzer': '--wappalyzer {wappalyzer}',
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# ==================== 工具映射 ====================
|
||||
|
||||
COMMAND_TEMPLATES = {
|
||||
'subdomain_discovery': SUBDOMAIN_DISCOVERY_COMMANDS,
|
||||
'port_scan': PORT_SCAN_COMMANDS,
|
||||
'site_scan': SITE_SCAN_COMMANDS,
|
||||
'fingerprint_detect': FINGERPRINT_DETECT_COMMANDS,
|
||||
'directory_scan': DIRECTORY_SCAN_COMMANDS,
|
||||
'url_fetch': URL_FETCH_COMMANDS,
|
||||
'vuln_scan': VULN_SCAN_COMMANDS,
|
||||
@@ -242,7 +262,7 @@ COMMAND_TEMPLATES = {
|
||||
EXECUTION_STAGES = [
|
||||
{
|
||||
'mode': 'sequential',
|
||||
'flows': ['subdomain_discovery', 'port_scan', 'site_scan']
|
||||
'flows': ['subdomain_discovery', 'port_scan', 'site_scan', 'fingerprint_detect']
|
||||
},
|
||||
{
|
||||
'mode': 'parallel',
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
# 引擎配置
|
||||
#
|
||||
# 参数命名:统一用中划线(如 rate-limit),系统自动转换为下划线
|
||||
# 必需参数:enabled(是否启用)、timeout(超时秒数,auto 表示自动计算)
|
||||
# 必需参数:enabled(是否启用)
|
||||
# 可选参数:timeout(超时秒数,默认 auto 自动计算)
|
||||
|
||||
# ==================== 子域名发现 ====================
|
||||
#
|
||||
@@ -39,7 +40,7 @@ subdomain_discovery:
|
||||
bruteforce:
|
||||
enabled: false
|
||||
subdomain_bruteforce:
|
||||
timeout: auto # 自动根据字典行数计算
|
||||
# timeout: auto # 自动根据字典行数计算
|
||||
wordlist-name: subdomains-top1million-110000.txt # 对应「字典管理」中的 Wordlist.name
|
||||
|
||||
# === Stage 3: 变异生成 + 存活验证(可选)===
|
||||
@@ -52,14 +53,14 @@ subdomain_discovery:
|
||||
resolve:
|
||||
enabled: true
|
||||
subdomain_resolve:
|
||||
timeout: auto # 自动根据候选子域数量计算
|
||||
# timeout: auto # 自动根据候选子域数量计算
|
||||
|
||||
# ==================== 端口扫描 ====================
|
||||
port_scan:
|
||||
tools:
|
||||
naabu_active:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(目标数 × 端口数 × 0.5秒),范围 60秒 ~ 2天
|
||||
# timeout: auto # 自动计算(目标数 × 端口数 × 0.5秒),范围 60秒 ~ 2天
|
||||
threads: 200 # 并发连接数(默认 5)
|
||||
# ports: 1-65535 # 扫描端口范围(默认 1-65535)
|
||||
top-ports: 100 # 扫描 nmap top 100 端口
|
||||
@@ -67,25 +68,33 @@ port_scan:
|
||||
|
||||
naabu_passive:
|
||||
enabled: true
|
||||
timeout: auto # 被动扫描通常较快
|
||||
# timeout: auto # 被动扫描通常较快
|
||||
|
||||
# ==================== 站点扫描 ====================
|
||||
site_scan:
|
||||
tools:
|
||||
httpx:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(每个 URL 约 1 秒)
|
||||
# timeout: auto # 自动计算(每个 URL 约 1 秒)
|
||||
# threads: 50 # 并发线程数(默认 50)
|
||||
# rate-limit: 150 # 每秒请求数(默认 150)
|
||||
# request-timeout: 10 # 单个请求超时秒数(默认 10)
|
||||
# retries: 2 # 请求失败重试次数
|
||||
|
||||
# ==================== 指纹识别 ====================
|
||||
# 在 site_scan 后串行执行,识别 WebSite 的技术栈
|
||||
fingerprint_detect:
|
||||
tools:
|
||||
xingfinger:
|
||||
enabled: true
|
||||
fingerprint-libs: [ehole, goby, wappalyzer] # 启用的指纹库:ehole, goby, wappalyzer, fingers, fingerprinthub
|
||||
|
||||
# ==================== 目录扫描 ====================
|
||||
directory_scan:
|
||||
tools:
|
||||
ffuf:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(字典行数 × 0.02秒),范围 60秒 ~ 2小时
|
||||
# timeout: auto # 自动计算(字典行数 × 0.02秒),范围 60秒 ~ 2小时
|
||||
max-workers: 5 # 并发扫描站点数(默认 5)
|
||||
wordlist-name: dir_default.txt # 对应「字典管理」中的 Wordlist.name
|
||||
delay: 0.1-2.0 # 请求间隔,支持范围随机(如 "0.1-2.0")
|
||||
@@ -103,7 +112,7 @@ url_fetch:
|
||||
|
||||
katana:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(根据站点数量)
|
||||
# timeout: auto # 自动计算(根据站点数量)
|
||||
depth: 5 # 爬取最大深度(默认 3)
|
||||
threads: 10 # 全局并发数
|
||||
rate-limit: 30 # 每秒最多请求数
|
||||
@@ -113,7 +122,7 @@ url_fetch:
|
||||
|
||||
uro:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(每 100 个 URL 约 1 秒),范围 30 ~ 300 秒
|
||||
# timeout: auto # 自动计算(每 100 个 URL 约 1 秒),范围 30 ~ 300 秒
|
||||
# whitelist: # 只保留指定扩展名
|
||||
# - php
|
||||
# - asp
|
||||
@@ -127,7 +136,7 @@ url_fetch:
|
||||
|
||||
httpx:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(每个 URL 约 1 秒)
|
||||
# timeout: auto # 自动计算(每个 URL 约 1 秒)
|
||||
# threads: 50 # 并发线程数(默认 50)
|
||||
# rate-limit: 150 # 每秒请求数(默认 150)
|
||||
# request-timeout: 10 # 单个请求超时秒数(默认 10)
|
||||
@@ -138,7 +147,7 @@ vuln_scan:
|
||||
tools:
|
||||
dalfox_xss:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(endpoints 行数 × 100 秒)
|
||||
# timeout: auto # 自动计算(endpoints 行数 × 100 秒)
|
||||
request-timeout: 10 # 单个请求超时秒数
|
||||
only-poc: r # 只输出 POC 结果(r: 反射型)
|
||||
ignore-return: "302,404,403" # 忽略的返回码
|
||||
@@ -149,7 +158,7 @@ vuln_scan:
|
||||
|
||||
nuclei:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(根据 endpoints 行数)
|
||||
# timeout: auto # 自动计算(根据 endpoints 行数)
|
||||
template-repo-names: # 模板仓库列表,对应「Nuclei 模板」中的仓库名
|
||||
- nuclei-templates
|
||||
# - nuclei-custom # 可追加自定义仓库
|
||||
|
||||
@@ -5,8 +5,10 @@
|
||||
|
||||
from .initiate_scan_flow import initiate_scan_flow
|
||||
from .subdomain_discovery_flow import subdomain_discovery_flow
|
||||
from .fingerprint_detect_flow import fingerprint_detect_flow
|
||||
|
||||
__all__ = [
|
||||
'initiate_scan_flow',
|
||||
'subdomain_discovery_flow',
|
||||
'fingerprint_detect_flow',
|
||||
]
|
||||
|
||||
@@ -140,28 +140,7 @@ def _get_max_workers(tool_config: dict, default: int = DEFAULT_MAX_WORKERS) -> i
|
||||
return default
|
||||
|
||||
|
||||
def _setup_directory_scan_directory(scan_workspace_dir: str) -> Path:
|
||||
"""
|
||||
创建并验证目录扫描工作目录
|
||||
|
||||
Args:
|
||||
scan_workspace_dir: 扫描工作空间目录
|
||||
|
||||
Returns:
|
||||
Path: 目录扫描目录路径
|
||||
|
||||
Raises:
|
||||
RuntimeError: 目录创建或验证失败
|
||||
"""
|
||||
directory_scan_dir = Path(scan_workspace_dir) / 'directory_scan'
|
||||
directory_scan_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not directory_scan_dir.is_dir():
|
||||
raise RuntimeError(f"目录扫描目录创建失败: {directory_scan_dir}")
|
||||
if not os.access(directory_scan_dir, os.W_OK):
|
||||
raise RuntimeError(f"目录扫描目录不可写: {directory_scan_dir}")
|
||||
|
||||
return directory_scan_dir
|
||||
|
||||
|
||||
|
||||
def _export_site_urls(target_id: int, target_name: str, directory_scan_dir: Path) -> tuple[str, int]:
|
||||
@@ -185,8 +164,7 @@ def _export_site_urls(target_id: int, target_name: str, directory_scan_dir: Path
|
||||
export_result = export_sites_task(
|
||||
target_id=target_id,
|
||||
output_file=sites_file,
|
||||
batch_size=1000, # 每次读取 1000 条,优化内存占用
|
||||
target_name=target_name # 传入 target_name 用于懒加载
|
||||
batch_size=1000 # 每次读取 1000 条,优化内存占用
|
||||
)
|
||||
|
||||
site_count = export_result['total_count']
|
||||
@@ -483,13 +461,23 @@ def _run_scans_concurrently(
|
||||
logger.warning("没有有效的扫描任务")
|
||||
continue
|
||||
|
||||
# 使用 ThreadPoolTaskRunner 并发执行
|
||||
logger.info("开始并发提交 %d 个扫描任务...", len(scan_params_list))
|
||||
# ============================================================
|
||||
# 分批执行策略:控制实际并发的 ffuf 进程数
|
||||
# ============================================================
|
||||
total_tasks = len(scan_params_list)
|
||||
logger.info("开始分批执行 %d 个扫描任务(每批 %d 个)...", total_tasks, max_workers)
|
||||
|
||||
with ThreadPoolTaskRunner(max_workers=max_workers) as task_runner:
|
||||
# 提交所有任务
|
||||
batch_num = 0
|
||||
for batch_start in range(0, total_tasks, max_workers):
|
||||
batch_end = min(batch_start + max_workers, total_tasks)
|
||||
batch_params = scan_params_list[batch_start:batch_end]
|
||||
batch_num += 1
|
||||
|
||||
logger.info("执行第 %d 批任务(%d-%d/%d)...", batch_num, batch_start + 1, batch_end, total_tasks)
|
||||
|
||||
# 提交当前批次的任务(非阻塞,立即返回 future)
|
||||
futures = []
|
||||
for params in scan_params_list:
|
||||
for params in batch_params:
|
||||
future = run_and_stream_save_directories_task.submit(
|
||||
cmd=params['command'],
|
||||
tool_name=tool_name,
|
||||
@@ -504,12 +492,10 @@ def _run_scans_concurrently(
|
||||
)
|
||||
futures.append((params['idx'], params['site_url'], future))
|
||||
|
||||
logger.info("✓ 已提交 %d 个扫描任务,等待完成...", len(futures))
|
||||
|
||||
# 等待所有任务完成并聚合结果
|
||||
# 等待当前批次所有任务完成(阻塞,确保本批完成后再启动下一批)
|
||||
for idx, site_url, future in futures:
|
||||
try:
|
||||
result = future.result()
|
||||
result = future.result() # 阻塞等待单个任务完成
|
||||
directories_found = result.get('created_directories', 0)
|
||||
total_directories += directories_found
|
||||
processed_sites_count += 1
|
||||
@@ -521,7 +507,6 @@ def _run_scans_concurrently(
|
||||
|
||||
except Exception as exc:
|
||||
failed_sites.append(site_url)
|
||||
# 判断是否为超时异常
|
||||
if 'timeout' in str(exc).lower() or isinstance(exc, subprocess.TimeoutExpired):
|
||||
logger.warning(
|
||||
"⚠️ [%d/%d] 站点扫描超时: %s - 错误: %s",
|
||||
@@ -633,7 +618,8 @@ def directory_scan_flow(
|
||||
raise ValueError("enabled_tools 不能为空")
|
||||
|
||||
# Step 0: 创建工作目录
|
||||
directory_scan_dir = _setup_directory_scan_directory(scan_workspace_dir)
|
||||
from apps.scan.utils import setup_scan_directory
|
||||
directory_scan_dir = setup_scan_directory(scan_workspace_dir, 'directory_scan')
|
||||
|
||||
# Step 1: 导出站点 URL(支持懒加载)
|
||||
sites_file, site_count = _export_site_urls(target_id, target_name, directory_scan_dir)
|
||||
|
||||
380
backend/apps/scan/flows/fingerprint_detect_flow.py
Normal file
380
backend/apps/scan/flows/fingerprint_detect_flow.py
Normal file
@@ -0,0 +1,380 @@
|
||||
"""
|
||||
指纹识别 Flow
|
||||
|
||||
负责编排指纹识别的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 在 site_scan 后串行执行
|
||||
- 使用 xingfinger 工具识别技术栈
|
||||
- 流式处理输出,批量更新数据库
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
import logging
|
||||
import os
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
from prefect import flow
|
||||
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.tasks.fingerprint_detect import (
|
||||
export_urls_for_fingerprint_task,
|
||||
run_xingfinger_and_stream_update_tech_task,
|
||||
)
|
||||
from apps.scan.utils import build_scan_command
|
||||
from apps.scan.utils.fingerprint_helpers import get_fingerprint_paths
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def calculate_fingerprint_detect_timeout(
|
||||
url_count: int,
|
||||
base_per_url: float = 3.0,
|
||||
min_timeout: int = 60
|
||||
) -> int:
|
||||
"""
|
||||
根据 URL 数量计算超时时间
|
||||
|
||||
公式:超时时间 = URL 数量 × 每 URL 基础时间
|
||||
最小值:60秒
|
||||
无上限
|
||||
|
||||
Args:
|
||||
url_count: URL 数量
|
||||
base_per_url: 每 URL 基础时间(秒),默认 3秒
|
||||
min_timeout: 最小超时时间(秒),默认 60秒
|
||||
|
||||
Returns:
|
||||
int: 计算出的超时时间(秒)
|
||||
|
||||
示例:
|
||||
100 URL × 3秒 = 300秒
|
||||
1000 URL × 3秒 = 3000秒(50分钟)
|
||||
10000 URL × 3秒 = 30000秒(8.3小时)
|
||||
"""
|
||||
timeout = int(url_count * base_per_url)
|
||||
return max(min_timeout, timeout)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def _export_urls(
|
||||
target_id: int,
|
||||
fingerprint_dir: Path,
|
||||
source: str = 'website'
|
||||
) -> tuple[str, int]:
|
||||
"""
|
||||
导出 URL 到文件
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
fingerprint_dir: 指纹识别目录
|
||||
source: 数据源类型
|
||||
|
||||
Returns:
|
||||
tuple: (urls_file, total_count)
|
||||
"""
|
||||
logger.info("Step 1: 导出 URL 列表 (source=%s)", source)
|
||||
|
||||
urls_file = str(fingerprint_dir / 'urls.txt')
|
||||
export_result = export_urls_for_fingerprint_task(
|
||||
target_id=target_id,
|
||||
output_file=urls_file,
|
||||
source=source,
|
||||
batch_size=1000
|
||||
)
|
||||
|
||||
total_count = export_result['total_count']
|
||||
|
||||
logger.info(
|
||||
"✓ URL 导出完成 - 文件: %s, 数量: %d",
|
||||
export_result['output_file'],
|
||||
total_count
|
||||
)
|
||||
|
||||
return export_result['output_file'], total_count
|
||||
|
||||
|
||||
def _run_fingerprint_detect(
|
||||
enabled_tools: dict,
|
||||
urls_file: str,
|
||||
url_count: int,
|
||||
fingerprint_dir: Path,
|
||||
scan_id: int,
|
||||
target_id: int,
|
||||
source: str
|
||||
) -> tuple[dict, list]:
|
||||
"""
|
||||
执行指纹识别任务
|
||||
|
||||
Args:
|
||||
enabled_tools: 已启用的工具配置字典
|
||||
urls_file: URL 文件路径
|
||||
url_count: URL 总数
|
||||
fingerprint_dir: 指纹识别目录
|
||||
scan_id: 扫描任务 ID
|
||||
target_id: 目标 ID
|
||||
source: 数据源类型
|
||||
|
||||
Returns:
|
||||
tuple: (tool_stats, failed_tools)
|
||||
"""
|
||||
tool_stats = {}
|
||||
failed_tools = []
|
||||
|
||||
for tool_name, tool_config in enabled_tools.items():
|
||||
# 1. 获取指纹库路径
|
||||
lib_names = tool_config.get('fingerprint_libs', ['ehole'])
|
||||
fingerprint_paths = get_fingerprint_paths(lib_names)
|
||||
|
||||
if not fingerprint_paths:
|
||||
reason = f"没有可用的指纹库: {lib_names}"
|
||||
logger.warning(reason)
|
||||
failed_tools.append({'tool': tool_name, 'reason': reason})
|
||||
continue
|
||||
|
||||
# 2. 将指纹库路径合并到 tool_config(用于命令构建)
|
||||
tool_config_with_paths = {**tool_config, **fingerprint_paths}
|
||||
|
||||
# 3. 构建命令
|
||||
try:
|
||||
command = build_scan_command(
|
||||
tool_name=tool_name,
|
||||
scan_type='fingerprint_detect',
|
||||
command_params={
|
||||
'urls_file': urls_file
|
||||
},
|
||||
tool_config=tool_config_with_paths
|
||||
)
|
||||
except Exception as e:
|
||||
reason = f"命令构建失败: {str(e)}"
|
||||
logger.error("构建 %s 命令失败: %s", tool_name, e)
|
||||
failed_tools.append({'tool': tool_name, 'reason': reason})
|
||||
continue
|
||||
|
||||
# 4. 计算超时时间
|
||||
timeout = calculate_fingerprint_detect_timeout(url_count)
|
||||
|
||||
# 5. 生成日志文件路径
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
||||
log_file = fingerprint_dir / f"{tool_name}_{timestamp}.log"
|
||||
|
||||
logger.info(
|
||||
"开始执行 %s 指纹识别 - URL数: %d, 超时: %ds, 指纹库: %s",
|
||||
tool_name, url_count, timeout, list(fingerprint_paths.keys())
|
||||
)
|
||||
|
||||
# 6. 执行扫描任务
|
||||
try:
|
||||
result = run_xingfinger_and_stream_update_tech_task(
|
||||
cmd=command,
|
||||
tool_name=tool_name,
|
||||
scan_id=scan_id,
|
||||
target_id=target_id,
|
||||
source=source,
|
||||
cwd=str(fingerprint_dir),
|
||||
timeout=timeout,
|
||||
log_file=str(log_file),
|
||||
batch_size=100
|
||||
)
|
||||
|
||||
tool_stats[tool_name] = {
|
||||
'command': command,
|
||||
'result': result,
|
||||
'timeout': timeout,
|
||||
'fingerprint_libs': list(fingerprint_paths.keys())
|
||||
}
|
||||
|
||||
logger.info(
|
||||
"✓ 工具 %s 执行完成 - 处理记录: %d, 更新: %d, 未找到: %d",
|
||||
tool_name,
|
||||
result.get('processed_records', 0),
|
||||
result.get('updated_count', 0),
|
||||
result.get('not_found_count', 0)
|
||||
)
|
||||
|
||||
except Exception as exc:
|
||||
failed_tools.append({'tool': tool_name, 'reason': str(exc)})
|
||||
logger.error("工具 %s 执行失败: %s", tool_name, exc, exc_info=True)
|
||||
|
||||
if failed_tools:
|
||||
logger.warning(
|
||||
"以下指纹识别工具执行失败: %s",
|
||||
', '.join([f['tool'] for f in failed_tools])
|
||||
)
|
||||
|
||||
return tool_stats, failed_tools
|
||||
|
||||
|
||||
@flow(
|
||||
name="fingerprint_detect",
|
||||
log_prints=True,
|
||||
on_running=[on_scan_flow_running],
|
||||
on_completion=[on_scan_flow_completed],
|
||||
on_failure=[on_scan_flow_failed],
|
||||
)
|
||||
def fingerprint_detect_flow(
|
||||
scan_id: int,
|
||||
target_name: str,
|
||||
target_id: int,
|
||||
scan_workspace_dir: str,
|
||||
enabled_tools: dict
|
||||
) -> dict:
|
||||
"""
|
||||
指纹识别 Flow
|
||||
|
||||
主要功能:
|
||||
1. 从数据库导出目标下所有 WebSite URL 到文件
|
||||
2. 使用 xingfinger 进行技术栈识别
|
||||
3. 解析结果并更新 WebSite.tech 字段(合并去重)
|
||||
|
||||
工作流程:
|
||||
Step 0: 创建工作目录
|
||||
Step 1: 导出 URL 列表
|
||||
Step 2: 解析配置,获取启用的工具
|
||||
Step 3: 执行 xingfinger 并解析结果
|
||||
|
||||
Args:
|
||||
scan_id: 扫描任务 ID
|
||||
target_name: 目标名称
|
||||
target_id: 目标 ID
|
||||
scan_workspace_dir: 扫描工作空间目录
|
||||
enabled_tools: 启用的工具配置(xingfinger)
|
||||
|
||||
Returns:
|
||||
dict: {
|
||||
'success': bool,
|
||||
'scan_id': int,
|
||||
'target': str,
|
||||
'scan_workspace_dir': str,
|
||||
'urls_file': str,
|
||||
'url_count': int,
|
||||
'processed_records': int,
|
||||
'updated_count': int,
|
||||
'not_found_count': int,
|
||||
'executed_tasks': list,
|
||||
'tool_stats': dict
|
||||
}
|
||||
"""
|
||||
try:
|
||||
logger.info(
|
||||
"="*60 + "\n" +
|
||||
"开始指纹识别\n" +
|
||||
f" Scan ID: {scan_id}\n" +
|
||||
f" Target: {target_name}\n" +
|
||||
f" Workspace: {scan_workspace_dir}\n" +
|
||||
"="*60
|
||||
)
|
||||
|
||||
# 参数验证
|
||||
if scan_id is None:
|
||||
raise ValueError("scan_id 不能为空")
|
||||
if not target_name:
|
||||
raise ValueError("target_name 不能为空")
|
||||
if target_id is None:
|
||||
raise ValueError("target_id 不能为空")
|
||||
if not scan_workspace_dir:
|
||||
raise ValueError("scan_workspace_dir 不能为空")
|
||||
|
||||
# 数据源类型(当前只支持 website)
|
||||
source = 'website'
|
||||
|
||||
# Step 0: 创建工作目录
|
||||
from apps.scan.utils import setup_scan_directory
|
||||
fingerprint_dir = setup_scan_directory(scan_workspace_dir, 'fingerprint_detect')
|
||||
|
||||
# Step 1: 导出 URL(支持懒加载)
|
||||
urls_file, url_count = _export_urls(target_id, fingerprint_dir, source)
|
||||
|
||||
if url_count == 0:
|
||||
logger.warning("目标下没有可用的 URL,跳过指纹识别")
|
||||
return {
|
||||
'success': True,
|
||||
'scan_id': scan_id,
|
||||
'target': target_name,
|
||||
'scan_workspace_dir': scan_workspace_dir,
|
||||
'urls_file': urls_file,
|
||||
'url_count': 0,
|
||||
'processed_records': 0,
|
||||
'updated_count': 0,
|
||||
'created_count': 0,
|
||||
'executed_tasks': ['export_urls_for_fingerprint'],
|
||||
'tool_stats': {
|
||||
'total': 0,
|
||||
'successful': 0,
|
||||
'failed': 0,
|
||||
'successful_tools': [],
|
||||
'failed_tools': [],
|
||||
'details': {}
|
||||
}
|
||||
}
|
||||
|
||||
# Step 2: 工具配置信息
|
||||
logger.info("Step 2: 工具配置信息")
|
||||
logger.info("✓ 启用工具: %s", ', '.join(enabled_tools.keys()))
|
||||
|
||||
# Step 3: 执行指纹识别
|
||||
logger.info("Step 3: 执行指纹识别")
|
||||
tool_stats, failed_tools = _run_fingerprint_detect(
|
||||
enabled_tools=enabled_tools,
|
||||
urls_file=urls_file,
|
||||
url_count=url_count,
|
||||
fingerprint_dir=fingerprint_dir,
|
||||
scan_id=scan_id,
|
||||
target_id=target_id,
|
||||
source=source
|
||||
)
|
||||
|
||||
logger.info("="*60 + "\n✓ 指纹识别完成\n" + "="*60)
|
||||
|
||||
# 动态生成已执行的任务列表
|
||||
executed_tasks = ['export_urls_for_fingerprint']
|
||||
executed_tasks.extend([f'run_xingfinger ({tool})' for tool in tool_stats.keys()])
|
||||
|
||||
# 汇总所有工具的结果
|
||||
total_processed = sum(stats['result'].get('processed_records', 0) for stats in tool_stats.values())
|
||||
total_updated = sum(stats['result'].get('updated_count', 0) for stats in tool_stats.values())
|
||||
total_created = sum(stats['result'].get('created_count', 0) for stats in tool_stats.values())
|
||||
|
||||
successful_tools = [name for name in enabled_tools.keys()
|
||||
if name not in [f['tool'] for f in failed_tools]]
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'scan_id': scan_id,
|
||||
'target': target_name,
|
||||
'scan_workspace_dir': scan_workspace_dir,
|
||||
'urls_file': urls_file,
|
||||
'url_count': url_count,
|
||||
'processed_records': total_processed,
|
||||
'updated_count': total_updated,
|
||||
'created_count': total_created,
|
||||
'executed_tasks': executed_tasks,
|
||||
'tool_stats': {
|
||||
'total': len(enabled_tools),
|
||||
'successful': len(successful_tools),
|
||||
'failed': len(failed_tools),
|
||||
'successful_tools': successful_tools,
|
||||
'failed_tools': failed_tools,
|
||||
'details': tool_stats
|
||||
}
|
||||
}
|
||||
|
||||
except ValueError as e:
|
||||
logger.error("配置错误: %s", e)
|
||||
raise
|
||||
except RuntimeError as e:
|
||||
logger.error("运行时错误: %s", e)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception("指纹识别失败: %s", e)
|
||||
raise
|
||||
@@ -30,7 +30,7 @@ from apps.scan.handlers import (
|
||||
on_initiate_scan_flow_failed,
|
||||
)
|
||||
from prefect.futures import wait
|
||||
from apps.scan.tasks.workspace_tasks import create_scan_workspace_task
|
||||
from apps.scan.utils import setup_scan_workspace
|
||||
from apps.scan.orchestrators import FlowOrchestrator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -110,7 +110,7 @@ def initiate_scan_flow(
|
||||
)
|
||||
|
||||
# ==================== Task 1: 创建 Scan 工作空间 ====================
|
||||
scan_workspace_path = create_scan_workspace_task(scan_workspace_dir)
|
||||
scan_workspace_path = setup_scan_workspace(scan_workspace_dir)
|
||||
|
||||
# ==================== Task 2: 获取引擎配置 ====================
|
||||
from apps.scan.models import Scan
|
||||
|
||||
@@ -154,28 +154,7 @@ def _parse_port_count(tool_config: dict) -> int:
|
||||
return 100
|
||||
|
||||
|
||||
def _setup_port_scan_directory(scan_workspace_dir: str) -> Path:
|
||||
"""
|
||||
创建并验证端口扫描工作目录
|
||||
|
||||
Args:
|
||||
scan_workspace_dir: 扫描工作空间目录
|
||||
|
||||
Returns:
|
||||
Path: 端口扫描目录路径
|
||||
|
||||
Raises:
|
||||
RuntimeError: 目录创建或验证失败
|
||||
"""
|
||||
port_scan_dir = Path(scan_workspace_dir) / 'port_scan'
|
||||
port_scan_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not port_scan_dir.is_dir():
|
||||
raise RuntimeError(f"端口扫描目录创建失败: {port_scan_dir}")
|
||||
if not os.access(port_scan_dir, os.W_OK):
|
||||
raise RuntimeError(f"端口扫描目录不可写: {port_scan_dir}")
|
||||
|
||||
return port_scan_dir
|
||||
|
||||
|
||||
|
||||
def _export_scan_targets(target_id: int, port_scan_dir: Path) -> tuple[str, int, str]:
|
||||
@@ -442,7 +421,8 @@ def port_scan_flow(
|
||||
)
|
||||
|
||||
# Step 0: 创建工作目录
|
||||
port_scan_dir = _setup_port_scan_directory(scan_workspace_dir)
|
||||
from apps.scan.utils import setup_scan_directory
|
||||
port_scan_dir = setup_scan_directory(scan_workspace_dir, 'port_scan')
|
||||
|
||||
# Step 1: 导出扫描目标列表到文件(根据 Target 类型自动决定内容)
|
||||
targets_file, target_count, target_type = _export_scan_targets(target_id, port_scan_dir)
|
||||
|
||||
@@ -85,28 +85,7 @@ def calculate_timeout_by_line_count(
|
||||
return min_timeout
|
||||
|
||||
|
||||
def _setup_site_scan_directory(scan_workspace_dir: str) -> Path:
|
||||
"""
|
||||
创建并验证站点扫描工作目录
|
||||
|
||||
Args:
|
||||
scan_workspace_dir: 扫描工作空间目录
|
||||
|
||||
Returns:
|
||||
Path: 站点扫描目录路径
|
||||
|
||||
Raises:
|
||||
RuntimeError: 目录创建或验证失败
|
||||
"""
|
||||
site_scan_dir = Path(scan_workspace_dir) / 'site_scan'
|
||||
site_scan_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not site_scan_dir.is_dir():
|
||||
raise RuntimeError(f"站点扫描目录创建失败: {site_scan_dir}")
|
||||
if not os.access(site_scan_dir, os.W_OK):
|
||||
raise RuntimeError(f"站点扫描目录不可写: {site_scan_dir}")
|
||||
|
||||
return site_scan_dir
|
||||
|
||||
|
||||
|
||||
def _export_site_urls(target_id: int, site_scan_dir: Path, target_name: str = None) -> tuple[str, int, int]:
|
||||
@@ -130,7 +109,6 @@ def _export_site_urls(target_id: int, site_scan_dir: Path, target_name: str = No
|
||||
export_result = export_site_urls_task(
|
||||
target_id=target_id,
|
||||
output_file=urls_file,
|
||||
target_name=target_name,
|
||||
batch_size=1000 # 每次处理1000个子域名
|
||||
)
|
||||
|
||||
@@ -403,7 +381,8 @@ def site_scan_flow(
|
||||
raise ValueError("scan_workspace_dir 不能为空")
|
||||
|
||||
# Step 0: 创建工作目录
|
||||
site_scan_dir = _setup_site_scan_directory(scan_workspace_dir)
|
||||
from apps.scan.utils import setup_scan_directory
|
||||
site_scan_dir = setup_scan_directory(scan_workspace_dir, 'site_scan')
|
||||
|
||||
# Step 1: 导出站点 URL
|
||||
urls_file, total_urls, association_count = _export_site_urls(
|
||||
|
||||
@@ -41,28 +41,7 @@ import subprocess
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _setup_subdomain_directory(scan_workspace_dir: str) -> Path:
|
||||
"""
|
||||
创建并验证子域名扫描工作目录
|
||||
|
||||
Args:
|
||||
scan_workspace_dir: 扫描工作空间目录
|
||||
|
||||
Returns:
|
||||
Path: 子域名扫描目录路径
|
||||
|
||||
Raises:
|
||||
RuntimeError: 目录创建或验证失败
|
||||
"""
|
||||
result_dir = Path(scan_workspace_dir) / 'subdomain_discovery'
|
||||
result_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not result_dir.is_dir():
|
||||
raise RuntimeError(f"子域名扫描目录创建失败: {result_dir}")
|
||||
if not os.access(result_dir, os.W_OK):
|
||||
raise RuntimeError(f"子域名扫描目录不可写: {result_dir}")
|
||||
|
||||
return result_dir
|
||||
|
||||
|
||||
|
||||
def _validate_and_normalize_target(target_name: str) -> str:
|
||||
@@ -119,12 +98,7 @@ def _run_scans_parallel(
|
||||
|
||||
# 生成时间戳(所有工具共用)
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
||||
|
||||
# TODO: 接入代理池管理系统
|
||||
# from apps.proxy.services import proxy_pool
|
||||
# proxy_stats = proxy_pool.get_stats()
|
||||
# logger.info(f"代理池状态: {proxy_stats['healthy']}/{proxy_stats['total']} 可用")
|
||||
|
||||
|
||||
failures = [] # 记录命令构建失败的工具
|
||||
futures = {}
|
||||
|
||||
@@ -417,7 +391,8 @@ def subdomain_discovery_flow(
|
||||
)
|
||||
|
||||
# Step 0: 准备工作
|
||||
result_dir = _setup_subdomain_directory(scan_workspace_dir)
|
||||
from apps.scan.utils import setup_scan_directory
|
||||
result_dir = setup_scan_directory(scan_workspace_dir, 'subdomain_discovery')
|
||||
|
||||
# 验证并规范化目标域名
|
||||
try:
|
||||
|
||||
@@ -42,17 +42,7 @@ SITES_FILE_TOOLS = {'katana'}
|
||||
POST_PROCESS_TOOLS = {'uro', 'httpx'}
|
||||
|
||||
|
||||
def _setup_url_fetch_directory(scan_workspace_dir: str) -> Path:
|
||||
"""创建并验证 URL 获取工作目录"""
|
||||
url_fetch_dir = Path(scan_workspace_dir) / 'url_fetch'
|
||||
url_fetch_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not url_fetch_dir.is_dir():
|
||||
raise RuntimeError(f"URL 获取目录创建失败: {url_fetch_dir}")
|
||||
if not os.access(url_fetch_dir, os.W_OK):
|
||||
raise RuntimeError(f"URL 获取目录不可写: {url_fetch_dir}")
|
||||
|
||||
return url_fetch_dir
|
||||
|
||||
|
||||
|
||||
def _classify_tools(enabled_tools: dict) -> tuple[dict, dict, dict, dict]:
|
||||
@@ -304,7 +294,8 @@ def url_fetch_flow(
|
||||
|
||||
# Step 1: 准备工作目录
|
||||
logger.info("Step 1: 准备工作目录")
|
||||
url_fetch_dir = _setup_url_fetch_directory(scan_workspace_dir)
|
||||
from apps.scan.utils import setup_scan_directory
|
||||
url_fetch_dir = setup_scan_directory(scan_workspace_dir, 'url_fetch')
|
||||
|
||||
# Step 2: 分类工具(按输入类型)
|
||||
logger.info("Step 2: 分类工具")
|
||||
|
||||
@@ -40,8 +40,7 @@ def _export_sites_file(target_id: int, scan_id: int, target_name: str, output_di
|
||||
result = export_sites_task(
|
||||
output_file=output_file,
|
||||
target_id=target_id,
|
||||
scan_id=scan_id,
|
||||
target_name=target_name
|
||||
scan_id=scan_id
|
||||
)
|
||||
|
||||
count = result['asset_count']
|
||||
|
||||
@@ -25,10 +25,7 @@ from .utils import calculate_timeout_by_line_count
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _setup_vuln_scan_directory(scan_workspace_dir: str) -> Path:
|
||||
vuln_scan_dir = Path(scan_workspace_dir) / "vuln_scan"
|
||||
vuln_scan_dir.mkdir(parents=True, exist_ok=True)
|
||||
return vuln_scan_dir
|
||||
|
||||
|
||||
|
||||
@flow(
|
||||
@@ -55,14 +52,14 @@ def endpoints_vuln_scan_flow(
|
||||
if not enabled_tools:
|
||||
raise ValueError("enabled_tools 不能为空")
|
||||
|
||||
vuln_scan_dir = _setup_vuln_scan_directory(scan_workspace_dir)
|
||||
from apps.scan.utils import setup_scan_directory
|
||||
vuln_scan_dir = setup_scan_directory(scan_workspace_dir, 'vuln_scan')
|
||||
endpoints_file = vuln_scan_dir / "input_endpoints.txt"
|
||||
|
||||
# Step 1: 导出 Endpoint URL
|
||||
export_result = export_endpoints_task(
|
||||
target_id=target_id,
|
||||
output_file=str(endpoints_file),
|
||||
target_name=target_name, # 传入 target_name 用于生成默认端点
|
||||
)
|
||||
total_endpoints = export_result.get("total_count", 0)
|
||||
|
||||
|
||||
@@ -87,8 +87,8 @@ def on_all_workers_high_load(sender, worker_name, cpu, mem, **kwargs):
|
||||
"""所有 Worker 高负载时的通知处理"""
|
||||
create_notification(
|
||||
title="系统负载较高",
|
||||
message=f"所有节点负载较高,已选择负载最低的节点 {worker_name}(CPU: {cpu:.1f}%, 内存: {mem:.1f}%)执行任务,扫描速度可能受影响",
|
||||
message=f"所有节点负载较高(最低负载节点 CPU: {cpu:.1f}%, 内存: {mem:.1f}%),系统将等待最多 10 分钟后分发任务,扫描速度可能受影响",
|
||||
level=NotificationLevel.MEDIUM,
|
||||
category=NotificationCategory.SYSTEM
|
||||
)
|
||||
logger.warning("高负载通知已发送 - worker=%s, cpu=%.1f%%, mem=%.1f%%", worker_name, cpu, mem)
|
||||
logger.warning("高负载通知已发送 - cpu=%.1f%%, mem=%.1f%%", cpu, mem)
|
||||
|
||||
@@ -206,6 +206,10 @@ class FlowOrchestrator:
|
||||
from apps.scan.flows.site_scan_flow import site_scan_flow
|
||||
return site_scan_flow
|
||||
|
||||
elif scan_type == 'fingerprint_detect':
|
||||
from apps.scan.flows.fingerprint_detect_flow import fingerprint_detect_flow
|
||||
return fingerprint_detect_flow
|
||||
|
||||
elif scan_type == 'directory_scan':
|
||||
from apps.scan.flows.directory_scan_flow import directory_scan_flow
|
||||
return directory_scan_flow
|
||||
|
||||
@@ -83,7 +83,7 @@ def cleanup_results(results_dir: str, retention_days: int) -> dict:
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="清理任务")
|
||||
parser.add_argument("--results_dir", type=str, default="/app/backend/results", help="扫描结果目录")
|
||||
parser.add_argument("--results_dir", type=str, default="/opt/xingrin/results", help="扫描结果目录")
|
||||
parser.add_argument("--retention_days", type=int, default=7, help="保留天数")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
@@ -17,6 +17,8 @@ from .scan_state_service import ScanStateService
|
||||
from .scan_control_service import ScanControlService
|
||||
from .scan_stats_service import ScanStatsService
|
||||
from .scheduled_scan_service import ScheduledScanService
|
||||
from .blacklist_service import BlacklistService
|
||||
from .target_export_service import TargetExportService
|
||||
|
||||
__all__ = [
|
||||
'ScanService', # 主入口(向后兼容)
|
||||
@@ -25,5 +27,7 @@ __all__ = [
|
||||
'ScanControlService',
|
||||
'ScanStatsService',
|
||||
'ScheduledScanService',
|
||||
'BlacklistService', # 黑名单过滤服务
|
||||
'TargetExportService', # 目标导出服务
|
||||
]
|
||||
|
||||
|
||||
85
backend/apps/scan/services/blacklist_service.py
Normal file
85
backend/apps/scan/services/blacklist_service.py
Normal file
@@ -0,0 +1,85 @@
|
||||
"""
|
||||
黑名单过滤服务
|
||||
|
||||
过滤敏感域名(如 .gov、.edu、.mil 等)
|
||||
|
||||
当前版本使用默认规则,后续将支持从前端配置加载。
|
||||
"""
|
||||
|
||||
from typing import List, Optional
|
||||
from django.db.models import QuerySet
|
||||
import re
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BlacklistService:
|
||||
"""
|
||||
黑名单过滤服务 - 过滤敏感域名
|
||||
|
||||
TODO: 后续版本支持从前端配置加载黑名单规则
|
||||
- 用户在开始扫描时配置黑名单 URL、域名、IP
|
||||
- 黑名单规则存储在数据库中,与 Scan 或 Engine 关联
|
||||
"""
|
||||
|
||||
# 默认黑名单正则规则
|
||||
DEFAULT_PATTERNS = [
|
||||
r'\.gov$', # .gov 结尾
|
||||
r'\.gov\.[a-z]{2}$', # .gov.cn, .gov.uk 等
|
||||
r'\.edu$', # .edu 结尾
|
||||
r'\.edu\.[a-z]{2}$', # .edu.cn 等
|
||||
r'\.mil$', # .mil 结尾
|
||||
]
|
||||
|
||||
def __init__(self, patterns: Optional[List[str]] = None):
|
||||
"""
|
||||
初始化黑名单服务
|
||||
|
||||
Args:
|
||||
patterns: 正则表达式列表,None 使用默认规则
|
||||
"""
|
||||
self.patterns = patterns or self.DEFAULT_PATTERNS
|
||||
self._compiled_patterns = [re.compile(p) for p in self.patterns]
|
||||
|
||||
def filter_queryset(
|
||||
self,
|
||||
queryset: QuerySet,
|
||||
url_field: str = 'url'
|
||||
) -> QuerySet:
|
||||
"""
|
||||
数据库层面过滤 queryset
|
||||
|
||||
使用 PostgreSQL 正则表达式排除黑名单 URL
|
||||
|
||||
Args:
|
||||
queryset: 原始 queryset
|
||||
url_field: URL 字段名
|
||||
|
||||
Returns:
|
||||
QuerySet: 过滤后的 queryset
|
||||
"""
|
||||
for pattern in self.patterns:
|
||||
queryset = queryset.exclude(**{f'{url_field}__regex': pattern})
|
||||
return queryset
|
||||
|
||||
def filter_url(self, url: str) -> bool:
|
||||
"""
|
||||
检查单个 URL 是否通过黑名单过滤
|
||||
|
||||
Args:
|
||||
url: 要检查的 URL
|
||||
|
||||
Returns:
|
||||
bool: True 表示通过(不在黑名单),False 表示被过滤
|
||||
"""
|
||||
for pattern in self._compiled_patterns:
|
||||
if pattern.search(url):
|
||||
return False
|
||||
return True
|
||||
|
||||
# TODO: 后续版本实现
|
||||
# @classmethod
|
||||
# def from_scan(cls, scan_id: int) -> 'BlacklistService':
|
||||
# """从数据库加载扫描配置的黑名单规则"""
|
||||
# pass
|
||||
364
backend/apps/scan/services/target_export_service.py
Normal file
364
backend/apps/scan/services/target_export_service.py
Normal file
@@ -0,0 +1,364 @@
|
||||
"""
|
||||
目标导出服务
|
||||
|
||||
提供统一的目标提取和文件导出功能,支持:
|
||||
- URL 导出(流式写入 + 默认值回退)
|
||||
- 域名/IP 导出(用于端口扫描)
|
||||
- 黑名单过滤集成
|
||||
"""
|
||||
|
||||
import ipaddress
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional, Iterator
|
||||
|
||||
from django.db.models import QuerySet
|
||||
|
||||
from .blacklist_service import BlacklistService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TargetExportService:
    """
    Target export service — unified target extraction and file export.

    Supports:
      - URL export (streamed writes with a default-value fallback)
      - domain/IP export (for port scanning)
      - optional blacklist filtering

    Usage:
        # The task layer decides the data source
        queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)

        # Use the export service
        blacklist_service = BlacklistService()
        export_service = TargetExportService(blacklist_service=blacklist_service)
        result = export_service.export_urls(target_id, output_path, queryset)
    """

    def __init__(self, blacklist_service: Optional[BlacklistService] = None):
        """
        Initialize the export service.

        Args:
            blacklist_service: blacklist filter service; None disables filtering.
        """
        self.blacklist_service = blacklist_service

    def export_urls(
        self,
        target_id: int,
        output_path: str,
        queryset: QuerySet,
        url_field: str = 'url',
        batch_size: int = 1000
    ) -> Dict[str, Any]:
        """
        Unified URL export.

        Automatically handles an empty database:
          - with data: stream the queryset rows into the output file
          - without data: fall back to generating default URLs from the Target

        Args:
            target_id: target ID
            output_path: output file path
            queryset: data source built by the task layer (expected to be a
                ``values_list(..., flat=True)`` of URL strings)
            url_field: URL field name for queryset-level blacklist filtering
                (currently unused — see NOTE in the body)
            batch_size: chunk size for queryset iteration

        Returns:
            dict: {'success': bool, 'output_file': str, 'total_count': int}

        Raises:
            IOError: when the file cannot be written
        """
        output_file = Path(output_path)
        output_file.parent.mkdir(parents=True, exist_ok=True)

        logger.info("开始导出 URL - target_id=%s, output=%s", target_id, output_path)

        # NOTE(review): SQL-level blacklist filtering is intentionally not
        # applied here — the incoming queryset is a flat values_list, so the
        # per-URL Python filter below is used instead. This branch is a no-op.
        if self.blacklist_service:
            pass

        total_count = 0
        try:
            with open(output_file, 'w', encoding='utf-8', buffering=8192) as f:
                for url in queryset.iterator(chunk_size=batch_size):
                    if url:
                        # Python-level blacklist filtering (True == keep).
                        if self.blacklist_service and not self.blacklist_service.filter_url(url):
                            continue
                        f.write(f"{url}\n")
                        total_count += 1

                        # Progress log every 10k URLs.
                        if total_count % 10000 == 0:
                            logger.info("已导出 %d 个 URL...", total_count)
        except IOError as e:
            logger.error("文件写入失败: %s - %s", output_path, e)
            raise

        # Fallback: nothing in the database (or everything filtered out) —
        # generate default URLs from the Target definition.
        if total_count == 0:
            total_count = self._generate_default_urls(target_id, output_file)

        logger.info("✓ URL 导出完成 - 数量: %d, 文件: %s", total_count, output_path)

        return {
            'success': True,
            'output_file': str(output_file),
            'total_count': total_count
        }

    def _generate_default_urls(
        self,
        target_id: int,
        output_path: Path
    ) -> int:
        """
        Default URL generator (internal).

        Generates default URLs based on the Target type:
          - DOMAIN: http(s)://domain
          - IP: http(s)://ip
          - CIDR: http(s)://ip for every host in the network
          - URL: the target URL itself

        Args:
            target_id: target ID
            output_path: output file path

        Returns:
            int: number of URLs written

        Raises:
            ValueError: when a CIDR target name cannot be parsed
        """
        # Function-scope imports — presumably to avoid circular imports at
        # module load time; TODO confirm.
        from apps.targets.services import TargetService
        from apps.targets.models import Target

        target_service = TargetService()
        target = target_service.get_target(target_id)

        if not target:
            logger.warning("Target ID %d 不存在,无法生成默认 URL", target_id)
            return 0

        target_name = target.name
        target_type = target.type

        logger.info("懒加载模式:Target 类型=%s, 名称=%s", target_type, target_name)

        total_urls = 0

        with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
            if target_type == Target.TargetType.DOMAIN:
                urls = [f"http://{target_name}", f"https://{target_name}"]
                for url in urls:
                    if self._should_write_url(url):
                        f.write(f"{url}\n")
                        total_urls += 1

            elif target_type == Target.TargetType.IP:
                urls = [f"http://{target_name}", f"https://{target_name}"]
                for url in urls:
                    if self._should_write_url(url):
                        f.write(f"{url}\n")
                        total_urls += 1

            elif target_type == Target.TargetType.CIDR:
                try:
                    network = ipaddress.ip_network(target_name, strict=False)

                    # hosts() excludes the network and broadcast addresses.
                    for ip in network.hosts():
                        urls = [f"http://{ip}", f"https://{ip}"]
                        for url in urls:
                            if self._should_write_url(url):
                                f.write(f"{url}\n")
                                total_urls += 1

                        if total_urls % 10000 == 0:
                            logger.info("已生成 %d 个 URL...", total_urls)

                    # /32 or /128 networks: hosts() yields nothing, so fall
                    # back to the single network address.
                    if total_urls == 0:
                        ip = str(network.network_address)
                        urls = [f"http://{ip}", f"https://{ip}"]
                        for url in urls:
                            if self._should_write_url(url):
                                f.write(f"{url}\n")
                                total_urls += 1

                except ValueError as e:
                    logger.error("CIDR 解析失败: %s - %s", target_name, e)
                    raise ValueError(f"无效的 CIDR: {target_name}") from e

            elif target_type == Target.TargetType.URL:
                if self._should_write_url(target_name):
                    f.write(f"{target_name}\n")
                    total_urls = 1
            else:
                logger.warning("不支持的 Target 类型: %s", target_type)

        logger.info("✓ 懒加载生成默认 URL - 数量: %d", total_urls)
        return total_urls

    def _should_write_url(self, url: str) -> bool:
        """Return True when the URL passes the blacklist filter (write it)."""
        if self.blacklist_service:
            return self.blacklist_service.filter_url(url)
        return True

    def export_targets(
        self,
        target_id: int,
        output_path: str,
        batch_size: int = 1000
    ) -> Dict[str, Any]:
        """
        Domain/IP export (for port scanning).

        Chooses the export logic based on the Target type:
          - DOMAIN: stream subdomains from the Subdomain table
          - IP: write the single IP address
          - CIDR: expand to every host IP

        Args:
            target_id: target ID
            output_path: output file path
            batch_size: chunk size

        Returns:
            dict: {'success': bool, 'output_file': str, 'total_count': int,
                   'target_type': str}

        Raises:
            ValueError: when the Target does not exist or the type is unsupported
        """
        from apps.targets.services import TargetService
        from apps.targets.models import Target
        # NOTE(review): SubdomainService is imported but unused in this method;
        # the DOMAIN branch re-imports it inside _export_domains.
        from apps.asset.services.asset.subdomain_service import SubdomainService

        output_file = Path(output_path)
        output_file.parent.mkdir(parents=True, exist_ok=True)

        # Look up the Target record.
        target_service = TargetService()
        target = target_service.get_target(target_id)

        if not target:
            raise ValueError(f"Target ID {target_id} 不存在")

        target_type = target.type
        target_name = target.name

        logger.info(
            "开始导出扫描目标 - Target ID: %d, Name: %s, Type: %s, 输出文件: %s",
            target_id, target_name, target_type, output_path
        )

        total_count = 0

        if target_type == Target.TargetType.DOMAIN:
            total_count = self._export_domains(target_id, target_name, output_file, batch_size)
            type_desc = "域名"

        elif target_type == Target.TargetType.IP:
            total_count = self._export_ip(target_name, output_file)
            type_desc = "IP"

        elif target_type == Target.TargetType.CIDR:
            total_count = self._export_cidr(target_name, output_file)
            type_desc = "CIDR IP"

        else:
            raise ValueError(f"不支持的目标类型: {target_type}")

        logger.info(
            "✓ 扫描目标导出完成 - 类型: %s, 总数: %d, 文件: %s",
            type_desc, total_count, output_path
        )

        return {
            'success': True,
            'output_file': str(output_file),
            'total_count': total_count,
            'target_type': target_type
        }

    def _export_domains(
        self,
        target_id: int,
        target_name: str,
        output_path: Path,
        batch_size: int
    ) -> int:
        """Export the subdomains of a DOMAIN target; fall back to the root
        domain when no subdomains exist (file only — nothing is written to
        the database)."""
        from apps.asset.services.asset.subdomain_service import SubdomainService

        subdomain_service = SubdomainService()
        domain_iterator = subdomain_service.iter_subdomain_names_by_target(
            target_id=target_id,
            chunk_size=batch_size
        )

        total_count = 0
        with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
            for domain_name in domain_iterator:
                if self._should_write_target(domain_name):
                    f.write(f"{domain_name}\n")
                    total_count += 1

                    if total_count % 10000 == 0:
                        logger.info("已导出 %d 个域名...", total_count)

        # Default mode: no subdomains — use the root domain itself.
        if total_count == 0:
            logger.info("采用默认域名:%s (target_id=%d)", target_name, target_id)
            if self._should_write_target(target_name):
                with open(output_path, 'w', encoding='utf-8') as f:
                    f.write(f"{target_name}\n")
                total_count = 1

        return total_count

    def _export_ip(self, target_name: str, output_path: Path) -> int:
        """Export an IP-type target (a single address). Returns 0 or 1."""
        if self._should_write_target(target_name):
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(f"{target_name}\n")
            return 1
        return 0

    def _export_cidr(self, target_name: str, output_path: Path) -> int:
        """Export a CIDR-type target, expanded to one line per host IP."""
        network = ipaddress.ip_network(target_name, strict=False)
        total_count = 0

        with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
            for ip in network.hosts():
                ip_str = str(ip)
                if self._should_write_target(ip_str):
                    f.write(f"{ip_str}\n")
                    total_count += 1

                    if total_count % 10000 == 0:
                        logger.info("已导出 %d 个 IP...", total_count)

        # /32 or /128 networks: hosts() yields nothing, so write the single
        # network address instead.
        if total_count == 0:
            ip_str = str(network.network_address)
            if self._should_write_target(ip_str):
                with open(output_path, 'w', encoding='utf-8') as f:
                    f.write(f"{ip_str}\n")
                total_count = 1

        return total_count

    def _should_write_target(self, target: str) -> bool:
        """Return True when the target passes the blacklist filter (write it)."""
        if self.blacklist_service:
            return self.blacklist_service.filter_url(target)
        return True
|
||||
@@ -9,9 +9,6 @@
|
||||
- Tasks 负责具体操作,Flow 负责编排
|
||||
"""
|
||||
|
||||
# Prefect Tasks
|
||||
from .workspace_tasks import create_scan_workspace_task
|
||||
|
||||
# 子域名发现任务(已重构为多个子任务)
|
||||
from .subdomain_discovery import (
|
||||
run_subdomain_discovery_task,
|
||||
@@ -19,17 +16,25 @@ from .subdomain_discovery import (
|
||||
save_domains_task,
|
||||
)
|
||||
|
||||
# 指纹识别任务
|
||||
from .fingerprint_detect import (
|
||||
export_urls_for_fingerprint_task,
|
||||
run_xingfinger_and_stream_update_tech_task,
|
||||
)
|
||||
|
||||
# 注意:
|
||||
# - subdomain_discovery_task 已重构为多个子任务(subdomain_discovery/)
|
||||
# - finalize_scan_task 已废弃(Handler 统一管理状态)
|
||||
# - initiate_scan_task 已迁移到 flows/initiate_scan_flow.py
|
||||
# - cleanup_old_scans_task 已迁移到 flows(cleanup_old_scans_flow)
|
||||
# - create_scan_workspace_task 已删除,直接使用 setup_scan_workspace()
|
||||
|
||||
__all__ = [
|
||||
# Prefect Tasks
|
||||
'create_scan_workspace_task',
|
||||
# 子域名发现任务
|
||||
'run_subdomain_discovery_task',
|
||||
'merge_and_validate_task',
|
||||
'save_domains_task',
|
||||
# 指纹识别任务
|
||||
'export_urls_for_fingerprint_task',
|
||||
'run_xingfinger_and_stream_update_tech_task',
|
||||
]
|
||||
|
||||
@@ -1,20 +1,14 @@
|
||||
"""
|
||||
导出站点 URL 到 TXT 文件的 Task
|
||||
|
||||
使用流式处理,避免大量站点导致内存溢出
|
||||
支持默认值模式:如果没有站点,根据 Target 类型生成默认 URL
|
||||
- DOMAIN: http(s)://target_name
|
||||
- IP: http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 http(s)://ip
|
||||
使用 TargetExportService 统一处理导出逻辑和默认值回退
|
||||
数据源: WebSite.url
|
||||
"""
|
||||
import logging
|
||||
import ipaddress
|
||||
from pathlib import Path
|
||||
from prefect import task
|
||||
|
||||
from apps.asset.repositories import DjangoWebSiteRepository
|
||||
from apps.targets.services import TargetService
|
||||
from apps.targets.models import Target
|
||||
from apps.asset.models import WebSite
|
||||
from apps.scan.services import TargetExportService, BlacklistService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -24,19 +18,22 @@ def export_sites_task(
|
||||
target_id: int,
|
||||
output_file: str,
|
||||
batch_size: int = 1000,
|
||||
target_name: str = None
|
||||
) -> dict:
|
||||
"""
|
||||
导出目标下的所有站点 URL 到 TXT 文件
|
||||
|
||||
使用流式处理,支持大规模数据导出(10万+站点)
|
||||
支持默认值模式:如果没有站点,自动使用默认站点 URL(http(s)://target_name)
|
||||
数据源: WebSite.url
|
||||
|
||||
懒加载模式:
|
||||
- 如果数据库为空,根据 Target 类型生成默认 URL
|
||||
- DOMAIN: http(s)://domain
|
||||
- IP: http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 URL
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
output_file: 输出文件路径(绝对路径)
|
||||
batch_size: 每次读取的批次大小,默认 1000
|
||||
target_name: 目标名称(用于默认值模式)
|
||||
|
||||
Returns:
|
||||
dict: {
|
||||
@@ -49,134 +46,26 @@ def export_sites_task(
|
||||
ValueError: 参数错误
|
||||
IOError: 文件写入失败
|
||||
"""
|
||||
try:
|
||||
# 初始化 Repository
|
||||
repository = DjangoWebSiteRepository()
|
||||
|
||||
logger.info("开始导出站点 URL - Target ID: %d, 输出文件: %s", target_id, output_file)
|
||||
|
||||
# 确保输出目录存在
|
||||
output_path = Path(output_file)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# 使用 Repository 流式查询站点 URL
|
||||
url_iterator = repository.get_urls_for_export(
|
||||
target_id=target_id,
|
||||
batch_size=batch_size
|
||||
)
|
||||
|
||||
# 流式写入文件
|
||||
total_count = 0
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
for url in url_iterator:
|
||||
# 每次只处理一个 URL,边读边写
|
||||
f.write(f"{url}\n")
|
||||
total_count += 1
|
||||
|
||||
# 每写入 10000 条记录打印一次进度
|
||||
if total_count % 10000 == 0:
|
||||
logger.info("已导出 %d 个站点 URL...", total_count)
|
||||
|
||||
# ==================== 懒加载模式:根据 Target 类型生成默认 URL ====================
|
||||
if total_count == 0:
|
||||
total_count = _write_default_urls(target_id, target_name, output_path)
|
||||
|
||||
logger.info(
|
||||
"✓ 站点 URL 导出完成 - 总数: %d, 文件: %s (%.2f KB)",
|
||||
total_count,
|
||||
str(output_path), # 使用绝对路径
|
||||
output_path.stat().st_size / 1024
|
||||
)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'output_file': str(output_path),
|
||||
'total_count': total_count
|
||||
}
|
||||
|
||||
except FileNotFoundError as e:
|
||||
logger.error("输出目录不存在: %s", e)
|
||||
raise
|
||||
except PermissionError as e:
|
||||
logger.error("文件写入权限不足: %s", e)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception("导出站点 URL 失败: %s", e)
|
||||
raise
|
||||
|
||||
|
||||
def _write_default_urls(target_id: int, target_name: str, output_path: Path) -> int:
|
||||
"""
|
||||
懒加载模式:根据 Target 类型生成默认 URL
|
||||
# 构建数据源 queryset(Task 层决定数据源)
|
||||
queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
target_name: 目标名称(可选,如果为空则从数据库查询)
|
||||
output_path: 输出文件路径
|
||||
|
||||
Returns:
|
||||
int: 生成的 URL 数量
|
||||
"""
|
||||
# 获取 Target 信息
|
||||
target_service = TargetService()
|
||||
target = target_service.get_target(target_id)
|
||||
# 使用 TargetExportService 处理导出
|
||||
blacklist_service = BlacklistService()
|
||||
export_service = TargetExportService(blacklist_service=blacklist_service)
|
||||
|
||||
if not target:
|
||||
logger.warning("Target ID %d 不存在,无法生成默认 URL", target_id)
|
||||
return 0
|
||||
result = export_service.export_urls(
|
||||
target_id=target_id,
|
||||
output_path=output_file,
|
||||
queryset=queryset,
|
||||
batch_size=batch_size
|
||||
)
|
||||
|
||||
target_name = target.name
|
||||
target_type = target.type
|
||||
|
||||
logger.info("懒加载模式:Target 类型=%s, 名称=%s", target_type, target_name)
|
||||
|
||||
total_urls = 0
|
||||
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
if target_type == Target.TargetType.DOMAIN:
|
||||
# 域名类型:生成 http(s)://domain
|
||||
f.write(f"http://{target_name}\n")
|
||||
f.write(f"https://{target_name}\n")
|
||||
total_urls = 2
|
||||
logger.info("✓ 域名默认 URL 已写入: http(s)://%s", target_name)
|
||||
|
||||
elif target_type == Target.TargetType.IP:
|
||||
# IP 类型:生成 http(s)://ip
|
||||
f.write(f"http://{target_name}\n")
|
||||
f.write(f"https://{target_name}\n")
|
||||
total_urls = 2
|
||||
logger.info("✓ IP 默认 URL 已写入: http(s)://%s", target_name)
|
||||
|
||||
elif target_type == Target.TargetType.CIDR:
|
||||
# CIDR 类型:展开为所有 IP 的 URL
|
||||
try:
|
||||
network = ipaddress.ip_network(target_name, strict=False)
|
||||
|
||||
for ip in network.hosts(): # 排除网络地址和广播地址
|
||||
f.write(f"http://{ip}\n")
|
||||
f.write(f"https://{ip}\n")
|
||||
total_urls += 2
|
||||
|
||||
if total_urls % 10000 == 0:
|
||||
logger.info("已生成 %d 个 URL...", total_urls)
|
||||
|
||||
# 如果是 /32 或 /128(单个 IP),hosts() 会为空
|
||||
if total_urls == 0:
|
||||
ip = str(network.network_address)
|
||||
f.write(f"http://{ip}\n")
|
||||
f.write(f"https://{ip}\n")
|
||||
total_urls = 2
|
||||
|
||||
logger.info("✓ CIDR 默认 URL 已写入: %d 个 URL (来自 %s)", total_urls, target_name)
|
||||
|
||||
except ValueError as e:
|
||||
logger.error("CIDR 解析失败: %s - %s", target_name, e)
|
||||
return 0
|
||||
else:
|
||||
logger.warning("不支持的 Target 类型: %s", target_type)
|
||||
return 0
|
||||
|
||||
return total_urls
|
||||
# 保持返回值格式不变(向后兼容)
|
||||
return {
|
||||
'success': result['success'],
|
||||
'output_file': result['output_file'],
|
||||
'total_count': result['total_count']
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
15
backend/apps/scan/tasks/fingerprint_detect/__init__.py
Normal file
15
backend/apps/scan/tasks/fingerprint_detect/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
"""
|
||||
指纹识别任务模块
|
||||
|
||||
包含:
|
||||
- export_urls_for_fingerprint_task: 导出 URL 到文件
|
||||
- run_xingfinger_and_stream_update_tech_task: 流式执行 xingfinger 并更新 tech
|
||||
"""
|
||||
|
||||
from .export_urls_task import export_urls_for_fingerprint_task
|
||||
from .run_xingfinger_task import run_xingfinger_and_stream_update_tech_task
|
||||
|
||||
__all__ = [
|
||||
'export_urls_for_fingerprint_task',
|
||||
'run_xingfinger_and_stream_update_tech_task',
|
||||
]
|
||||
@@ -0,0 +1,65 @@
|
||||
"""
|
||||
导出 URL 任务
|
||||
|
||||
用于指纹识别前导出目标下的 URL 到文件
|
||||
使用 TargetExportService 统一处理导出逻辑和默认值回退
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from prefect import task
|
||||
|
||||
from apps.asset.models import WebSite
|
||||
from apps.scan.services import TargetExportService, BlacklistService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(name="export_urls_for_fingerprint")
def export_urls_for_fingerprint_task(
    target_id: int,
    output_file: str,
    source: str = 'website',
    batch_size: int = 1000
) -> dict:
    """
    Export a target's URLs to a file for fingerprint detection.

    Data source: WebSite.url.

    Lazy-default mode (handled by TargetExportService): when the database
    holds no URLs for the target, default URLs are generated from the
    Target definition (DOMAIN/IP -> http(s)://name, CIDR -> every host,
    URL -> the target URL itself).

    Args:
        target_id: target ID
        output_file: output file path
        source: data-source tag (kept for compatibility with old callers)
        batch_size: chunk size for streaming reads

    Returns:
        dict: {'output_file': str, 'total_count': int, 'source': str}
    """
    # The task layer owns the choice of data source.
    url_qs = (
        WebSite.objects
        .filter(target_id=target_id)
        .values_list('url', flat=True)
    )

    # Delegate the actual export (and default-value fallback) to the service.
    exporter = TargetExportService(blacklist_service=BlacklistService())
    export_result = exporter.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=url_qs,
        batch_size=batch_size,
    )

    # Same return shape as before (backward compatible).
    return {
        'output_file': export_result['output_file'],
        'total_count': export_result['total_count'],
        'source': source,
    }
|
||||
@@ -0,0 +1,300 @@
|
||||
"""
|
||||
xingfinger 执行任务
|
||||
|
||||
流式执行 xingfinger 命令并实时更新 tech 字段
|
||||
"""
|
||||
|
||||
import importlib
|
||||
import json
|
||||
import logging
|
||||
import subprocess
|
||||
from typing import Optional, Generator
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from django.db import connection
|
||||
from prefect import task
|
||||
|
||||
from apps.scan.utils import execute_stream
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# 数据源映射:source → (module_path, model_name, url_field)
|
||||
SOURCE_MODEL_MAP = {
|
||||
'website': ('apps.asset.models', 'WebSite', 'url'),
|
||||
# 以后扩展:
|
||||
# 'endpoint': ('apps.asset.models', 'Endpoint', 'url'),
|
||||
# 'directory': ('apps.asset.models', 'Directory', 'url'),
|
||||
}
|
||||
|
||||
|
||||
def _get_model_class(source: str):
|
||||
"""根据数据源类型获取 Model 类"""
|
||||
if source not in SOURCE_MODEL_MAP:
|
||||
raise ValueError(f"不支持的数据源: {source}")
|
||||
|
||||
module_path, model_name, _ = SOURCE_MODEL_MAP[source]
|
||||
module = importlib.import_module(module_path)
|
||||
return getattr(module, model_name)
|
||||
|
||||
|
||||
def parse_xingfinger_line(line: str) -> tuple[str, list[str]] | None:
|
||||
"""
|
||||
解析 xingfinger 单行 JSON 输出
|
||||
|
||||
xingfinger 静默模式输出格式:
|
||||
{"url": "https://example.com", "cms": "WordPress,PHP,nginx", ...}
|
||||
|
||||
Returns:
|
||||
tuple: (url, tech_list) 或 None(解析失败时)
|
||||
"""
|
||||
try:
|
||||
item = json.loads(line)
|
||||
url = item.get('url', '').strip()
|
||||
cms = item.get('cms', '')
|
||||
|
||||
if not url or not cms:
|
||||
return None
|
||||
|
||||
# cms 字段按逗号分割,去除空白
|
||||
techs = [t.strip() for t in cms.split(',') if t.strip()]
|
||||
|
||||
return (url, techs) if techs else None
|
||||
|
||||
except json.JSONDecodeError:
|
||||
return None
|
||||
|
||||
|
||||
def bulk_merge_tech_field(
    source: str,
    url_techs_map: dict[str, list[str]],
    target_id: int
) -> dict:
    """
    Bulk-merge values into the array-typed ``tech`` column (native PostgreSQL).

    For every URL, the given tech list is merged into the existing ``tech``
    array with SQL-side dedup. When no row exists for a URL, a new record is
    inserted, with an upsert merge on (target_id, url) conflict.

    Args:
        source: data-source key, resolved to a model via SOURCE_MODEL_MAP
        url_techs_map: mapping of URL -> tech names to merge in
        target_id: target scoping the affected rows

    Returns:
        dict: {'updated_count': int, 'created_count': int}
    """
    Model = _get_model_class(source)
    # The table name comes from the model's own metadata, not user input, so
    # interpolating it into the SQL below is not an injection vector; all
    # data values are passed as bound parameters.
    table_name = Model._meta.db_table

    updated_count = 0
    created_count = 0

    with connection.cursor() as cursor:
        for url, techs in url_techs_map.items():
            if not techs:
                continue

            # First try an UPDATE (PostgreSQL array merge + dedup).
            sql = f"""
                UPDATE {table_name}
                SET tech = (
                    SELECT ARRAY(SELECT DISTINCT unnest(
                        COALESCE(tech, ARRAY[]::varchar[]) || %s::varchar[]
                    ))
                )
                WHERE url = %s AND target_id = %s
            """

            cursor.execute(sql, [techs, url, target_id])

            if cursor.rowcount > 0:
                updated_count += cursor.rowcount
            else:
                # No matching row — create one.
                try:
                    # Derive the host column from the URL.
                    parsed = urlparse(url)
                    host = parsed.hostname or ''

                    # Insert with conflict handling (merge arrays on upsert).
                    insert_sql = f"""
                        INSERT INTO {table_name} (target_id, url, host, tech, created_at)
                        VALUES (%s, %s, %s, %s::varchar[], NOW())
                        ON CONFLICT (target_id, url) DO UPDATE SET
                            tech = (
                                SELECT ARRAY(SELECT DISTINCT unnest(
                                    COALESCE({table_name}.tech, ARRAY[]::varchar[]) || EXCLUDED.tech
                                ))
                            )
                    """
                    cursor.execute(insert_sql, [target_id, url, host, techs])
                    created_count += 1

                except Exception as e:
                    # Best-effort: a failed insert is logged and skipped so
                    # one bad record does not abort the whole batch.
                    logger.warning("创建 %s 记录失败 (url=%s): %s", source, url, e)

    return {
        'updated_count': updated_count,
        'created_count': created_count
    }
|
||||
|
||||
|
||||
def _parse_xingfinger_stream_output(
    cmd: str,
    tool_name: str,
    cwd: Optional[str] = None,
    timeout: Optional[int] = None,
    log_file: Optional[str] = None
) -> Generator[tuple[str, list[str]], None, None]:
    """
    Stream-parse the output of a xingfinger command.

    Built on execute_stream: processes the command's stdout line by line in
    real time, yielding each parseable JSON line as (url, tech_list).

    Args:
        cmd: full shell command line to run
        tool_name: tool name forwarded to execute_stream (for its logging)
        cwd: working directory for the subprocess
        timeout: maximum runtime in seconds; on expiry a RuntimeError is raised
        log_file: optional file execute_stream mirrors output to

    Yields:
        tuple[str, list[str]]: one (url, tech_list) per valid output line

    Raises:
        RuntimeError: when the command exceeds *timeout*
        Exception: any other streaming/parsing failure is logged and re-raised
    """
    logger.info("开始流式解析 xingfinger 命令输出 - 命令: %s", cmd)

    total_lines = 0
    valid_records = 0

    try:
        for line in execute_stream(cmd=cmd, tool_name=tool_name, cwd=cwd, shell=True, timeout=timeout, log_file=log_file):
            total_lines += 1

            # Parse the single JSON line; unparseable lines are skipped.
            result = parse_xingfinger_line(line)
            if result is None:
                continue

            valid_records += 1
            yield result

            # Progress log every 500 valid records.
            if valid_records % 500 == 0:
                logger.info("已解析 %d 条有效记录...", valid_records)

    except subprocess.TimeoutExpired as e:
        # Convert a subprocess timeout into a RuntimeError with context.
        error_msg = f"xingfinger 命令执行超时 - 超过 {timeout} 秒"
        logger.warning(error_msg)
        raise RuntimeError(error_msg) from e
    except Exception as e:
        logger.error("流式解析 xingfinger 输出失败: %s", e, exc_info=True)
        raise

    logger.info("流式解析完成 - 总行数: %d, 有效记录: %d", total_lines, valid_records)
|
||||
|
||||
|
||||
@task(name="run_xingfinger_and_stream_update_tech")
def run_xingfinger_and_stream_update_tech_task(
    cmd: str,
    tool_name: str,
    scan_id: int,
    target_id: int,
    source: str,
    cwd: str,
    timeout: int,
    log_file: str,
    batch_size: int = 100
) -> dict:
    """
    Run xingfinger as a stream and update the ``tech`` field in near-real-time.

    Which table is updated depends on *source*:
      - website -> WebSite.tech
      - endpoint -> Endpoint.tech (future extension)

    Flow:
      1. run the xingfinger command, streaming its stdout
      2. parse each JSON output line as it arrives
      3. once *batch_size* distinct URLs accumulate, flush one bulk DB update
      4. array merge + dedup is done with native PostgreSQL SQL
      5. rows that do not exist are created automatically

    Args:
        cmd: shell command line
        tool_name: tool name (for logging)
        scan_id: scan ID (not referenced in this body — presumably kept for
            task tracing; TODO confirm)
        target_id: target whose assets are updated
        source: data-source key from SOURCE_MODEL_MAP
        cwd: working directory for the subprocess
        timeout: maximum runtime in seconds
        log_file: file mirroring tool output
        batch_size: number of distinct URLs per bulk DB flush

    Returns:
        dict: {
            'processed_records': int,
            'updated_count': int,
            'created_count': int,
            'batch_count': int
        }

    Raises:
        subprocess.TimeoutExpired: re-raised on timeout (but see NOTE below)
        RuntimeError: any other failure, wrapped with context
    """
    logger.info(
        "开始执行 xingfinger 并更新 tech - target_id=%s, source=%s, timeout=%s秒",
        target_id, source, timeout
    )

    data_generator = None

    try:
        # Running totals.
        processed_records = 0
        updated_count = 0
        created_count = 0
        batch_count = 0

        # URL -> techs accumulated for the current batch.
        url_techs_map = {}

        # Stream processing.
        data_generator = _parse_xingfinger_stream_output(
            cmd=cmd,
            tool_name=tool_name,
            cwd=cwd,
            timeout=timeout,
            log_file=log_file
        )

        for url, techs in data_generator:
            processed_records += 1

            # Accumulate into url_techs_map.
            if url in url_techs_map:
                # Merge repeated detections of the same URL within a batch.
                url_techs_map[url].extend(techs)
            else:
                url_techs_map[url] = techs

            # Batch is full — flush a bulk update.
            if len(url_techs_map) >= batch_size:
                batch_count += 1
                result = bulk_merge_tech_field(source, url_techs_map, target_id)
                updated_count += result['updated_count']
                created_count += result.get('created_count', 0)

                logger.debug(
                    "批次 %d 完成 - 更新: %d, 创建: %d",
                    batch_count, result['updated_count'], result.get('created_count', 0)
                )

                # Reset the batch.
                url_techs_map = {}

        # Flush the final partial batch.
        if url_techs_map:
            batch_count += 1
            result = bulk_merge_tech_field(source, url_techs_map, target_id)
            updated_count += result['updated_count']
            created_count += result.get('created_count', 0)

        logger.info(
            "✓ xingfinger 执行完成 - 处理记录: %d, 更新: %d, 创建: %d, 批次: %d",
            processed_records, updated_count, created_count, batch_count
        )

        return {
            'processed_records': processed_records,
            'updated_count': updated_count,
            'created_count': created_count,
            'batch_count': batch_count
        }

    except subprocess.TimeoutExpired:
        # NOTE(review): _parse_xingfinger_stream_output converts
        # TimeoutExpired into RuntimeError, so this branch looks unreachable
        # for timeouts raised during iteration — confirm and consider
        # catching that RuntimeError instead.
        logger.warning("⚠️ xingfinger 执行超时 - target_id=%s, timeout=%s秒", target_id, timeout)
        raise
    except Exception as e:
        error_msg = f"xingfinger 执行失败: {e}"
        logger.error(error_msg, exc_info=True)
        raise RuntimeError(error_msg) from e
    finally:
        # Clean-up: close the generator so execute_stream can release the
        # underlying subprocess resources even on early exit.
        if data_generator is not None:
            try:
                data_generator.close()
            except Exception as e:
                logger.debug("关闭生成器时出错: %s", e)
|
||||
@@ -1,119 +1,21 @@
|
||||
"""
|
||||
导出扫描目标到 TXT 文件的 Task
|
||||
|
||||
使用 TargetExportService.export_targets() 统一处理导出逻辑
|
||||
|
||||
根据 Target 类型决定导出内容:
|
||||
- DOMAIN: 从 Subdomain 表导出子域名
|
||||
- IP: 直接写入 target.name
|
||||
- CIDR: 展开 CIDR 范围内的所有 IP
|
||||
|
||||
使用流式处理,避免大量数据导致内存溢出
|
||||
"""
|
||||
import logging
|
||||
import ipaddress
|
||||
from pathlib import Path
|
||||
from prefect import task
|
||||
|
||||
from apps.asset.services.asset.subdomain_service import SubdomainService
|
||||
from apps.targets.services import TargetService
|
||||
from apps.targets.models import Target # 仅用于 TargetType 常量
|
||||
from apps.scan.services import TargetExportService, BlacklistService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _export_domains(target_id: int, target_name: str, output_path: Path, batch_size: int) -> int:
|
||||
"""
|
||||
导出域名类型目标的子域名(支持默认值模式)
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
target_name: 目标名称(域名)
|
||||
output_path: 输出文件路径
|
||||
batch_size: 批次大小
|
||||
|
||||
Returns:
|
||||
int: 导出的记录数
|
||||
|
||||
默认值模式:
|
||||
如果没有子域名,自动使用根域名作为默认子域名
|
||||
"""
|
||||
subdomain_service = SubdomainService()
|
||||
domain_iterator = subdomain_service.iter_subdomain_names_by_target(
|
||||
target_id=target_id,
|
||||
chunk_size=batch_size
|
||||
)
|
||||
|
||||
total_count = 0
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
for domain_name in domain_iterator:
|
||||
f.write(f"{domain_name}\n")
|
||||
total_count += 1
|
||||
|
||||
if total_count % 10000 == 0:
|
||||
logger.info("已导出 %d 个域名...", total_count)
|
||||
|
||||
# ==================== 采用默认域名:如果没有子域名,使用根域名 ====================
|
||||
# 只写入文件供扫描工具使用,不写入数据库
|
||||
# 数据库只存储扫描发现的真实资产
|
||||
if total_count == 0:
|
||||
logger.info("采用默认域名:%s (target_id=%d)", target_name, target_id)
|
||||
|
||||
# 只写入文件,不写入数据库
|
||||
with open(output_path, 'w', encoding='utf-8') as f:
|
||||
f.write(f"{target_name}\n")
|
||||
total_count = 1
|
||||
|
||||
logger.info("✓ 默认域名已写入文件: %s", target_name)
|
||||
|
||||
return total_count
|
||||
|
||||
|
||||
def _export_ip(target_name: str, output_path: Path) -> int:
|
||||
"""
|
||||
导出 IP 类型目标
|
||||
|
||||
Args:
|
||||
target_name: IP 地址
|
||||
output_path: 输出文件路径
|
||||
|
||||
Returns:
|
||||
int: 导出的记录数(始终为 1)
|
||||
"""
|
||||
with open(output_path, 'w', encoding='utf-8') as f:
|
||||
f.write(f"{target_name}\n")
|
||||
return 1
|
||||
|
||||
|
||||
def _export_cidr(target_name: str, output_path: Path) -> int:
|
||||
"""
|
||||
导出 CIDR 类型目标,展开为每个 IP
|
||||
|
||||
Args:
|
||||
target_name: CIDR 范围(如 192.168.1.0/24)
|
||||
output_path: 输出文件路径
|
||||
|
||||
Returns:
|
||||
int: 导出的 IP 数量
|
||||
"""
|
||||
network = ipaddress.ip_network(target_name, strict=False)
|
||||
total_count = 0
|
||||
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
for ip in network.hosts(): # 排除网络地址和广播地址
|
||||
f.write(f"{ip}\n")
|
||||
total_count += 1
|
||||
|
||||
if total_count % 10000 == 0:
|
||||
logger.info("已导出 %d 个 IP...", total_count)
|
||||
|
||||
# 如果是 /32 或 /128(单个 IP),hosts() 会为空,需要特殊处理
|
||||
if total_count == 0:
|
||||
with open(output_path, 'w', encoding='utf-8') as f:
|
||||
f.write(f"{network.network_address}\n")
|
||||
total_count = 1
|
||||
|
||||
return total_count
|
||||
|
||||
|
||||
@task(name="export_scan_targets")
|
||||
def export_scan_targets_task(
|
||||
target_id: int,
|
||||
@@ -145,62 +47,20 @@ def export_scan_targets_task(
|
||||
ValueError: Target 不存在
|
||||
IOError: 文件写入失败
|
||||
"""
|
||||
try:
|
||||
# 1. 通过 Service 层获取 Target
|
||||
target_service = TargetService()
|
||||
target = target_service.get_target(target_id)
|
||||
if not target:
|
||||
raise ValueError(f"Target ID {target_id} 不存在")
|
||||
|
||||
target_type = target.type
|
||||
target_name = target.name
|
||||
|
||||
logger.info(
|
||||
"开始导出扫描目标 - Target ID: %d, Name: %s, Type: %s, 输出文件: %s",
|
||||
target_id, target_name, target_type, output_file
|
||||
)
|
||||
|
||||
# 2. 确保输出目录存在
|
||||
output_path = Path(output_file)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# 3. 根据类型导出
|
||||
if target_type == Target.TargetType.DOMAIN:
|
||||
total_count = _export_domains(target_id, target_name, output_path, batch_size)
|
||||
type_desc = "域名"
|
||||
elif target_type == Target.TargetType.IP:
|
||||
total_count = _export_ip(target_name, output_path)
|
||||
type_desc = "IP"
|
||||
elif target_type == Target.TargetType.CIDR:
|
||||
total_count = _export_cidr(target_name, output_path)
|
||||
type_desc = "CIDR IP"
|
||||
else:
|
||||
raise ValueError(f"不支持的目标类型: {target_type}")
|
||||
|
||||
logger.info(
|
||||
"✓ 扫描目标导出完成 - 类型: %s, 总数: %d, 文件: %s (%.2f KB)",
|
||||
type_desc,
|
||||
total_count,
|
||||
str(output_path),
|
||||
output_path.stat().st_size / 1024
|
||||
)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'output_file': str(output_path),
|
||||
'total_count': total_count,
|
||||
'target_type': target_type
|
||||
}
|
||||
|
||||
except FileNotFoundError as e:
|
||||
logger.error("输出目录不存在: %s", e)
|
||||
raise
|
||||
except PermissionError as e:
|
||||
logger.error("文件写入权限不足: %s", e)
|
||||
raise
|
||||
except ValueError as e:
|
||||
logger.error("参数错误: %s", e)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception("导出扫描目标失败: %s", e)
|
||||
raise
|
||||
# 使用 TargetExportService 处理导出
|
||||
blacklist_service = BlacklistService()
|
||||
export_service = TargetExportService(blacklist_service=blacklist_service)
|
||||
|
||||
result = export_service.export_targets(
|
||||
target_id=target_id,
|
||||
output_path=output_file,
|
||||
batch_size=batch_size
|
||||
)
|
||||
|
||||
# 保持返回值格式不变(向后兼容)
|
||||
return {
|
||||
'success': result['success'],
|
||||
'output_file': result['output_file'],
|
||||
'total_count': result['total_count'],
|
||||
'target_type': result['target_type']
|
||||
}
|
||||
|
||||
@@ -2,52 +2,65 @@
|
||||
导出站点URL到文件的Task
|
||||
|
||||
直接使用 HostPortMapping 表查询 host+port 组合,拼接成URL格式写入文件
|
||||
使用 TargetExportService 处理默认值回退逻辑
|
||||
|
||||
默认值模式:
|
||||
- 如果没有 HostPortMapping 数据,写入默认 URL 到文件(不写入数据库)
|
||||
- DOMAIN: http(s)://target_name
|
||||
- IP: http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 http(s)://ip
|
||||
特殊逻辑:
|
||||
- 80 端口:只生成 HTTP URL(省略端口号)
|
||||
- 443 端口:只生成 HTTPS URL(省略端口号)
|
||||
- 其他端口:生成 HTTP 和 HTTPS 两个URL(带端口号)
|
||||
"""
|
||||
import logging
|
||||
import ipaddress
|
||||
from pathlib import Path
|
||||
from prefect import task
|
||||
from typing import Optional
|
||||
|
||||
from apps.asset.services import HostPortMappingService
|
||||
from apps.targets.services import TargetService
|
||||
from apps.targets.models import Target
|
||||
from apps.scan.services import TargetExportService, BlacklistService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _generate_urls_from_port(host: str, port: int) -> list[str]:
|
||||
"""
|
||||
根据端口生成 URL 列表
|
||||
|
||||
- 80 端口:只生成 HTTP URL(省略端口号)
|
||||
- 443 端口:只生成 HTTPS URL(省略端口号)
|
||||
- 其他端口:生成 HTTP 和 HTTPS 两个URL(带端口号)
|
||||
"""
|
||||
if port == 80:
|
||||
return [f"http://{host}"]
|
||||
elif port == 443:
|
||||
return [f"https://{host}"]
|
||||
else:
|
||||
return [f"http://{host}:{port}", f"https://{host}:{port}"]
|
||||
|
||||
|
||||
@task(name="export_site_urls")
|
||||
def export_site_urls_task(
|
||||
target_id: int,
|
||||
output_file: str,
|
||||
target_name: Optional[str] = None,
|
||||
batch_size: int = 1000
|
||||
) -> dict:
|
||||
"""
|
||||
导出目标下的所有站点URL到文件(基于 HostPortMapping 表)
|
||||
|
||||
功能:
|
||||
1. 从 HostPortMapping 表查询 target 下所有 host+port 组合
|
||||
2. 拼接成URL格式(标准端口80/443将省略端口号)
|
||||
3. 写入到指定文件中
|
||||
数据源: HostPortMapping (host + port)
|
||||
|
||||
默认值模式(懒加载):
|
||||
- 如果没有 HostPortMapping 数据,根据 Target 类型生成默认 URL
|
||||
- DOMAIN: http(s)://target_name
|
||||
特殊逻辑:
|
||||
- 80 端口:只生成 HTTP URL(省略端口号)
|
||||
- 443 端口:只生成 HTTPS URL(省略端口号)
|
||||
- 其他端口:生成 HTTP 和 HTTPS 两个URL(带端口号)
|
||||
|
||||
懒加载模式:
|
||||
- 如果数据库为空,根据 Target 类型生成默认 URL
|
||||
- DOMAIN: http(s)://domain
|
||||
- IP: http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 URL
|
||||
|
||||
Args:
|
||||
target_id: 目标ID
|
||||
output_file: 输出文件路径(绝对路径)
|
||||
target_name: 目标名称(用于懒加载时写入默认值)
|
||||
batch_size: 每次处理的批次大小,默认1000(暂未使用,预留)
|
||||
batch_size: 每次处理的批次大小
|
||||
|
||||
Returns:
|
||||
dict: {
|
||||
@@ -61,155 +74,54 @@ def export_site_urls_task(
|
||||
ValueError: 参数错误
|
||||
IOError: 文件写入失败
|
||||
"""
|
||||
try:
|
||||
logger.info("开始统计站点URL - Target ID: %d, 输出文件: %s", target_id, output_file)
|
||||
|
||||
# 确保输出目录存在
|
||||
output_path = Path(output_file)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# 直接查询 HostPortMapping 表,按 host 排序
|
||||
service = HostPortMappingService()
|
||||
associations = service.iter_host_port_by_target(
|
||||
target_id=target_id,
|
||||
batch_size=batch_size,
|
||||
)
|
||||
|
||||
total_urls = 0
|
||||
association_count = 0
|
||||
|
||||
# 流式写入文件
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
for assoc in associations:
|
||||
association_count += 1
|
||||
host = assoc['host']
|
||||
port = assoc['port']
|
||||
|
||||
# 根据端口号生成URL
|
||||
# 80 端口:只生成 HTTP URL(省略端口号)
|
||||
# 443 端口:只生成 HTTPS URL(省略端口号)
|
||||
# 其他端口:生成 HTTP 和 HTTPS 两个URL(带端口号)
|
||||
if port == 80:
|
||||
# HTTP 标准端口,省略端口号
|
||||
url = f"http://{host}"
|
||||
f.write(f"{url}\n")
|
||||
total_urls += 1
|
||||
elif port == 443:
|
||||
# HTTPS 标准端口,省略端口号
|
||||
url = f"https://{host}"
|
||||
f.write(f"{url}\n")
|
||||
total_urls += 1
|
||||
else:
|
||||
# 非标准端口,生成 HTTP 和 HTTPS 两个URL
|
||||
http_url = f"http://{host}:{port}"
|
||||
https_url = f"https://{host}:{port}"
|
||||
f.write(f"{http_url}\n")
|
||||
f.write(f"{https_url}\n")
|
||||
total_urls += 2
|
||||
|
||||
# 每处理1000条记录打印一次进度
|
||||
if association_count % 1000 == 0:
|
||||
logger.info("已处理 %d 条关联,生成 %d 个URL...", association_count, total_urls)
|
||||
|
||||
logger.info(
|
||||
"✓ 站点URL导出完成 - 关联数: %d, 总URL数: %d, 文件: %s (%.2f KB)",
|
||||
association_count,
|
||||
total_urls,
|
||||
str(output_path),
|
||||
output_path.stat().st_size / 1024
|
||||
)
|
||||
|
||||
# ==================== 懒加载模式:根据 Target 类型生成默认 URL ====================
|
||||
if total_urls == 0:
|
||||
total_urls = _write_default_urls(target_id, target_name, output_path)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'output_file': str(output_path),
|
||||
'total_urls': total_urls,
|
||||
'association_count': association_count
|
||||
}
|
||||
|
||||
except FileNotFoundError as e:
|
||||
logger.error("输出目录不存在: %s", e)
|
||||
raise
|
||||
except PermissionError as e:
|
||||
logger.error("文件写入权限不足: %s", e)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception("导出站点URL失败: %s", e)
|
||||
raise
|
||||
|
||||
|
||||
def _write_default_urls(target_id: int, target_name: Optional[str], output_path: Path) -> int:
|
||||
"""
|
||||
懒加载模式:根据 Target 类型生成默认 URL
|
||||
logger.info("开始统计站点URL - Target ID: %d, 输出文件: %s", target_id, output_file)
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
target_name: 目标名称(可选,如果为空则从数据库查询)
|
||||
output_path: 输出文件路径
|
||||
|
||||
Returns:
|
||||
int: 生成的 URL 数量
|
||||
"""
|
||||
# 获取 Target 信息
|
||||
target_service = TargetService()
|
||||
target = target_service.get_target(target_id)
|
||||
# 确保输出目录存在
|
||||
output_path = Path(output_file)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not target:
|
||||
logger.warning("Target ID %d 不存在,无法生成默认 URL", target_id)
|
||||
return 0
|
||||
# 初始化黑名单服务
|
||||
blacklist_service = BlacklistService()
|
||||
|
||||
target_name = target.name
|
||||
target_type = target.type
|
||||
|
||||
logger.info("懒加载模式:Target 类型=%s, 名称=%s", target_type, target_name)
|
||||
# 直接查询 HostPortMapping 表,按 host 排序
|
||||
service = HostPortMappingService()
|
||||
associations = service.iter_host_port_by_target(
|
||||
target_id=target_id,
|
||||
batch_size=batch_size,
|
||||
)
|
||||
|
||||
total_urls = 0
|
||||
association_count = 0
|
||||
|
||||
# 流式写入文件(特殊端口逻辑)
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
if target_type == Target.TargetType.DOMAIN:
|
||||
# 域名类型:生成 http(s)://domain
|
||||
f.write(f"http://{target_name}\n")
|
||||
f.write(f"https://{target_name}\n")
|
||||
total_urls = 2
|
||||
logger.info("✓ 域名默认 URL 已写入: http(s)://%s", target_name)
|
||||
for assoc in associations:
|
||||
association_count += 1
|
||||
host = assoc['host']
|
||||
port = assoc['port']
|
||||
|
||||
elif target_type == Target.TargetType.IP:
|
||||
# IP 类型:生成 http(s)://ip
|
||||
f.write(f"http://{target_name}\n")
|
||||
f.write(f"https://{target_name}\n")
|
||||
total_urls = 2
|
||||
logger.info("✓ IP 默认 URL 已写入: http(s)://%s", target_name)
|
||||
# 根据端口号生成URL
|
||||
for url in _generate_urls_from_port(host, port):
|
||||
if blacklist_service.filter_url(url):
|
||||
f.write(f"{url}\n")
|
||||
total_urls += 1
|
||||
|
||||
elif target_type == Target.TargetType.CIDR:
|
||||
# CIDR 类型:展开为所有 IP 的 URL
|
||||
try:
|
||||
network = ipaddress.ip_network(target_name, strict=False)
|
||||
|
||||
for ip in network.hosts(): # 排除网络地址和广播地址
|
||||
f.write(f"http://{ip}\n")
|
||||
f.write(f"https://{ip}\n")
|
||||
total_urls += 2
|
||||
|
||||
if total_urls % 10000 == 0:
|
||||
logger.info("已生成 %d 个 URL...", total_urls)
|
||||
|
||||
# 如果是 /32 或 /128(单个 IP),hosts() 会为空
|
||||
if total_urls == 0:
|
||||
ip = str(network.network_address)
|
||||
f.write(f"http://{ip}\n")
|
||||
f.write(f"https://{ip}\n")
|
||||
total_urls = 2
|
||||
|
||||
logger.info("✓ CIDR 默认 URL 已写入: %d 个 URL (来自 %s)", total_urls, target_name)
|
||||
|
||||
except ValueError as e:
|
||||
logger.error("CIDR 解析失败: %s - %s", target_name, e)
|
||||
return 0
|
||||
else:
|
||||
logger.warning("不支持的 Target 类型: %s", target_type)
|
||||
return 0
|
||||
if association_count % 1000 == 0:
|
||||
logger.info("已处理 %d 条关联,生成 %d 个URL...", association_count, total_urls)
|
||||
|
||||
return total_urls
|
||||
logger.info(
|
||||
"✓ 站点URL导出完成 - 关联数: %d, 总URL数: %d, 文件: %s",
|
||||
association_count, total_urls, str(output_path)
|
||||
)
|
||||
|
||||
# 默认值回退模式:使用 TargetExportService
|
||||
if total_urls == 0:
|
||||
export_service = TargetExportService(blacklist_service=blacklist_service)
|
||||
total_urls = export_service._generate_default_urls(target_id, output_path)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'output_file': str(output_path),
|
||||
'total_urls': total_urls,
|
||||
'association_count': association_count
|
||||
}
|
||||
|
||||
@@ -1,25 +1,16 @@
|
||||
"""
|
||||
导出站点 URL 列表任务
|
||||
|
||||
从 WebSite 表导出站点 URL 列表到文件(用于 katana 等爬虫工具)
|
||||
|
||||
使用流式写入,避免内存溢出
|
||||
|
||||
懒加载模式:
|
||||
- 如果 WebSite 表为空,根据 Target 类型生成默认 URL
|
||||
- DOMAIN: 写入 http(s)://domain
|
||||
- IP: 写入 http(s)://ip
|
||||
- CIDR: 展开为所有 IP
|
||||
使用 TargetExportService 统一处理导出逻辑和默认值回退
|
||||
数据源: WebSite.url(用于 katana 等爬虫工具)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import ipaddress
|
||||
from pathlib import Path
|
||||
from prefect import task
|
||||
from typing import Optional
|
||||
|
||||
from apps.targets.services import TargetService
|
||||
from apps.targets.models import Target
|
||||
from apps.asset.models import WebSite
|
||||
from apps.scan.services import TargetExportService, BlacklistService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -33,21 +24,23 @@ def export_sites_task(
|
||||
output_file: str,
|
||||
target_id: int,
|
||||
scan_id: int,
|
||||
target_name: Optional[str] = None,
|
||||
batch_size: int = 1000
|
||||
) -> dict:
|
||||
"""
|
||||
导出站点 URL 列表到文件(用于 katana 等爬虫工具)
|
||||
|
||||
数据源: WebSite.url
|
||||
|
||||
懒加载模式:
|
||||
- 如果 WebSite 表为空,根据 Target 类型生成默认 URL
|
||||
- 数据库只存储"真实发现"的资产
|
||||
- 如果数据库为空,根据 Target 类型生成默认 URL
|
||||
- DOMAIN: http(s)://domain
|
||||
- IP: http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 URL
|
||||
|
||||
Args:
|
||||
output_file: 输出文件路径
|
||||
target_id: 目标 ID
|
||||
scan_id: 扫描 ID
|
||||
target_name: 目标名称(用于懒加载时写入默认值)
|
||||
scan_id: 扫描 ID(保留参数,兼容旧调用)
|
||||
batch_size: 批次大小(内存优化)
|
||||
|
||||
Returns:
|
||||
@@ -60,109 +53,22 @@ def export_sites_task(
|
||||
ValueError: 参数错误
|
||||
RuntimeError: 执行失败
|
||||
"""
|
||||
try:
|
||||
logger.info("开始导出站点 URL 列表 - Target ID: %d", target_id)
|
||||
|
||||
# 确保输出目录存在
|
||||
output_path = Path(output_file)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# 从 WebSite 表导出站点 URL
|
||||
from apps.asset.services import WebSiteService
|
||||
|
||||
website_service = WebSiteService()
|
||||
|
||||
# 流式写入文件
|
||||
asset_count = 0
|
||||
with open(output_path, 'w') as f:
|
||||
for url in website_service.iter_website_urls_by_target(target_id, batch_size):
|
||||
f.write(f"{url}\n")
|
||||
asset_count += 1
|
||||
|
||||
if asset_count % batch_size == 0:
|
||||
f.flush()
|
||||
|
||||
# ==================== 懒加载模式:根据 Target 类型生成默认 URL ====================
|
||||
if asset_count == 0:
|
||||
asset_count = _write_default_urls(target_id, target_name, output_path)
|
||||
|
||||
logger.info("✓ 站点 URL 导出完成 - 文件: %s, 数量: %d", output_file, asset_count)
|
||||
|
||||
return {
|
||||
'output_file': output_file,
|
||||
'asset_count': asset_count,
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error("导出站点 URL 失败: %s", e, exc_info=True)
|
||||
raise RuntimeError(f"导出站点 URL 失败: {e}") from e
|
||||
|
||||
|
||||
def _write_default_urls(target_id: int, target_name: Optional[str], output_path: Path) -> int:
|
||||
"""
|
||||
懒加载模式:根据 Target 类型生成默认 URL 列表
|
||||
# 构建数据源 queryset(Task 层决定数据源)
|
||||
queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
target_name: 目标名称
|
||||
output_path: 输出文件路径
|
||||
|
||||
Returns:
|
||||
int: 生成的 URL 数量
|
||||
"""
|
||||
target_service = TargetService()
|
||||
target = target_service.get_target(target_id)
|
||||
# 使用 TargetExportService 处理导出
|
||||
blacklist_service = BlacklistService()
|
||||
export_service = TargetExportService(blacklist_service=blacklist_service)
|
||||
|
||||
if not target:
|
||||
logger.warning("Target ID %d 不存在,无法生成默认 URL", target_id)
|
||||
return 0
|
||||
result = export_service.export_urls(
|
||||
target_id=target_id,
|
||||
output_path=output_file,
|
||||
queryset=queryset,
|
||||
batch_size=batch_size
|
||||
)
|
||||
|
||||
target_name = target.name
|
||||
target_type = target.type
|
||||
|
||||
logger.info("懒加载模式:Target 类型=%s, 名称=%s", target_type, target_name)
|
||||
|
||||
total_urls = 0
|
||||
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
if target_type == Target.TargetType.DOMAIN:
|
||||
f.write(f"http://{target_name}\n")
|
||||
f.write(f"https://{target_name}\n")
|
||||
total_urls = 2
|
||||
logger.info("✓ 域名默认 URL 已写入: http(s)://%s", target_name)
|
||||
|
||||
elif target_type == Target.TargetType.IP:
|
||||
f.write(f"http://{target_name}\n")
|
||||
f.write(f"https://{target_name}\n")
|
||||
total_urls = 2
|
||||
logger.info("✓ IP 默认 URL 已写入: http(s)://%s", target_name)
|
||||
|
||||
elif target_type == Target.TargetType.CIDR:
|
||||
try:
|
||||
network = ipaddress.ip_network(target_name, strict=False)
|
||||
|
||||
for ip in network.hosts():
|
||||
f.write(f"http://{ip}\n")
|
||||
f.write(f"https://{ip}\n")
|
||||
total_urls += 2
|
||||
|
||||
if total_urls % 10000 == 0:
|
||||
logger.info("已生成 %d 个 URL...", total_urls)
|
||||
|
||||
# /32 或 /128 特殊处理
|
||||
if total_urls == 0:
|
||||
ip = str(network.network_address)
|
||||
f.write(f"http://{ip}\n")
|
||||
f.write(f"https://{ip}\n")
|
||||
total_urls = 2
|
||||
|
||||
logger.info("✓ CIDR 默认 URL 已写入: %d 个 URL (来自 %s)", total_urls, target_name)
|
||||
|
||||
except ValueError as e:
|
||||
logger.error("CIDR 解析失败: %s - %s", target_name, e)
|
||||
return 0
|
||||
else:
|
||||
logger.warning("不支持的 Target 类型: %s", target_type)
|
||||
return 0
|
||||
|
||||
return total_urls
|
||||
# 保持返回值格式不变(向后兼容)
|
||||
return {
|
||||
'output_file': result['output_file'],
|
||||
'asset_count': result['total_count'],
|
||||
}
|
||||
|
||||
@@ -1,25 +1,16 @@
|
||||
"""导出 Endpoint URL 到文件的 Task
|
||||
|
||||
基于 EndpointService.iter_endpoint_urls_by_target 按目标流式导出端点 URL,
|
||||
用于漏洞扫描(如 Dalfox XSS)的输入文件生成。
|
||||
|
||||
默认值模式:
|
||||
- 如果没有 Endpoint,根据 Target 类型生成默认 URL
|
||||
- DOMAIN: http(s)://target_name
|
||||
- IP: http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 http(s)://ip
|
||||
使用 TargetExportService 统一处理导出逻辑和默认值回退
|
||||
数据源: Endpoint.url
|
||||
"""
|
||||
|
||||
import logging
|
||||
import ipaddress
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional
|
||||
|
||||
from prefect import task
|
||||
|
||||
from apps.asset.services import EndpointService
|
||||
from apps.targets.services import TargetService
|
||||
from apps.targets.models import Target
|
||||
from apps.asset.models import Endpoint
|
||||
from apps.scan.services import TargetExportService, BlacklistService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -29,17 +20,21 @@ def export_endpoints_task(
|
||||
target_id: int,
|
||||
output_file: str,
|
||||
batch_size: int = 1000,
|
||||
target_name: Optional[str] = None,
|
||||
) -> Dict[str, object]:
|
||||
"""导出目标下的所有 Endpoint URL 到文本文件。
|
||||
|
||||
默认值模式:如果没有 Endpoint,根据 Target 类型生成默认 URL
|
||||
数据源: Endpoint.url
|
||||
|
||||
懒加载模式:
|
||||
- 如果数据库为空,根据 Target 类型生成默认 URL
|
||||
- DOMAIN: http(s)://domain
|
||||
- IP: http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 URL
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
output_file: 输出文件路径(绝对路径)
|
||||
batch_size: 每次从数据库迭代的批大小
|
||||
target_name: 目标名称(用于默认值模式)
|
||||
|
||||
Returns:
|
||||
dict: {
|
||||
@@ -48,117 +43,23 @@ def export_endpoints_task(
|
||||
"total_count": int,
|
||||
}
|
||||
"""
|
||||
try:
|
||||
logger.info("开始导出 Endpoint URL - Target ID: %d, 输出文件: %s", target_id, output_file)
|
||||
|
||||
output_path = Path(output_file)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
service = EndpointService()
|
||||
url_iterator = service.iter_endpoint_urls_by_target(target_id, chunk_size=batch_size)
|
||||
|
||||
total_count = 0
|
||||
with open(output_path, "w", encoding="utf-8", buffering=8192) as f:
|
||||
for url in url_iterator:
|
||||
f.write(f"{url}\n")
|
||||
total_count += 1
|
||||
|
||||
if total_count % 10000 == 0:
|
||||
logger.info("已导出 %d 个 Endpoint URL...", total_count)
|
||||
|
||||
# ==================== 懒加载模式:根据 Target 类型生成默认 URL ====================
|
||||
if total_count == 0:
|
||||
total_count = _write_default_urls(target_id, target_name, output_path)
|
||||
|
||||
logger.info(
|
||||
"✓ Endpoint URL 导出完成 - 总数: %d, 文件: %s (%.2f KB)",
|
||||
total_count,
|
||||
str(output_path),
|
||||
output_path.stat().st_size / 1024,
|
||||
)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"output_file": str(output_path),
|
||||
"total_count": total_count,
|
||||
}
|
||||
|
||||
except FileNotFoundError as e:
|
||||
logger.error("输出目录不存在: %s", e)
|
||||
raise
|
||||
except PermissionError as e:
|
||||
logger.error("文件写入权限不足: %s", e)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception("导出 Endpoint URL 失败: %s", e)
|
||||
raise
|
||||
|
||||
|
||||
def _write_default_urls(target_id: int, target_name: Optional[str], output_path: Path) -> int:
|
||||
"""
|
||||
懒加载模式:根据 Target 类型生成默认 URL
|
||||
# 构建数据源 queryset(Task 层决定数据源)
|
||||
queryset = Endpoint.objects.filter(target_id=target_id).values_list('url', flat=True)
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
target_name: 目标名称(可选,如果为空则从数据库查询)
|
||||
output_path: 输出文件路径
|
||||
|
||||
Returns:
|
||||
int: 生成的 URL 数量
|
||||
"""
|
||||
target_service = TargetService()
|
||||
target = target_service.get_target(target_id)
|
||||
# 使用 TargetExportService 处理导出
|
||||
blacklist_service = BlacklistService()
|
||||
export_service = TargetExportService(blacklist_service=blacklist_service)
|
||||
|
||||
if not target:
|
||||
logger.warning("Target ID %d 不存在,无法生成默认 URL", target_id)
|
||||
return 0
|
||||
result = export_service.export_urls(
|
||||
target_id=target_id,
|
||||
output_path=output_file,
|
||||
queryset=queryset,
|
||||
batch_size=batch_size
|
||||
)
|
||||
|
||||
target_name = target.name
|
||||
target_type = target.type
|
||||
|
||||
logger.info("懒加载模式:Target 类型=%s, 名称=%s", target_type, target_name)
|
||||
|
||||
total_urls = 0
|
||||
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
if target_type == Target.TargetType.DOMAIN:
|
||||
f.write(f"http://{target_name}\n")
|
||||
f.write(f"https://{target_name}\n")
|
||||
total_urls = 2
|
||||
logger.info("✓ 域名默认 URL 已写入: http(s)://%s", target_name)
|
||||
|
||||
elif target_type == Target.TargetType.IP:
|
||||
f.write(f"http://{target_name}\n")
|
||||
f.write(f"https://{target_name}\n")
|
||||
total_urls = 2
|
||||
logger.info("✓ IP 默认 URL 已写入: http(s)://%s", target_name)
|
||||
|
||||
elif target_type == Target.TargetType.CIDR:
|
||||
try:
|
||||
network = ipaddress.ip_network(target_name, strict=False)
|
||||
|
||||
for ip in network.hosts():
|
||||
f.write(f"http://{ip}\n")
|
||||
f.write(f"https://{ip}\n")
|
||||
total_urls += 2
|
||||
|
||||
if total_urls % 10000 == 0:
|
||||
logger.info("已生成 %d 个 URL...", total_urls)
|
||||
|
||||
# /32 或 /128 特殊处理
|
||||
if total_urls == 0:
|
||||
ip = str(network.network_address)
|
||||
f.write(f"http://{ip}\n")
|
||||
f.write(f"https://{ip}\n")
|
||||
total_urls = 2
|
||||
|
||||
logger.info("✓ CIDR 默认 URL 已写入: %d 个 URL (来自 %s)", total_urls, target_name)
|
||||
|
||||
except ValueError as e:
|
||||
logger.error("CIDR 解析失败: %s - %s", target_name, e)
|
||||
return 0
|
||||
else:
|
||||
logger.warning("不支持的 Target 类型: %s", target_type)
|
||||
return 0
|
||||
|
||||
return total_urls
|
||||
# 保持返回值格式不变(向后兼容)
|
||||
return {
|
||||
"success": result['success'],
|
||||
"output_file": result['output_file'],
|
||||
"total_count": result['total_count'],
|
||||
}
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
"""
|
||||
工作空间相关的 Prefect Tasks
|
||||
|
||||
负责扫描工作空间的创建、验证和管理
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from prefect import task
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(
|
||||
name="create_scan_workspace",
|
||||
description="创建并验证 Scan 工作空间目录",
|
||||
retries=2,
|
||||
retry_delay_seconds=5
|
||||
)
|
||||
def create_scan_workspace_task(scan_workspace_dir: str) -> Path:
|
||||
"""
|
||||
创建并验证 Scan 工作空间目录
|
||||
|
||||
Args:
|
||||
scan_workspace_dir: Scan 工作空间目录路径
|
||||
|
||||
Returns:
|
||||
Path: 创建的 Scan 工作空间路径对象
|
||||
|
||||
Raises:
|
||||
OSError: 目录创建失败或不可写
|
||||
"""
|
||||
scan_workspace_path = Path(scan_workspace_dir)
|
||||
|
||||
# 创建目录
|
||||
try:
|
||||
scan_workspace_path.mkdir(parents=True, exist_ok=True)
|
||||
logger.info("✓ Scan 工作空间已创建: %s", scan_workspace_path)
|
||||
except OSError as e:
|
||||
logger.error("创建 Scan 工作空间失败: %s - %s", scan_workspace_dir, e)
|
||||
raise
|
||||
|
||||
# 验证目录是否可写
|
||||
test_file = scan_workspace_path / ".test_write"
|
||||
try:
|
||||
test_file.touch()
|
||||
test_file.unlink()
|
||||
logger.info("✓ Scan 工作空间验证通过(可写): %s", scan_workspace_path)
|
||||
except OSError as e:
|
||||
error_msg = f"Scan 工作空间不可写: {scan_workspace_path}"
|
||||
logger.error(error_msg)
|
||||
raise OSError(error_msg) from e
|
||||
|
||||
return scan_workspace_path
|
||||
@@ -10,11 +10,15 @@ from .command_executor import execute_and_wait, execute_stream
|
||||
from .wordlist_helpers import ensure_wordlist_local
|
||||
from .nuclei_helpers import ensure_nuclei_templates_local
|
||||
from .performance import FlowPerformanceTracker, CommandPerformanceTracker
|
||||
from .workspace_utils import setup_scan_workspace, setup_scan_directory
|
||||
from . import config_parser
|
||||
|
||||
__all__ = [
|
||||
# 目录清理
|
||||
'remove_directory',
|
||||
# 工作空间
|
||||
'setup_scan_workspace', # 创建 Scan 根工作空间
|
||||
'setup_scan_directory', # 创建扫描子目录
|
||||
# 命令构建
|
||||
'build_scan_command', # 扫描工具命令构建(基于 f-string)
|
||||
# 命令执行
|
||||
|
||||
@@ -169,26 +169,23 @@ def parse_enabled_tools_from_dict(
|
||||
)
|
||||
|
||||
if enabled_value:
|
||||
# 检查 timeout 必需参数
|
||||
if 'timeout' not in config:
|
||||
raise ValueError(f"工具 {name} 缺少必需参数 'timeout'")
|
||||
# timeout 默认为 'auto',由具体 Flow 自动计算
|
||||
timeout_value = config.get('timeout', 'auto')
|
||||
|
||||
# 验证 timeout 值的有效性
|
||||
timeout_value = config['timeout']
|
||||
|
||||
if timeout_value == 'auto':
|
||||
# 允许 'auto',由具体 Flow 处理
|
||||
pass
|
||||
elif isinstance(timeout_value, int):
|
||||
if timeout_value <= 0:
|
||||
raise ValueError(f"工具 {name} 的 timeout 参数无效({timeout_value}),必须大于0")
|
||||
else:
|
||||
raise ValueError(
|
||||
f"工具 {name} 的 timeout 参数类型错误:期望 int 或 'auto',实际 {type(timeout_value).__name__}"
|
||||
)
|
||||
if timeout_value != 'auto':
|
||||
if isinstance(timeout_value, int):
|
||||
if timeout_value <= 0:
|
||||
raise ValueError(f"工具 {name} 的 timeout 参数无效({timeout_value}),必须大于0")
|
||||
else:
|
||||
raise ValueError(
|
||||
f"工具 {name} 的 timeout 参数类型错误:期望 int 或 'auto',实际 {type(timeout_value).__name__}"
|
||||
)
|
||||
|
||||
# 将配置 key 中划线转为下划线,统一给下游代码使用
|
||||
enabled_tools[name] = _normalize_config_keys(config)
|
||||
normalized_config = _normalize_config_keys(config)
|
||||
normalized_config['timeout'] = timeout_value # 确保 timeout 存在
|
||||
enabled_tools[name] = normalized_config
|
||||
|
||||
logger.info(f"扫描类型: {scan_type}, 启用工具: {len(enabled_tools)}/{len(tools)}")
|
||||
|
||||
|
||||
230
backend/apps/scan/utils/fingerprint_helpers.py
Normal file
230
backend/apps/scan/utils/fingerprint_helpers.py
Normal file
@@ -0,0 +1,230 @@
|
||||
"""指纹文件本地缓存工具
|
||||
|
||||
提供 Worker 侧的指纹文件缓存和版本校验功能,用于:
|
||||
- 指纹识别扫描 (fingerprint_detect_flow)
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# 指纹库映射:lib_name → ensure_func_name
|
||||
FINGERPRINT_LIB_MAP = {
|
||||
'ehole': 'ensure_ehole_fingerprint_local',
|
||||
'goby': 'ensure_goby_fingerprint_local',
|
||||
'wappalyzer': 'ensure_wappalyzer_fingerprint_local',
|
||||
}
|
||||
|
||||
|
||||
def ensure_ehole_fingerprint_local() -> str:
|
||||
"""
|
||||
确保本地存在最新的 EHole 指纹文件(带缓存)
|
||||
|
||||
流程:
|
||||
1. 获取当前指纹库版本
|
||||
2. 检查缓存文件是否存在且版本匹配
|
||||
3. 版本不匹配则重新导出
|
||||
|
||||
Returns:
|
||||
str: 本地指纹文件路径
|
||||
|
||||
使用场景:
|
||||
Worker 执行扫描任务前调用,获取最新指纹文件路径
|
||||
"""
|
||||
from apps.engine.services.fingerprints import EholeFingerprintService
|
||||
|
||||
service = EholeFingerprintService()
|
||||
current_version = service.get_fingerprint_version()
|
||||
|
||||
# 缓存目录和文件
|
||||
base_dir = getattr(settings, 'FINGERPRINTS_BASE_PATH', '/opt/xingrin/fingerprints')
|
||||
os.makedirs(base_dir, exist_ok=True)
|
||||
cache_file = os.path.join(base_dir, 'ehole.json')
|
||||
version_file = os.path.join(base_dir, 'ehole.version')
|
||||
|
||||
# 检查缓存版本
|
||||
cached_version = None
|
||||
if os.path.exists(version_file):
|
||||
try:
|
||||
with open(version_file, 'r') as f:
|
||||
cached_version = f.read().strip()
|
||||
except OSError as e:
|
||||
logger.warning("读取版本文件失败: %s", e)
|
||||
|
||||
# 版本匹配,直接返回缓存
|
||||
if cached_version == current_version and os.path.exists(cache_file):
|
||||
logger.info("EHole 指纹文件缓存有效(版本匹配): %s", cache_file)
|
||||
return cache_file
|
||||
|
||||
# 版本不匹配,重新导出
|
||||
logger.info(
|
||||
"EHole 指纹文件需要更新: cached=%s, current=%s",
|
||||
cached_version, current_version
|
||||
)
|
||||
count = service.export_to_file(cache_file)
|
||||
|
||||
# 写入版本文件
|
||||
try:
|
||||
with open(version_file, 'w') as f:
|
||||
f.write(current_version)
|
||||
except OSError as e:
|
||||
logger.warning("写入版本文件失败: %s", e)
|
||||
|
||||
logger.info("EHole 指纹文件已更新: %s", cache_file)
|
||||
return cache_file
|
||||
|
||||
|
||||
def ensure_goby_fingerprint_local() -> str:
|
||||
"""
|
||||
确保本地存在最新的 Goby 指纹文件(带缓存)
|
||||
|
||||
Returns:
|
||||
str: 本地指纹文件路径
|
||||
"""
|
||||
from apps.engine.services.fingerprints import GobyFingerprintService
|
||||
|
||||
service = GobyFingerprintService()
|
||||
current_version = service.get_fingerprint_version()
|
||||
|
||||
# 缓存目录和文件
|
||||
base_dir = getattr(settings, 'FINGERPRINTS_BASE_PATH', '/opt/xingrin/fingerprints')
|
||||
os.makedirs(base_dir, exist_ok=True)
|
||||
cache_file = os.path.join(base_dir, 'goby.json')
|
||||
version_file = os.path.join(base_dir, 'goby.version')
|
||||
|
||||
# 检查缓存版本
|
||||
cached_version = None
|
||||
if os.path.exists(version_file):
|
||||
try:
|
||||
with open(version_file, 'r') as f:
|
||||
cached_version = f.read().strip()
|
||||
except OSError as e:
|
||||
logger.warning("读取 Goby 版本文件失败: %s", e)
|
||||
|
||||
# 版本匹配,直接返回缓存
|
||||
if cached_version == current_version and os.path.exists(cache_file):
|
||||
logger.info("Goby 指纹文件缓存有效(版本匹配): %s", cache_file)
|
||||
return cache_file
|
||||
|
||||
# 版本不匹配,重新导出
|
||||
logger.info(
|
||||
"Goby 指纹文件需要更新: cached=%s, current=%s",
|
||||
cached_version, current_version
|
||||
)
|
||||
# Goby 导出格式是数组,直接写入
|
||||
data = service.get_export_data()
|
||||
with open(cache_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(data, f, ensure_ascii=False)
|
||||
|
||||
# 写入版本文件
|
||||
try:
|
||||
with open(version_file, 'w') as f:
|
||||
f.write(current_version)
|
||||
except OSError as e:
|
||||
logger.warning("写入 Goby 版本文件失败: %s", e)
|
||||
|
||||
logger.info("Goby 指纹文件已更新: %s", cache_file)
|
||||
return cache_file
|
||||
|
||||
|
||||
def ensure_wappalyzer_fingerprint_local() -> str:
|
||||
"""
|
||||
确保本地存在最新的 Wappalyzer 指纹文件(带缓存)
|
||||
|
||||
Returns:
|
||||
str: 本地指纹文件路径
|
||||
"""
|
||||
from apps.engine.services.fingerprints import WappalyzerFingerprintService
|
||||
|
||||
service = WappalyzerFingerprintService()
|
||||
current_version = service.get_fingerprint_version()
|
||||
|
||||
# 缓存目录和文件
|
||||
base_dir = getattr(settings, 'FINGERPRINTS_BASE_PATH', '/opt/xingrin/fingerprints')
|
||||
os.makedirs(base_dir, exist_ok=True)
|
||||
cache_file = os.path.join(base_dir, 'wappalyzer.json')
|
||||
version_file = os.path.join(base_dir, 'wappalyzer.version')
|
||||
|
||||
# 检查缓存版本
|
||||
cached_version = None
|
||||
if os.path.exists(version_file):
|
||||
try:
|
||||
with open(version_file, 'r') as f:
|
||||
cached_version = f.read().strip()
|
||||
except OSError as e:
|
||||
logger.warning("读取 Wappalyzer 版本文件失败: %s", e)
|
||||
|
||||
# 版本匹配,直接返回缓存
|
||||
if cached_version == current_version and os.path.exists(cache_file):
|
||||
logger.info("Wappalyzer 指纹文件缓存有效(版本匹配): %s", cache_file)
|
||||
return cache_file
|
||||
|
||||
# 版本不匹配,重新导出
|
||||
logger.info(
|
||||
"Wappalyzer 指纹文件需要更新: cached=%s, current=%s",
|
||||
cached_version, current_version
|
||||
)
|
||||
# Wappalyzer 导出格式是 {"apps": {...}}
|
||||
data = service.get_export_data()
|
||||
with open(cache_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(data, f, ensure_ascii=False)
|
||||
|
||||
# 写入版本文件
|
||||
try:
|
||||
with open(version_file, 'w') as f:
|
||||
f.write(current_version)
|
||||
except OSError as e:
|
||||
logger.warning("写入 Wappalyzer 版本文件失败: %s", e)
|
||||
|
||||
logger.info("Wappalyzer 指纹文件已更新: %s", cache_file)
|
||||
return cache_file
|
||||
|
||||
|
||||
def get_fingerprint_paths(lib_names: list) -> dict:
|
||||
"""
|
||||
获取多个指纹库的本地路径
|
||||
|
||||
Args:
|
||||
lib_names: 指纹库名称列表,如 ['ehole', 'goby']
|
||||
|
||||
Returns:
|
||||
dict: {lib_name: local_path},如 {'ehole': '/opt/xingrin/fingerprints/ehole.json'}
|
||||
|
||||
示例:
|
||||
paths = get_fingerprint_paths(['ehole'])
|
||||
# {'ehole': '/opt/xingrin/fingerprints/ehole.json'}
|
||||
"""
|
||||
paths = {}
|
||||
for lib_name in lib_names:
|
||||
if lib_name not in FINGERPRINT_LIB_MAP:
|
||||
logger.warning("不支持的指纹库: %s,跳过", lib_name)
|
||||
continue
|
||||
|
||||
ensure_func_name = FINGERPRINT_LIB_MAP[lib_name]
|
||||
# 获取当前模块中的函数
|
||||
ensure_func = globals().get(ensure_func_name)
|
||||
if ensure_func is None:
|
||||
logger.warning("指纹库 %s 的导出函数 %s 未实现,跳过", lib_name, ensure_func_name)
|
||||
continue
|
||||
|
||||
try:
|
||||
paths[lib_name] = ensure_func()
|
||||
except Exception as e:
|
||||
logger.error("获取指纹库 %s 路径失败: %s", lib_name, e)
|
||||
continue
|
||||
|
||||
return paths
|
||||
|
||||
|
||||
__all__ = [
|
||||
"ensure_ehole_fingerprint_local",
|
||||
"ensure_goby_fingerprint_local",
|
||||
"ensure_wappalyzer_fingerprint_local",
|
||||
"get_fingerprint_paths",
|
||||
"FINGERPRINT_LIB_MAP",
|
||||
]
|
||||
@@ -19,6 +19,7 @@ from typing import Optional
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
from apps.common.utils.git_proxy import get_git_proxy_url
|
||||
from apps.engine.models import NucleiTemplateRepo
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -49,7 +50,7 @@ def get_local_commit_hash(local_path: Path) -> Optional[str]:
|
||||
|
||||
|
||||
def git_clone(repo_url: str, local_path: Path) -> bool:
|
||||
"""Git clone 仓库
|
||||
"""Git clone 仓库(支持 Git 加速)
|
||||
|
||||
Args:
|
||||
repo_url: 仓库 URL
|
||||
@@ -58,9 +59,15 @@ def git_clone(repo_url: str, local_path: Path) -> bool:
|
||||
Returns:
|
||||
是否成功
|
||||
"""
|
||||
# Transform URL for Git acceleration if enabled
|
||||
proxied_url = get_git_proxy_url(repo_url)
|
||||
|
||||
if proxied_url != repo_url:
|
||||
logger.info("Using Git acceleration: %s -> %s", repo_url, proxied_url)
|
||||
|
||||
logger.info("正在 clone 模板仓库: %s -> %s", repo_url, local_path)
|
||||
result = subprocess.run(
|
||||
["git", "clone", "--depth", "1", repo_url, str(local_path)],
|
||||
["git", "clone", "--depth", "1", proxied_url, str(local_path)],
|
||||
check=False,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
|
||||
@@ -83,7 +83,8 @@ def ensure_wordlist_local(wordlist_name: str) -> str:
|
||||
"无法确定 Django API 地址:请配置 SERVER_URL 或 PUBLIC_HOST 环境变量"
|
||||
)
|
||||
# 远程 Worker 通过 nginx HTTPS 访问,不再直连 8888
|
||||
api_base = f"https://{public_host}/api"
|
||||
public_port = getattr(settings, 'PUBLIC_PORT', '8083')
|
||||
api_base = f"https://{public_host}:{public_port}/api"
|
||||
query = urllib_parse.urlencode({'wordlist': wordlist_name})
|
||||
download_url = f"{api_base.rstrip('/')}/wordlists/download/?{query}"
|
||||
|
||||
|
||||
83
backend/apps/scan/utils/workspace_utils.py
Normal file
83
backend/apps/scan/utils/workspace_utils.py
Normal file
@@ -0,0 +1,83 @@
|
||||
"""
|
||||
工作空间工具模块
|
||||
|
||||
提供统一的扫描工作目录创建和验证功能
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def setup_scan_workspace(scan_workspace_dir: str) -> Path:
|
||||
"""
|
||||
创建 Scan 根工作空间目录
|
||||
|
||||
Args:
|
||||
scan_workspace_dir: 工作空间目录路径
|
||||
|
||||
Returns:
|
||||
Path: 创建的目录路径
|
||||
|
||||
Raises:
|
||||
RuntimeError: 目录创建失败或不可写
|
||||
"""
|
||||
workspace_path = Path(scan_workspace_dir)
|
||||
|
||||
try:
|
||||
workspace_path.mkdir(parents=True, exist_ok=True)
|
||||
except OSError as e:
|
||||
raise RuntimeError(f"创建工作空间失败: {scan_workspace_dir} - {e}") from e
|
||||
|
||||
# 验证可写
|
||||
_verify_writable(workspace_path)
|
||||
|
||||
logger.info("✓ Scan 工作空间已创建: %s", workspace_path)
|
||||
return workspace_path
|
||||
|
||||
|
||||
def setup_scan_directory(scan_workspace_dir: str, subdir: str) -> Path:
|
||||
"""
|
||||
创建扫描子目录
|
||||
|
||||
Args:
|
||||
scan_workspace_dir: 根工作空间目录
|
||||
subdir: 子目录名称(如 'fingerprint_detect', 'site_scan')
|
||||
|
||||
Returns:
|
||||
Path: 创建的子目录路径
|
||||
|
||||
Raises:
|
||||
RuntimeError: 目录创建失败或不可写
|
||||
"""
|
||||
scan_dir = Path(scan_workspace_dir) / subdir
|
||||
|
||||
try:
|
||||
scan_dir.mkdir(parents=True, exist_ok=True)
|
||||
except OSError as e:
|
||||
raise RuntimeError(f"创建扫描目录失败: {scan_dir} - {e}") from e
|
||||
|
||||
# 验证可写
|
||||
_verify_writable(scan_dir)
|
||||
|
||||
logger.info("✓ 扫描目录已创建: %s", scan_dir)
|
||||
return scan_dir
|
||||
|
||||
|
||||
def _verify_writable(path: Path) -> None:
|
||||
"""
|
||||
验证目录可写
|
||||
|
||||
Args:
|
||||
path: 目录路径
|
||||
|
||||
Raises:
|
||||
RuntimeError: 目录不可写
|
||||
"""
|
||||
test_file = path / ".test_write"
|
||||
try:
|
||||
test_file.touch()
|
||||
test_file.unlink()
|
||||
except OSError as e:
|
||||
raise RuntimeError(f"目录不可写: {path} - {e}") from e
|
||||
@@ -275,13 +275,22 @@ LOGGING = get_logging_config(debug=DEBUG)
|
||||
# 命令执行日志开关(供 apps.scan.utils.command_executor 使用)
|
||||
ENABLE_COMMAND_LOGGING = get_bool_env('ENABLE_COMMAND_LOGGING', True)
|
||||
|
||||
# 扫描工具基础路径(后端和 Worker 统一使用该路径前缀存放三方工具等文件)
|
||||
# ==================== 数据目录配置(统一使用 /opt/xingrin) ====================
|
||||
# 所有数据目录统一挂载到 /opt/xingrin,便于管理和备份
|
||||
|
||||
# 扫描工具基础路径
|
||||
SCAN_TOOLS_BASE_PATH = os.getenv('SCAN_TOOLS_PATH', '/opt/xingrin/tools')
|
||||
|
||||
# 字典文件基础路径(后端和 Worker 统一使用该路径前缀存放字典文件)
|
||||
# 字典文件基础路径
|
||||
WORDLISTS_BASE_PATH = os.getenv('WORDLISTS_PATH', '/opt/xingrin/wordlists')
|
||||
|
||||
# Nuclei 模板基础路径(custom / public 两类模板目录)
|
||||
# 指纹库基础路径
|
||||
FINGERPRINTS_BASE_PATH = os.getenv('FINGERPRINTS_PATH', '/opt/xingrin/fingerprints')
|
||||
|
||||
# Nuclei 模板仓库根目录(存放 git clone 的仓库)
|
||||
NUCLEI_TEMPLATES_REPOS_BASE_DIR = os.getenv('NUCLEI_TEMPLATES_REPOS_DIR', '/opt/xingrin/nuclei-repos')
|
||||
|
||||
# Nuclei 模板基础路径(custom / public 两类模板目录,已废弃,保留兼容)
|
||||
NUCLEI_CUSTOM_TEMPLATES_DIR = os.getenv('NUCLEI_CUSTOM_TEMPLATES_DIR', '/opt/xingrin/nuclei-templates/custom')
|
||||
NUCLEI_PUBLIC_TEMPLATES_DIR = os.getenv('NUCLEI_PUBLIC_TEMPLATES_DIR', '/opt/xingrin/nuclei-templates/public')
|
||||
|
||||
@@ -290,6 +299,7 @@ NUCLEI_TEMPLATES_REPO_URL = os.getenv('NUCLEI_TEMPLATES_REPO_URL', 'https://gith
|
||||
|
||||
# 对外访问主机与端口(供 Worker 访问 Django 使用)
|
||||
PUBLIC_HOST = os.getenv('PUBLIC_HOST', 'localhost').strip()
|
||||
PUBLIC_PORT = os.getenv('PUBLIC_PORT', '8083').strip() # 对外 HTTPS 端口
|
||||
SERVER_PORT = os.getenv('SERVER_PORT', '8888')
|
||||
|
||||
# ============================================
|
||||
@@ -335,9 +345,11 @@ TASK_SUBMIT_INTERVAL = int(os.getenv('TASK_SUBMIT_INTERVAL', '6'))
|
||||
DOCKER_NETWORK_NAME = os.getenv('DOCKER_NETWORK_NAME', 'xingrin_network')
|
||||
|
||||
# 宿主机挂载源路径(所有节点统一使用固定路径)
|
||||
# 部署前需创建:mkdir -p /opt/xingrin/{results,logs}
|
||||
# 部署前需创建:mkdir -p /opt/xingrin
|
||||
HOST_RESULTS_DIR = '/opt/xingrin/results'
|
||||
HOST_LOGS_DIR = '/opt/xingrin/logs'
|
||||
HOST_FINGERPRINTS_DIR = '/opt/xingrin/fingerprints'
|
||||
HOST_WORDLISTS_DIR = '/opt/xingrin/wordlists'
|
||||
|
||||
# ============================================
|
||||
# Worker 配置中心(任务容器从 /api/workers/config/ 获取)
|
||||
@@ -361,6 +373,6 @@ WORKER_REDIS_URL = os.getenv(
|
||||
'redis://redis:6379/0' if _is_internal_public else f'redis://{PUBLIC_HOST}:6379/0'
|
||||
)
|
||||
|
||||
# 容器内挂载目标路径(固定值,不需要修改)
|
||||
CONTAINER_RESULTS_MOUNT = '/app/backend/results'
|
||||
CONTAINER_LOGS_MOUNT = '/app/backend/logs'
|
||||
# 容器内挂载目标路径(统一使用 /opt/xingrin)
|
||||
CONTAINER_RESULTS_MOUNT = '/opt/xingrin/results'
|
||||
CONTAINER_LOGS_MOUNT = '/opt/xingrin/logs'
|
||||
|
||||
4793
backend/fingerprints/ehole.json
Normal file
4793
backend/fingerprints/ehole.json
Normal file
File diff suppressed because it is too large
Load Diff
1
backend/fingerprints/fingerprinthub_web.json
Normal file
1
backend/fingerprints/fingerprinthub_web.json
Normal file
File diff suppressed because one or more lines are too long
1
backend/fingerprints/fingers_http.json
Normal file
1
backend/fingerprints/fingers_http.json
Normal file
File diff suppressed because one or more lines are too long
100281
backend/fingerprints/goby.json
Normal file
100281
backend/fingerprints/goby.json
Normal file
File diff suppressed because it is too large
Load Diff
50005
backend/fingerprints/wappalyzer.json
Normal file
50005
backend/fingerprints/wappalyzer.json
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -108,7 +108,9 @@ create_dirs() {
|
||||
log_info "创建数据目录..."
|
||||
sudo mkdir -p "${MARKER_DIR}/results"
|
||||
sudo mkdir -p "${MARKER_DIR}/logs"
|
||||
sudo chmod -R 755 "${MARKER_DIR}"
|
||||
sudo mkdir -p "${MARKER_DIR}/fingerprints"
|
||||
sudo mkdir -p "${MARKER_DIR}/wordlists"
|
||||
sudo chmod -R 777 "${MARKER_DIR}"
|
||||
log_success "数据目录已创建"
|
||||
}
|
||||
|
||||
|
||||
@@ -22,8 +22,10 @@ SERVER_PORT=8888
|
||||
# 供远程 Worker 访问主服务器的地址:
|
||||
# - 仅本地部署:server(Docker 内部服务名)
|
||||
# - 有远程 Worker:改为主服务器外网 IP 或域名(如 192.168.1.100 或 xingrin.example.com)
|
||||
# 注意:远程 Worker 会通过 https://{PUBLIC_HOST} 访问(nginx 反代到后端 8888)
|
||||
# 注意:远程 Worker 会通过 https://{PUBLIC_HOST}:{PUBLIC_PORT} 访问(nginx 反代到后端 8888)
|
||||
PUBLIC_HOST=server
|
||||
# 对外 HTTPS 端口
|
||||
PUBLIC_PORT=8083
|
||||
|
||||
# ==================== Django 核心配置 ====================
|
||||
# 生产环境务必更换为随机强密钥
|
||||
@@ -35,10 +37,10 @@ CORS_ALLOWED_ORIGINS=http://localhost:3000
|
||||
|
||||
# ==================== 路径配置(容器内路径) ====================
|
||||
# 扫描结果保存目录
|
||||
SCAN_RESULTS_DIR=/app/backend/results
|
||||
SCAN_RESULTS_DIR=/opt/xingrin/results
|
||||
# Django 日志目录
|
||||
# 注意:如果留空或删除此变量,日志将只输出到 Docker 控制台(标准输出),不写入文件
|
||||
LOG_DIR=/app/backend/logs
|
||||
LOG_DIR=/opt/xingrin/logs
|
||||
|
||||
# ==================== 日志级别配置 ====================
|
||||
# 应用日志级别:DEBUG / INFO / WARNING / ERROR
|
||||
|
||||
@@ -45,9 +45,8 @@ services:
|
||||
redis:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
# 统一使用固定路径(开发环境:ln -s ~/project/backend/results /opt/xingrin/results)
|
||||
- /opt/xingrin/results:/app/backend/results
|
||||
- /opt/xingrin/logs:/app/backend/logs
|
||||
# 统一挂载数据目录
|
||||
- /opt/xingrin:/opt/xingrin
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8888/api/"]
|
||||
@@ -97,8 +96,7 @@ services:
|
||||
frontend:
|
||||
condition: service_started
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "8083:8083"
|
||||
volumes:
|
||||
# SSL 证书挂载(方便更新)
|
||||
- ./nginx/ssl:/etc/nginx/ssl:ro
|
||||
@@ -111,8 +109,7 @@ services:
|
||||
image: docker-worker:${IMAGE_TAG:-latest}-dev
|
||||
restart: "no"
|
||||
volumes:
|
||||
- /opt/xingrin/results:/app/backend/results
|
||||
- /opt/xingrin/logs:/app/backend/logs
|
||||
- /opt/xingrin:/opt/xingrin
|
||||
command: echo "Worker image built for development"
|
||||
|
||||
volumes:
|
||||
|
||||
@@ -47,9 +47,8 @@ services:
|
||||
redis:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
# 统一使用固定路径(部署时需创建:mkdir -p /opt/xingrin/{results,logs})
|
||||
- /opt/xingrin/results:/app/backend/results
|
||||
- /opt/xingrin/logs:/app/backend/logs
|
||||
# 统一挂载数据目录
|
||||
- /opt/xingrin:/opt/xingrin
|
||||
# Docker Socket 挂载:允许 Django 服务器执行本地 docker 命令(用于本地 Worker 任务分发)
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
healthcheck:
|
||||
@@ -95,8 +94,7 @@ services:
|
||||
frontend:
|
||||
condition: service_started
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "8083:8083"
|
||||
volumes:
|
||||
- ./nginx/ssl:/etc/nginx/ssl:ro
|
||||
|
||||
|
||||
@@ -16,17 +16,9 @@ http {
|
||||
server frontend:3000;
|
||||
}
|
||||
|
||||
# HTTP 跳转到 HTTPS
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
# HTTPS 反代(将证书放在 /docker/nginx/ssl 下映射到 /etc/nginx/ssl)
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
listen 8083 ssl http2;
|
||||
server_name _;
|
||||
|
||||
ssl_certificate /etc/nginx/ssl/fullchain.pem;
|
||||
@@ -36,6 +28,9 @@ http {
|
||||
|
||||
client_max_body_size 50m;
|
||||
|
||||
# HTTP 请求到 HTTPS 端口时自动跳转
|
||||
error_page 497 =301 https://$host:$server_port$request_uri;
|
||||
|
||||
# 指纹特征 - 用于 FOFA/Shodan 等搜索引擎识别
|
||||
add_header X-Powered-By "Xingrin ASM" always;
|
||||
|
||||
|
||||
@@ -111,10 +111,18 @@ init_wordlists() {
|
||||
log_info "字典初始化完成"
|
||||
}
|
||||
|
||||
# 初始化指纹库
|
||||
init_fingerprints() {
|
||||
log_step "初始化指纹库..."
|
||||
docker compose exec -T server python backend/manage.py init_fingerprints
|
||||
log_info "指纹库初始化完成"
|
||||
}
|
||||
|
||||
# 初始化 Nuclei 模板仓库
|
||||
init_nuclei_templates() {
|
||||
log_step "初始化 Nuclei 模板仓库..."
|
||||
docker compose exec -T server python backend/manage.py init_nuclei_templates --sync
|
||||
# 只创建数据库记录,git clone 由 install.sh 在容器外完成(支持 Git 加速)
|
||||
docker compose exec -T server python backend/manage.py init_nuclei_templates
|
||||
log_info "Nuclei 模板仓库初始化完成"
|
||||
}
|
||||
|
||||
@@ -157,6 +165,7 @@ main() {
|
||||
|
||||
init_engine_config
|
||||
init_wordlists
|
||||
# init_fingerprints
|
||||
init_nuclei_templates
|
||||
init_admin_user
|
||||
|
||||
|
||||
@@ -21,6 +21,11 @@ echo " [1.3/3] 初始化默认目录字典..."
|
||||
python manage.py init_wordlists
|
||||
echo " ✓ 默认目录字典已就绪"
|
||||
|
||||
|
||||
echo " [1.4/3] 初始化默认指纹库..."
|
||||
# python manage.py init_fingerprints
|
||||
echo " ✓ 默认指纹库已就绪"
|
||||
|
||||
# 2. 启动 Django uvicorn 服务 (ASGI)
|
||||
# 定时任务由内置 APScheduler 处理,在 Django 启动时自动启动
|
||||
echo " [2/3] 启动 Django uvicorn (ASGI)..."
|
||||
|
||||
@@ -26,7 +26,8 @@ RUN go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest && \
|
||||
go install -v github.com/projectdiscovery/katana/cmd/katana@latest && \
|
||||
go install -v github.com/tomnomnom/assetfinder@latest && \
|
||||
go install -v github.com/ffuf/ffuf/v2@latest && \
|
||||
go install -v github.com/d3mondev/puredns/v2@latest
|
||||
go install -v github.com/d3mondev/puredns/v2@latest && \
|
||||
go install -v github.com/yyhuni/xingfinger@latest
|
||||
|
||||
# 安装 Amass v5(禁用 CGO 以跳过 libpostal 依赖)
|
||||
RUN CGO_ENABLED=0 go install -v github.com/owasp-amass/amass/v5/cmd/amass@main
|
||||
|
||||
@@ -12,11 +12,10 @@
|
||||
- **操作系统**: Ubuntu 18.04+ / Debian 10+
|
||||
- **权限**: sudo 管理员权限
|
||||
- **端口要求**: 需要开放以下端口
|
||||
- `80` - HTTP 访问(自动跳转到 HTTPS)
|
||||
- `443` - HTTPS 访问(主要访问端口)
|
||||
- `8083` - HTTPS 访问(主要访问端口)
|
||||
- `5432` - PostgreSQL 数据库(如使用本地数据库)
|
||||
- `6379` - Redis 缓存服务
|
||||
- 后端 API 仅容器内监听 8888,由 nginx 反代到 80/443,对公网无需放行 8888
|
||||
- 后端 API 仅容器内监听 8888,由 nginx 反代到 8083,对公网无需放行 8888
|
||||
|
||||
## 一键安装
|
||||
|
||||
@@ -45,7 +44,7 @@ sudo ./install.sh --no-frontend
|
||||
|
||||
### 3. 访问系统
|
||||
安装完成后,访问:
|
||||
- **Web 界面**: https://你的服务器IP/
|
||||
- **Web 界面**: https://你的服务器IP:8083/
|
||||
|
||||
**默认账号**:
|
||||
- 用户名: `admin`
|
||||
@@ -60,13 +59,10 @@ sudo ./install.sh --no-frontend
|
||||
|
||||
#### 必须放行的端口
|
||||
```
|
||||
80 - HTTP 访问
|
||||
443 - HTTPS 访问
|
||||
3000 - 前端服务(开发模式)
|
||||
8083 - HTTPS 访问(主要访问端口)
|
||||
5432 - PostgreSQL(如使用本地数据库)
|
||||
6379 - Redis 缓存
|
||||
```
|
||||
> 后端 API 默认仅在容器内 8888 监听,由 nginx 反代到 80/443,对公网无需放行 8888。
|
||||
|
||||
#### 推荐方案
|
||||
- **国外 VPS**:如 Vultr、DigitalOcean、Linode 等,默认开放所有端口,无需额外配置
|
||||
@@ -195,8 +191,7 @@ IMAGE_TAG=v1.0.0 # 镜像版本(自动设置)
|
||||
#### 1. 端口被占用
|
||||
```bash
|
||||
# 检查端口占用
|
||||
sudo netstat -tlnp | grep :80
|
||||
sudo netstat -tlnp | grep :443
|
||||
sudo netstat -tlnp | grep :8083
|
||||
|
||||
# 停止占用端口的服务
|
||||
sudo systemctl stop apache2 # 如果是 Apache
|
||||
|
||||
@@ -40,8 +40,13 @@ flowchart TB
|
||||
HTTPX1[httpx<br/>Web Service Detection]
|
||||
end
|
||||
|
||||
subgraph FINGER["Fingerprint Detect"]
|
||||
XINGFINGER[xingfinger<br/>Tech Stack Detection]
|
||||
end
|
||||
|
||||
RESOLVE --> NAABU
|
||||
NAABU --> HTTPX1
|
||||
HTTPX1 --> XINGFINGER
|
||||
end
|
||||
|
||||
TARGET --> SUBFINDER
|
||||
@@ -69,9 +74,9 @@ flowchart TB
|
||||
end
|
||||
end
|
||||
|
||||
HTTPX1 --> WAYMORE
|
||||
HTTPX1 --> KATANA
|
||||
HTTPX1 --> FFUF
|
||||
XINGFINGER --> WAYMORE
|
||||
XINGFINGER --> KATANA
|
||||
XINGFINGER --> FFUF
|
||||
|
||||
subgraph STAGE3["Stage 3: Vulnerability Sequential"]
|
||||
direction TB
|
||||
@@ -105,7 +110,7 @@ flowchart TB
|
||||
```python
|
||||
# backend/apps/scan/configs/command_templates.py
|
||||
EXECUTION_STAGES = [
|
||||
{'mode': 'sequential', 'flows': ['subdomain_discovery', 'port_scan', 'site_scan']},
|
||||
{'mode': 'sequential', 'flows': ['subdomain_discovery', 'port_scan', 'site_scan', 'fingerprint_detect']},
|
||||
{'mode': 'parallel', 'flows': ['url_fetch', 'directory_scan']},
|
||||
{'mode': 'sequential', 'flows': ['vuln_scan']},
|
||||
]
|
||||
@@ -118,6 +123,7 @@ EXECUTION_STAGES = [
|
||||
| subdomain_discovery | subfinder, amass, sublist3r, assetfinder, puredns | Subdomain |
|
||||
| port_scan | naabu | HostPortMapping |
|
||||
| site_scan | httpx | WebSite |
|
||||
| fingerprint_detect | xingfinger | WebSite.tech(更新) |
|
||||
| url_fetch | waymore, katana, uro, httpx | Endpoint |
|
||||
| directory_scan | ffuf | Directory |
|
||||
| vuln_scan | dalfox, nuclei | Vulnerability |
|
||||
|
||||
@@ -245,7 +245,7 @@ A: 更新字典内容后会重新计算 hash,Worker 下次使用时会检测
|
||||
|
||||
A: 检查:
|
||||
1. `PUBLIC_HOST` 是否配置为 Server 的外网 IP 或域名
|
||||
2. Nginx 443 (HTTPS) 是否可达(远程 Worker 通过 nginx 访问后端)
|
||||
2. Nginx 8083 (HTTPS) 是否可达(远程 Worker 通过 nginx 访问后端)
|
||||
3. Worker 到 Server 的网络是否通畅
|
||||
|
||||
### Q: 如何批量导入字典?
|
||||
|
||||
@@ -4,27 +4,27 @@ import { VulnSeverityChart } from "@/components/dashboard/vuln-severity-chart"
|
||||
import { DashboardDataTable } from "@/components/dashboard/dashboard-data-table"
|
||||
|
||||
/**
|
||||
* 仪表板页面组件
|
||||
* 这是应用的主要仪表板页面,包含卡片、图表和数据表格
|
||||
* 布局结构已移至根布局组件中
|
||||
* Dashboard page component
|
||||
* This is the main dashboard page of the application, containing cards, charts and data tables
|
||||
* Layout structure has been moved to the root layout component
|
||||
*/
|
||||
export default function Page() {
|
||||
return (
|
||||
// 内容区域,包含卡片、图表和数据表格
|
||||
// Content area containing cards, charts and data tables
|
||||
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
|
||||
{/* 顶部统计卡片 */}
|
||||
{/* Top statistics cards */}
|
||||
<DashboardStatCards />
|
||||
|
||||
{/* 图表区域 - 趋势图 + 漏洞分布 */}
|
||||
{/* Chart area - Trend chart + Vulnerability distribution */}
|
||||
<div className="grid gap-4 px-4 lg:px-6 @xl/main:grid-cols-2">
|
||||
{/* 资产趋势折线图 */}
|
||||
{/* Asset trend line chart */}
|
||||
<AssetTrendChart />
|
||||
|
||||
{/* 漏洞严重程度分布 */}
|
||||
{/* Vulnerability severity distribution */}
|
||||
<VulnSeverityChart />
|
||||
</div>
|
||||
|
||||
{/* 漏洞 / 扫描历史 Tab */}
|
||||
{/* Vulnerabilities / Scan history tab */}
|
||||
<div className="px-4 lg:px-6">
|
||||
<DashboardDataTable />
|
||||
</div>
|
||||
132
frontend/app/[locale]/layout.tsx
Normal file
132
frontend/app/[locale]/layout.tsx
Normal file
@@ -0,0 +1,132 @@
|
||||
import type React from "react"
|
||||
import type { Metadata } from "next"
|
||||
import { NextIntlClientProvider } from 'next-intl'
|
||||
import { getMessages, setRequestLocale, getTranslations } from 'next-intl/server'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { locales, localeHtmlLang, type Locale } from '@/i18n/config'
|
||||
|
||||
// Import global style files
|
||||
import "../globals.css"
|
||||
// Import Noto Sans SC local font
|
||||
import "@fontsource/noto-sans-sc/400.css"
|
||||
import "@fontsource/noto-sans-sc/500.css"
|
||||
import "@fontsource/noto-sans-sc/700.css"
|
||||
// Import color themes
|
||||
import "@/styles/themes/bubblegum.css"
|
||||
import "@/styles/themes/quantum-rose.css"
|
||||
import "@/styles/themes/clean-slate.css"
|
||||
import "@/styles/themes/cosmic-night.css"
|
||||
import "@/styles/themes/vercel.css"
|
||||
import "@/styles/themes/vercel-dark.css"
|
||||
import "@/styles/themes/violet-bloom.css"
|
||||
import "@/styles/themes/cyberpunk-1.css"
|
||||
import { Suspense } from "react"
|
||||
import Script from "next/script"
|
||||
import { QueryProvider } from "@/components/providers/query-provider"
|
||||
import { ThemeProvider } from "@/components/providers/theme-provider"
|
||||
|
||||
// Import common layout components
|
||||
import { RoutePrefetch } from "@/components/route-prefetch"
|
||||
import { RouteProgress } from "@/components/route-progress"
|
||||
import { AuthLayout } from "@/components/auth/auth-layout"
|
||||
|
||||
// Dynamically generate metadata
|
||||
export async function generateMetadata({ params }: { params: Promise<{ locale: string }> }): Promise<Metadata> {
|
||||
const { locale } = await params
|
||||
const t = await getTranslations({ locale, namespace: 'metadata' })
|
||||
|
||||
return {
|
||||
title: t('title'),
|
||||
description: t('description'),
|
||||
keywords: t('keywords').split(',').map(k => k.trim()),
|
||||
generator: "Xingrin ASM Platform",
|
||||
authors: [{ name: "yyhuni" }],
|
||||
openGraph: {
|
||||
title: t('ogTitle'),
|
||||
description: t('ogDescription'),
|
||||
type: "website",
|
||||
locale: locale === 'zh' ? 'zh_CN' : 'en_US',
|
||||
},
|
||||
robots: {
|
||||
index: true,
|
||||
follow: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Use Noto Sans SC + system font fallback, fully loaded locally
|
||||
const fontConfig = {
|
||||
className: "font-sans",
|
||||
style: {
|
||||
fontFamily: "'Noto Sans SC', system-ui, -apple-system, PingFang SC, Hiragino Sans GB, Microsoft YaHei, sans-serif"
|
||||
}
|
||||
}
|
||||
|
||||
// Generate static parameters, support all languages
|
||||
export function generateStaticParams() {
|
||||
return locales.map((locale) => ({ locale }))
|
||||
}
|
||||
|
||||
interface Props {
|
||||
children: React.ReactNode
|
||||
params: Promise<{ locale: string }>
|
||||
}
|
||||
|
||||
/**
|
||||
* Language layout component
|
||||
* Wraps all pages, provides internationalization context
|
||||
*/
|
||||
export default async function LocaleLayout({
|
||||
children,
|
||||
params,
|
||||
}: Props) {
|
||||
const { locale } = await params
|
||||
|
||||
// Validate locale validity
|
||||
if (!locales.includes(locale as Locale)) {
|
||||
notFound()
|
||||
}
|
||||
|
||||
// Enable static rendering
|
||||
setRequestLocale(locale)
|
||||
|
||||
// Load translation messages
|
||||
const messages = await getMessages()
|
||||
|
||||
return (
|
||||
<html lang={localeHtmlLang[locale as Locale]} suppressHydrationWarning>
|
||||
<body className={fontConfig.className} style={fontConfig.style}>
|
||||
{/* Load external scripts */}
|
||||
<Script
|
||||
src="https://tweakcn.com/live-preview.min.js"
|
||||
strategy="beforeInteractive"
|
||||
crossOrigin="anonymous"
|
||||
/>
|
||||
{/* Route loading progress bar */}
|
||||
<Suspense fallback={null}>
|
||||
<RouteProgress />
|
||||
</Suspense>
|
||||
{/* ThemeProvider provides theme switching functionality */}
|
||||
<ThemeProvider
|
||||
attribute="class"
|
||||
defaultTheme="dark"
|
||||
enableSystem
|
||||
disableTransitionOnChange
|
||||
>
|
||||
{/* NextIntlClientProvider provides internationalization context */}
|
||||
<NextIntlClientProvider messages={messages}>
|
||||
{/* QueryProvider provides React Query functionality */}
|
||||
<QueryProvider>
|
||||
{/* Route prefetch */}
|
||||
<RoutePrefetch />
|
||||
{/* AuthLayout handles authentication and sidebar display */}
|
||||
<AuthLayout>
|
||||
{children}
|
||||
</AuthLayout>
|
||||
</QueryProvider>
|
||||
</NextIntlClientProvider>
|
||||
</ThemeProvider>
|
||||
</body>
|
||||
</html>
|
||||
)
|
||||
}
|
||||
28
frontend/app/[locale]/login/layout.tsx
Normal file
28
frontend/app/[locale]/login/layout.tsx
Normal file
@@ -0,0 +1,28 @@
|
||||
import type { Metadata } from "next"
|
||||
import { getTranslations } from "next-intl/server"
|
||||
|
||||
type Props = {
|
||||
params: Promise<{ locale: string }>
|
||||
}
|
||||
|
||||
export async function generateMetadata({ params }: Props): Promise<Metadata> {
|
||||
const { locale } = await params
|
||||
const t = await getTranslations({ locale, namespace: "auth" })
|
||||
|
||||
return {
|
||||
title: t("pageTitle"),
|
||||
description: t("pageDescription"),
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Login page layout
|
||||
* Does not include sidebar and header
|
||||
*/
|
||||
export default function LoginLayout({
|
||||
children,
|
||||
}: {
|
||||
children: React.ReactNode
|
||||
}) {
|
||||
return children
|
||||
}
|
||||
127
frontend/app/[locale]/login/page.tsx
Normal file
127
frontend/app/[locale]/login/page.tsx
Normal file
@@ -0,0 +1,127 @@
|
||||
"use client"
|
||||
|
||||
import React from "react"
|
||||
import { useRouter } from "next/navigation"
|
||||
import { useTranslations } from "next-intl"
|
||||
import Lottie from "lottie-react"
|
||||
import securityAnimation from "@/public/animations/Security000-Purple.json"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Card, CardContent } from "@/components/ui/card"
|
||||
import {
|
||||
Field,
|
||||
FieldGroup,
|
||||
FieldLabel,
|
||||
} from "@/components/ui/field"
|
||||
import { Spinner } from "@/components/ui/spinner"
|
||||
import { useLogin, useAuth } from "@/hooks/use-auth"
|
||||
import { useRoutePrefetch } from "@/hooks/use-route-prefetch"
|
||||
|
||||
export default function LoginPage() {
|
||||
// Preload all page components on login page
|
||||
useRoutePrefetch()
|
||||
const router = useRouter()
|
||||
const { data: auth, isLoading: authLoading } = useAuth()
|
||||
const { mutate: login, isPending } = useLogin()
|
||||
const t = useTranslations("auth")
|
||||
|
||||
const [username, setUsername] = React.useState("")
|
||||
const [password, setPassword] = React.useState("")
|
||||
|
||||
// If already logged in, redirect to dashboard
|
||||
React.useEffect(() => {
|
||||
if (auth?.authenticated) {
|
||||
router.push("/dashboard/")
|
||||
}
|
||||
}, [auth, router])
|
||||
|
||||
const handleSubmit = (e: React.FormEvent) => {
|
||||
e.preventDefault()
|
||||
login({ username, password })
|
||||
}
|
||||
|
||||
// Show spinner while loading
|
||||
if (authLoading) {
|
||||
return (
|
||||
<div className="flex min-h-svh w-full flex-col items-center justify-center gap-4 bg-background">
|
||||
<Spinner className="size-8 text-primary" />
|
||||
<p className="text-muted-foreground text-sm" suppressHydrationWarning>loading...</p>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
// Don't show login page if already logged in
|
||||
if (auth?.authenticated) {
|
||||
return null
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="login-bg flex min-h-svh flex-col p-6 md:p-10">
|
||||
{/* Main content area */}
|
||||
<div className="flex-1 flex items-center justify-center">
|
||||
<div className="w-full max-w-sm md:max-w-4xl">
|
||||
<Card className="overflow-hidden p-0">
|
||||
<CardContent className="grid p-0 md:grid-cols-2">
|
||||
<form className="p-6 md:p-8" onSubmit={handleSubmit}>
|
||||
<FieldGroup>
|
||||
{/* Fingerprint identifier - for FOFA/Shodan and other search engines to identify */}
|
||||
<meta name="generator" content="Xingrin ASM Platform" />
|
||||
<div className="flex flex-col items-center gap-2 text-center">
|
||||
<h1 className="text-2xl font-bold">{t("title")}</h1>
|
||||
<p className="text-sm text-muted-foreground mt-1">
|
||||
{t("subtitle")}
|
||||
</p>
|
||||
</div>
|
||||
<Field>
|
||||
<FieldLabel htmlFor="username">{t("username")}</FieldLabel>
|
||||
<Input
|
||||
id="username"
|
||||
type="text"
|
||||
placeholder={t("usernamePlaceholder")}
|
||||
value={username}
|
||||
onChange={(e) => setUsername(e.target.value)}
|
||||
required
|
||||
autoFocus
|
||||
/>
|
||||
</Field>
|
||||
<Field>
|
||||
<FieldLabel htmlFor="password">{t("password")}</FieldLabel>
|
||||
<Input
|
||||
id="password"
|
||||
type="password"
|
||||
placeholder={t("passwordPlaceholder")}
|
||||
value={password}
|
||||
onChange={(e) => setPassword(e.target.value)}
|
||||
required
|
||||
/>
|
||||
</Field>
|
||||
<Field>
|
||||
<Button type="submit" className="w-full" disabled={isPending}>
|
||||
{isPending ? t("loggingIn") : t("login")}
|
||||
</Button>
|
||||
</Field>
|
||||
</FieldGroup>
|
||||
</form>
|
||||
<div className="bg-primary/5 relative hidden md:flex md:items-center md:justify-center">
|
||||
<div className="text-center p-4">
|
||||
<Lottie
|
||||
animationData={securityAnimation}
|
||||
loop={true}
|
||||
className="w-96 h-96 mx-auto"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Version number - fixed at the bottom of the page */}
|
||||
<div className="flex-shrink-0 text-center py-4">
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{process.env.NEXT_PUBLIC_VERSION || 'dev'}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -4,8 +4,8 @@ import React from "react"
|
||||
import { OrganizationDetailView } from "@/components/organization/organization-detail-view"
|
||||
|
||||
/**
|
||||
* 组织详情页面
|
||||
* 显示组织的统计信息和资产列表
|
||||
* Organization detail page
|
||||
* Displays organization statistics and asset list
|
||||
*/
|
||||
export default function OrganizationDetailPage({
|
||||
params,
|
||||
@@ -1,30 +1,35 @@
|
||||
// 导入组织管理组件
|
||||
"use client"
|
||||
|
||||
// Import organization management component
|
||||
import { OrganizationList } from "@/components/organization/organization-list"
|
||||
// 导入图标
|
||||
// Import icons
|
||||
import { Building2 } from "lucide-react"
|
||||
import { useTranslations } from "next-intl"
|
||||
|
||||
/**
|
||||
* 组织管理页面
|
||||
* 资产管理下的组织管理子页面,显示组织列表和相关操作
|
||||
* Organization management page
|
||||
* Sub-page under asset management that displays organization list and related operations
|
||||
*/
|
||||
export default function OrganizationPage() {
|
||||
const t = useTranslations("pages.organization")
|
||||
|
||||
return (
|
||||
// 内容区域,包含组织管理功能
|
||||
// Content area containing organization management features
|
||||
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
|
||||
{/* 页面头部 */}
|
||||
{/* Page header */}
|
||||
<div className="flex items-center justify-between px-4 lg:px-6">
|
||||
<div>
|
||||
<h2 className="text-2xl font-bold tracking-tight flex items-center gap-2">
|
||||
<Building2 />
|
||||
组织
|
||||
{t("title")}
|
||||
</h2>
|
||||
<p className="text-muted-foreground">
|
||||
管理和查看系统中的所有组织信息
|
||||
{t("description")}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 组织列表组件 */}
|
||||
{/* Organization list component */}
|
||||
<div className="px-4 lg:px-6">
|
||||
<OrganizationList />
|
||||
</div>
|
||||
7
frontend/app/[locale]/page.tsx
Normal file
7
frontend/app/[locale]/page.tsx
Normal file
@@ -0,0 +1,7 @@
|
||||
import { redirect } from 'next/navigation';
|
||||
import { defaultLocale } from '@/i18n/config';
|
||||
|
||||
export default function Home() {
|
||||
// Redirect directly to dashboard page (with language prefix)
|
||||
redirect(`/${defaultLocale}/dashboard/`);
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import React, { useState, useMemo } from "react"
|
||||
import { Settings, Search, Pencil, Trash2, Check, X, Plus } from "lucide-react"
|
||||
import * as yaml from "js-yaml"
|
||||
import Editor from "@monaco-editor/react"
|
||||
import { useTranslations } from "next-intl"
|
||||
import { useColorTheme } from "@/hooks/use-color-theme"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
@@ -26,24 +27,26 @@ import { cn } from "@/lib/utils"
|
||||
import type { ScanEngine } from "@/types/engine.types"
|
||||
import { MasterDetailSkeleton } from "@/components/ui/master-detail-skeleton"
|
||||
|
||||
/** 功能配置项定义 - 与 YAML 配置结构对应 */
|
||||
/** Feature configuration item definition - corresponds to YAML configuration structure */
|
||||
const FEATURE_LIST = [
|
||||
{ key: "subdomain_discovery", label: "子域名发现" },
|
||||
{ key: "port_scan", label: "端口扫描" },
|
||||
{ key: "site_scan", label: "站点扫描" },
|
||||
{ key: "directory_scan", label: "目录扫描" },
|
||||
{ key: "url_fetch", label: "URL 抓取" },
|
||||
{ key: "vuln_scan", label: "漏洞扫描" },
|
||||
{ key: "subdomain_discovery" },
|
||||
{ key: "port_scan" },
|
||||
{ key: "site_scan" },
|
||||
{ key: "fingerprint_detect" },
|
||||
{ key: "directory_scan" },
|
||||
{ key: "url_fetch" },
|
||||
{ key: "vuln_scan" },
|
||||
] as const
|
||||
|
||||
type FeatureKey = typeof FEATURE_LIST[number]["key"]
|
||||
|
||||
/** 解析引擎配置获取启用的功能 */
|
||||
/** Parse engine configuration to get enabled features */
|
||||
function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
|
||||
const defaultFeatures: Record<FeatureKey, boolean> = {
|
||||
subdomain_discovery: false,
|
||||
port_scan: false,
|
||||
site_scan: false,
|
||||
fingerprint_detect: false,
|
||||
directory_scan: false,
|
||||
url_fetch: false,
|
||||
vuln_scan: false,
|
||||
@@ -59,6 +62,7 @@ function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
|
||||
subdomain_discovery: !!config.subdomain_discovery,
|
||||
port_scan: !!config.port_scan,
|
||||
site_scan: !!config.site_scan,
|
||||
fingerprint_detect: !!config.fingerprint_detect,
|
||||
directory_scan: !!config.directory_scan,
|
||||
url_fetch: !!config.url_fetch,
|
||||
vuln_scan: !!config.vuln_scan,
|
||||
@@ -68,14 +72,14 @@ function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
|
||||
}
|
||||
}
|
||||
|
||||
/** 计算启用的功能数量 */
|
||||
/** Calculate the number of enabled features */
|
||||
function countEnabledFeatures(engine: ScanEngine) {
|
||||
const features = parseEngineFeatures(engine)
|
||||
return Object.values(features).filter(Boolean).length
|
||||
}
|
||||
|
||||
/**
|
||||
* 扫描引擎页面
|
||||
* Scan engine page
|
||||
*/
|
||||
export default function ScanEnginePage() {
|
||||
const [selectedId, setSelectedId] = useState<number | null>(null)
|
||||
@@ -87,6 +91,12 @@ export default function ScanEnginePage() {
|
||||
const [engineToDelete, setEngineToDelete] = useState<ScanEngine | null>(null)
|
||||
|
||||
const { currentTheme } = useColorTheme()
|
||||
|
||||
// Internationalization
|
||||
const tCommon = useTranslations("common")
|
||||
const tConfirm = useTranslations("common.confirm")
|
||||
const tNav = useTranslations("nav")
|
||||
const tEngine = useTranslations("scan.engine")
|
||||
|
||||
// API Hooks
|
||||
const { data: engines = [], isLoading } = useEngines()
|
||||
@@ -94,20 +104,20 @@ export default function ScanEnginePage() {
|
||||
const updateEngineMutation = useUpdateEngine()
|
||||
const deleteEngineMutation = useDeleteEngine()
|
||||
|
||||
// 过滤引擎列表
|
||||
// Filter engine list
|
||||
const filteredEngines = useMemo(() => {
|
||||
if (!searchQuery.trim()) return engines
|
||||
const query = searchQuery.toLowerCase()
|
||||
return engines.filter((e) => e.name.toLowerCase().includes(query))
|
||||
}, [engines, searchQuery])
|
||||
|
||||
// 选中的引擎
|
||||
// Selected engine
|
||||
const selectedEngine = useMemo(() => {
|
||||
if (!selectedId) return null
|
||||
return engines.find((e) => e.id === selectedId) || null
|
||||
}, [selectedId, engines])
|
||||
|
||||
// 选中引擎的功能状态
|
||||
// Selected engine's feature status
|
||||
const selectedFeatures = useMemo(() => {
|
||||
if (!selectedEngine) return null
|
||||
return parseEngineFeatures(selectedEngine)
|
||||
@@ -150,21 +160,21 @@ export default function ScanEnginePage() {
|
||||
})
|
||||
}
|
||||
|
||||
// 加载状态
|
||||
// Loading state
|
||||
if (isLoading) {
|
||||
return <MasterDetailSkeleton title="扫描引擎" listItemCount={4} />
|
||||
return <MasterDetailSkeleton title={tNav("scanEngine")} listItemCount={4} />
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex flex-col h-full">
|
||||
{/* 顶部:标题 + 搜索 + 新建按钮 */}
|
||||
{/* Top: Title + Search + Create button */}
|
||||
<div className="flex items-center justify-between gap-4 px-4 py-4 lg:px-6">
|
||||
<h1 className="text-2xl font-bold shrink-0">扫描引擎</h1>
|
||||
<h1 className="text-2xl font-bold shrink-0">{tNav("scanEngine")}</h1>
|
||||
<div className="flex items-center gap-2 flex-1 max-w-md">
|
||||
<div className="relative flex-1">
|
||||
<Search className="absolute left-2.5 top-1/2 h-4 w-4 -translate-y-1/2 text-muted-foreground" />
|
||||
<Input
|
||||
placeholder="搜索引擎..."
|
||||
placeholder={tEngine("searchPlaceholder")}
|
||||
value={searchQuery}
|
||||
onChange={(e) => setSearchQuery(e.target.value)}
|
||||
className="pl-8"
|
||||
@@ -173,27 +183,27 @@ export default function ScanEnginePage() {
|
||||
</div>
|
||||
<Button onClick={() => setIsCreateDialogOpen(true)}>
|
||||
<Plus className="h-4 w-4 mr-1" />
|
||||
新建引擎
|
||||
{tEngine("createEngine")}
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
<Separator />
|
||||
|
||||
{/* 主体:左侧列表 + 右侧详情 */}
|
||||
{/* Main: Left list + Right details */}
|
||||
<div className="flex flex-1 min-h-0">
|
||||
{/* 左侧:引擎列表 */}
|
||||
{/* Left: Engine list */}
|
||||
<div className="w-72 lg:w-80 border-r flex flex-col">
|
||||
<div className="px-4 py-3 border-b">
|
||||
<h2 className="text-sm font-medium text-muted-foreground">
|
||||
引擎列表 ({filteredEngines.length})
|
||||
{tEngine("engineList")} ({filteredEngines.length})
|
||||
</h2>
|
||||
</div>
|
||||
<ScrollArea className="flex-1">
|
||||
{isLoading ? (
|
||||
<div className="p-4 text-sm text-muted-foreground">加载中...</div>
|
||||
<div className="p-4 text-sm text-muted-foreground">{tCommon("loading")}</div>
|
||||
) : filteredEngines.length === 0 ? (
|
||||
<div className="p-4 text-sm text-muted-foreground">
|
||||
{searchQuery ? "未找到匹配的引擎" : "暂无引擎,请先新建"}
|
||||
{searchQuery ? tEngine("noMatchingEngine") : tEngine("noEngines")}
|
||||
</div>
|
||||
) : (
|
||||
<div className="p-2">
|
||||
@@ -212,7 +222,7 @@ export default function ScanEnginePage() {
|
||||
{engine.name}
|
||||
</div>
|
||||
<div className="text-xs text-muted-foreground mt-0.5">
|
||||
{countEnabledFeatures(engine)} 个功能已启用
|
||||
{tEngine("featuresEnabled", { count: countEnabledFeatures(engine) })}
|
||||
</div>
|
||||
</button>
|
||||
))}
|
||||
@@ -221,11 +231,11 @@ export default function ScanEnginePage() {
|
||||
</ScrollArea>
|
||||
</div>
|
||||
|
||||
{/* 右侧:引擎详情 */}
|
||||
{/* Right: Engine details */}
|
||||
<div className="flex-1 flex flex-col min-w-0">
|
||||
{selectedEngine && selectedFeatures ? (
|
||||
<>
|
||||
{/* 详情头部 */}
|
||||
{/* Details header */}
|
||||
<div className="px-6 py-4 border-b">
|
||||
<div className="flex items-start gap-3">
|
||||
<div className="flex h-10 w-10 items-center justify-center rounded-lg bg-primary/10 shrink-0">
|
||||
@@ -236,20 +246,20 @@ export default function ScanEnginePage() {
|
||||
{selectedEngine.name}
|
||||
</h2>
|
||||
<p className="text-sm text-muted-foreground mt-0.5">
|
||||
更新于 {new Date(selectedEngine.updatedAt).toLocaleString("zh-CN")}
|
||||
{tEngine("updatedAt")} {new Date(selectedEngine.updatedAt).toLocaleString()}
|
||||
</p>
|
||||
</div>
|
||||
<Badge variant="outline">
|
||||
{countEnabledFeatures(selectedEngine)} 个功能
|
||||
{tEngine("featuresCount", { count: countEnabledFeatures(selectedEngine) })}
|
||||
</Badge>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 详情内容 */}
|
||||
{/* Details content */}
|
||||
<div className="flex-1 flex flex-col min-h-0 p-6 gap-6">
|
||||
{/* 功能状态 */}
|
||||
{/* Feature status */}
|
||||
<div className="shrink-0">
|
||||
<h3 className="text-sm font-medium mb-3">已启用功能</h3>
|
||||
<h3 className="text-sm font-medium mb-3">{tEngine("enabledFeatures")}</h3>
|
||||
<div className="rounded-lg border">
|
||||
<div className="grid grid-cols-3 gap-px bg-muted">
|
||||
{FEATURE_LIST.map((feature) => {
|
||||
@@ -267,7 +277,7 @@ export default function ScanEnginePage() {
|
||||
) : (
|
||||
<X className="h-4 w-4 text-muted-foreground/50 shrink-0" />
|
||||
)}
|
||||
<span className="text-sm truncate">{feature.label}</span>
|
||||
<span className="text-sm truncate">{tEngine(`features.${feature.key}`)}</span>
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
@@ -275,10 +285,10 @@ export default function ScanEnginePage() {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 配置预览 */}
|
||||
{/* Configuration preview */}
|
||||
{selectedEngine.configuration && (
|
||||
<div className="flex-1 flex flex-col min-h-0">
|
||||
<h3 className="text-sm font-medium mb-3 shrink-0">配置预览</h3>
|
||||
<h3 className="text-sm font-medium mb-3 shrink-0">{tEngine("configPreview")}</h3>
|
||||
<div className="flex-1 rounded-lg border overflow-hidden min-h-0">
|
||||
<Editor
|
||||
height="100%"
|
||||
@@ -302,7 +312,7 @@ export default function ScanEnginePage() {
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* 操作按钮 */}
|
||||
{/* Action buttons */}
|
||||
<div className="px-6 py-4 border-t flex items-center gap-2">
|
||||
<Button
|
||||
variant="outline"
|
||||
@@ -310,7 +320,7 @@ export default function ScanEnginePage() {
|
||||
onClick={() => handleEdit(selectedEngine)}
|
||||
>
|
||||
<Pencil className="h-4 w-4 mr-1.5" />
|
||||
编辑配置
|
||||
{tEngine("editConfig")}
|
||||
</Button>
|
||||
<div className="flex-1" />
|
||||
<Button
|
||||
@@ -321,23 +331,23 @@ export default function ScanEnginePage() {
|
||||
disabled={deleteEngineMutation.isPending}
|
||||
>
|
||||
<Trash2 className="h-4 w-4 mr-1.5" />
|
||||
删除
|
||||
{tCommon("actions.delete")}
|
||||
</Button>
|
||||
</div>
|
||||
</>
|
||||
) : (
|
||||
// 未选中状态
|
||||
// Unselected state
|
||||
<div className="flex-1 flex items-center justify-center">
|
||||
<div className="text-center text-muted-foreground">
|
||||
<Settings className="h-12 w-12 mx-auto mb-3 opacity-50" />
|
||||
<p className="text-sm">选择左侧引擎查看详情</p>
|
||||
<p className="text-sm">{tEngine("selectEngineHint")}</p>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 编辑引擎弹窗 */}
|
||||
{/* Edit engine dialog */}
|
||||
<EngineEditDialog
|
||||
engine={editingEngine}
|
||||
open={isEditDialogOpen}
|
||||
@@ -345,30 +355,30 @@ export default function ScanEnginePage() {
|
||||
onSave={handleSaveYaml}
|
||||
/>
|
||||
|
||||
{/* 新建引擎弹窗 */}
|
||||
{/* Create engine dialog */}
|
||||
<EngineCreateDialog
|
||||
open={isCreateDialogOpen}
|
||||
onOpenChange={setIsCreateDialogOpen}
|
||||
onSave={handleCreateEngine}
|
||||
/>
|
||||
|
||||
{/* 删除确认弹窗 */}
|
||||
{/* Delete confirmation dialog */}
|
||||
<AlertDialog open={deleteDialogOpen} onOpenChange={setDeleteDialogOpen}>
|
||||
<AlertDialogContent>
|
||||
<AlertDialogHeader>
|
||||
<AlertDialogTitle>确认删除</AlertDialogTitle>
|
||||
<AlertDialogTitle>{tConfirm("deleteTitle")}</AlertDialogTitle>
|
||||
<AlertDialogDescription>
|
||||
确定要删除引擎「{engineToDelete?.name}」吗?此操作无法撤销。
|
||||
{tConfirm("deleteEngineMessage", { name: engineToDelete?.name ?? "" })}
|
||||
</AlertDialogDescription>
|
||||
</AlertDialogHeader>
|
||||
<AlertDialogFooter>
|
||||
<AlertDialogCancel>取消</AlertDialogCancel>
|
||||
<AlertDialogCancel>{tCommon("actions.cancel")}</AlertDialogCancel>
|
||||
<AlertDialogAction
|
||||
onClick={confirmDelete}
|
||||
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
|
||||
disabled={deleteEngineMutation.isPending}
|
||||
>
|
||||
{deleteEngineMutation.isPending ? "删除中..." : "删除"}
|
||||
{deleteEngineMutation.isPending ? tConfirm("deleting") : tCommon("actions.delete")}
|
||||
</AlertDialogAction>
|
||||
</AlertDialogFooter>
|
||||
</AlertDialogContent>
|
||||
@@ -7,6 +7,7 @@ import { Target } from "lucide-react"
|
||||
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
import { useScan } from "@/hooks/use-scans"
|
||||
import { useTranslations } from "next-intl"
|
||||
|
||||
export default function ScanHistoryLayout({
|
||||
children,
|
||||
@@ -16,6 +17,7 @@ export default function ScanHistoryLayout({
|
||||
const { id } = useParams<{ id: string }>()
|
||||
const pathname = usePathname()
|
||||
const { data: scanData, isLoading } = useScan(parseInt(id))
|
||||
const t = useTranslations("scan.history")
|
||||
|
||||
const getActiveTab = () => {
|
||||
if (pathname.includes("/subdomain")) return "subdomain"
|
||||
@@ -37,7 +39,7 @@ export default function ScanHistoryLayout({
|
||||
"ip-addresses": `${basePath}/ip-addresses/`,
|
||||
}
|
||||
|
||||
// 从扫描数据中获取各个tab的数量
|
||||
// Get counts for each tab from scan data
|
||||
const counts = {
|
||||
subdomain: scanData?.summary?.subdomains || 0,
|
||||
endpoints: scanData?.summary?.endpoints || 0,
|
||||
@@ -55,7 +57,7 @@ export default function ScanHistoryLayout({
|
||||
<Target />
|
||||
Scan Results
|
||||
</h2>
|
||||
<p className="text-muted-foreground">扫描任务 ID:{id}</p>
|
||||
<p className="text-muted-foreground">{t("taskId", { id })}</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user