mirror of
https://github.com/yyhuni/xingrin.git
synced 2026-01-31 11:46:16 +08:00
Compare commits
137 Commits
v1.3.16-de
...
001-websoc
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b859fc9062 | ||
|
|
49b5fbef28 | ||
|
|
11112a68f6 | ||
|
|
9049b096ba | ||
|
|
ca6c0eb082 | ||
|
|
64bcd9a6f5 | ||
|
|
443e2172e4 | ||
|
|
c6dcfb0a5b | ||
|
|
25ae325c69 | ||
|
|
cab83d89cf | ||
|
|
0f8fff2dc4 | ||
|
|
6e48b97dc2 | ||
|
|
ed757d6e14 | ||
|
|
2aa1afbabf | ||
|
|
35ac64db57 | ||
|
|
b4bfab92e3 | ||
|
|
72210c42d0 | ||
|
|
91aaf7997f | ||
|
|
32e3179d58 | ||
|
|
487f7c84b5 | ||
|
|
b2cc83f569 | ||
|
|
f854cf09be | ||
|
|
7e1c2c187a | ||
|
|
4abb259ca0 | ||
|
|
bbef6af000 | ||
|
|
ba0864ed16 | ||
|
|
f54827829a | ||
|
|
170021130c | ||
|
|
b540f69152 | ||
|
|
d7f1e04855 | ||
|
|
68ad18e6da | ||
|
|
a7542d4a34 | ||
|
|
6f02d9f3c5 | ||
|
|
794846ca7a | ||
|
|
5eea7b2621 | ||
|
|
069527a7f1 | ||
|
|
e542633ad3 | ||
|
|
e8a9606d3b | ||
|
|
dc2e1e027d | ||
|
|
b1847faa3a | ||
|
|
e699842492 | ||
|
|
08a4807bef | ||
|
|
191ff9837b | ||
|
|
679dff9037 | ||
|
|
ce4330b628 | ||
|
|
4ce6b148f8 | ||
|
|
a89f775ee9 | ||
|
|
e3003f33f9 | ||
|
|
3760684b64 | ||
|
|
bfd7e11d09 | ||
|
|
f758feb0d0 | ||
|
|
8798eed337 | ||
|
|
bd1e25cfd5 | ||
|
|
d775055572 | ||
|
|
00dfad60b8 | ||
|
|
a5c48fe4d4 | ||
|
|
85c880731c | ||
|
|
c6b6507412 | ||
|
|
af457dc44c | ||
|
|
9e01a6aa5e | ||
|
|
ed80772e6f | ||
|
|
a22af21dcb | ||
|
|
8de950a7a5 | ||
|
|
9db84221e9 | ||
|
|
0728f3c01d | ||
|
|
4aa7b3d68a | ||
|
|
3946a53337 | ||
|
|
c94fe1ec4b | ||
|
|
6dea525527 | ||
|
|
5b0416972a | ||
|
|
5345a34cbd | ||
|
|
3ca56abc3e | ||
|
|
9703add22d | ||
|
|
f5a489e2d6 | ||
|
|
d75a3f6882 | ||
|
|
59e48e5b15 | ||
|
|
2d2ec93626 | ||
|
|
ced9f811f4 | ||
|
|
aa99b26f50 | ||
|
|
8342f196db | ||
|
|
1bd2a6ed88 | ||
|
|
033ff89aee | ||
|
|
4284a0cd9a | ||
|
|
943a4cb960 | ||
|
|
eb2d853b76 | ||
|
|
1184c18b74 | ||
|
|
8a6f1b6f24 | ||
|
|
255d505aba | ||
|
|
d06a9bab1f | ||
|
|
6d5c776bf7 | ||
|
|
bf058dd67b | ||
|
|
0532d7c8b8 | ||
|
|
2ee9b5ffa2 | ||
|
|
648a1888d4 | ||
|
|
2508268a45 | ||
|
|
c60383940c | ||
|
|
47298c294a | ||
|
|
eba394e14e | ||
|
|
592a1958c4 | ||
|
|
38e2856c08 | ||
|
|
f5ad8e68e9 | ||
|
|
d5f91a236c | ||
|
|
24ae8b5aeb | ||
|
|
86f43f94a0 | ||
|
|
53ba03d1e5 | ||
|
|
89c44ebd05 | ||
|
|
e0e3419edb | ||
|
|
52ee4684a7 | ||
|
|
ce8cebf11d | ||
|
|
ec006d8f54 | ||
|
|
48976a570f | ||
|
|
5da7229873 | ||
|
|
8bb737a9fa | ||
|
|
2d018d33f3 | ||
|
|
0c07cc8497 | ||
|
|
225b039985 | ||
|
|
d1624627bc | ||
|
|
7bb15e4ae4 | ||
|
|
8e8cc29669 | ||
|
|
d6d5338acb | ||
|
|
c521bdb511 | ||
|
|
abf2d95f6f | ||
|
|
ab58cf0d85 | ||
|
|
fb0111adf2 | ||
|
|
161ee9a2b1 | ||
|
|
0cf75585d5 | ||
|
|
1d8d5f51d9 | ||
|
|
3f8de07c8c | ||
|
|
cd5c2b9f11 | ||
|
|
54786c22dd | ||
|
|
d468f975ab | ||
|
|
a85a12b8ad | ||
|
|
a8b0d97b7b | ||
|
|
b8504921c2 | ||
|
|
ecfc1822fb | ||
|
|
81633642e6 | ||
|
|
6ff86e14ec |
45
.github/workflows/check-generated-files.yml
vendored
Normal file
45
.github/workflows/check-generated-files.yml
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
name: Check Generated Files
|
||||
|
||||
on:
|
||||
workflow_call: # 只在被其他 workflow 调用时运行
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.21
|
||||
|
||||
- name: Generate files for all workflows
|
||||
working-directory: worker
|
||||
run: make generate
|
||||
|
||||
- name: Check for differences
|
||||
run: |
|
||||
if ! git diff --exit-code; then
|
||||
echo "❌ Generated files are out of date!"
|
||||
echo "Please run: cd worker && make generate"
|
||||
echo ""
|
||||
echo "Changed files:"
|
||||
git status --porcelain
|
||||
echo ""
|
||||
echo "Diff:"
|
||||
git diff
|
||||
exit 1
|
||||
fi
|
||||
echo "✅ Generated files are up to date"
|
||||
|
||||
- name: Run metadata consistency tests
|
||||
working-directory: worker
|
||||
run: make test-metadata
|
||||
|
||||
- name: Run all tests
|
||||
working-directory: worker
|
||||
run: make test
|
||||
159
.gitignore
vendored
159
.gitignore
vendored
@@ -1,136 +1,51 @@
|
||||
# ============================
|
||||
# 操作系统相关文件
|
||||
# ============================
|
||||
.DS_Store
|
||||
.DS_Store?
|
||||
._*
|
||||
.Spotlight-V100
|
||||
.Trashes
|
||||
ehthumbs.db
|
||||
Thumbs.db
|
||||
# Go
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
*.test
|
||||
*.out
|
||||
vendor/
|
||||
go.work
|
||||
|
||||
# ============================
|
||||
# 前端 (Next.js/Node.js) 相关
|
||||
# ============================
|
||||
# 依赖目录
|
||||
front-back/node_modules/
|
||||
front-back/.pnpm-store/
|
||||
# Build artifacts
|
||||
dist/
|
||||
build/
|
||||
bin/
|
||||
|
||||
# Next.js 构建产物
|
||||
front-back/.next/
|
||||
front-back/out/
|
||||
front-back/dist/
|
||||
|
||||
# 环境变量文件
|
||||
front-back/.env
|
||||
front-back/.env.local
|
||||
front-back/.env.development.local
|
||||
front-back/.env.test.local
|
||||
front-back/.env.production.local
|
||||
|
||||
# 运行时和缓存
|
||||
front-back/.turbo/
|
||||
front-back/.swc/
|
||||
front-back/.eslintcache
|
||||
front-back/.tsbuildinfo
|
||||
|
||||
# ============================
|
||||
# 后端 (Python/Django) 相关
|
||||
# ============================
|
||||
# Python 虚拟环境
|
||||
.venv/
|
||||
venv/
|
||||
env/
|
||||
ENV/
|
||||
|
||||
# Python 编译文件
|
||||
*.pyc
|
||||
*.pyo
|
||||
*.pyd
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# Django 相关
|
||||
backend/db.sqlite3
|
||||
backend/db.sqlite3-journal
|
||||
backend/media/
|
||||
backend/staticfiles/
|
||||
backend/.env
|
||||
backend/.env.local
|
||||
|
||||
# Python 测试和覆盖率
|
||||
.pytest_cache/
|
||||
.coverage
|
||||
htmlcov/
|
||||
*.cover
|
||||
|
||||
# ============================
|
||||
# 后端 (Go) 相关
|
||||
# ============================
|
||||
# 编译产物
|
||||
backend/bin/
|
||||
backend/dist/
|
||||
backend/*.exe
|
||||
backend/*.exe~
|
||||
backend/*.dll
|
||||
backend/*.so
|
||||
backend/*.dylib
|
||||
|
||||
# 测试相关
|
||||
backend/*.test
|
||||
backend/*.out
|
||||
backend/*.prof
|
||||
|
||||
# Go workspace 文件
|
||||
backend/go.work
|
||||
backend/go.work.sum
|
||||
|
||||
# Go 依赖管理
|
||||
backend/vendor/
|
||||
|
||||
# ============================
|
||||
# IDE 和编辑器相关
|
||||
# ============================
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
.cursor/
|
||||
.claude/
|
||||
.kiro/
|
||||
.playwright-mcp/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
.DS_Store
|
||||
|
||||
# ============================
|
||||
# Docker 相关
|
||||
# ============================
|
||||
docker/.env
|
||||
docker/.env.local
|
||||
|
||||
# SSL 证书和私钥(不应提交)
|
||||
docker/nginx/ssl/*.pem
|
||||
docker/nginx/ssl/*.key
|
||||
docker/nginx/ssl/*.crt
|
||||
|
||||
# ============================
|
||||
# 日志文件和扫描结果
|
||||
# ============================
|
||||
# Environment
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
*.log
|
||||
logs/
|
||||
results/
|
||||
.venv/
|
||||
|
||||
# 开发脚本运行时文件(进程 ID 和启动日志)
|
||||
backend/scripts/dev/.pids/
|
||||
# Testing
|
||||
coverage.txt
|
||||
*.coverprofile
|
||||
.hypothesis/
|
||||
|
||||
# ============================
|
||||
# 临时文件
|
||||
# ============================
|
||||
# Temporary files
|
||||
*.tmp
|
||||
tmp/
|
||||
temp/
|
||||
.cache/
|
||||
|
||||
HGETALL
|
||||
KEYS
|
||||
vuln_scan/input_endpoints.txt
|
||||
open-in-v0
|
||||
.kiro/
|
||||
.claude/
|
||||
.specify/
|
||||
|
||||
# AI Assistant directories
|
||||
codex/
|
||||
openspec/
|
||||
specs/
|
||||
AGENTS.md
|
||||
WARP.md
|
||||
|
||||
4
.vscode/settings.json
vendored
Normal file
4
.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"typescript.autoClosingTags": false,
|
||||
"kiroAgent.configureMCP": "Enabled"
|
||||
}
|
||||
303
README.md
303
README.md
@@ -1,303 +0,0 @@
|
||||
<h1 align="center">XingRin - 星环</h1>
|
||||
|
||||
<p align="center">
|
||||
<b>🛡️ 攻击面管理平台 (ASM) | 自动化资产发现与漏洞扫描系统</b>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/yyhuni/xingrin/stargazers"><img src="https://img.shields.io/github/stars/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub stars"></a>
|
||||
<a href="https://github.com/yyhuni/xingrin/network/members"><img src="https://img.shields.io/github/forks/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub forks"></a>
|
||||
<a href="https://github.com/yyhuni/xingrin/issues"><img src="https://img.shields.io/github/issues/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub issues"></a>
|
||||
<a href="https://github.com/yyhuni/xingrin/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-PolyForm%20NC-blue?style=flat-square" alt="License"></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="#-功能特性">功能特性</a> •
|
||||
<a href="#-全局资产搜索">资产搜索</a> •
|
||||
<a href="#-快速开始">快速开始</a> •
|
||||
<a href="#-文档">文档</a> •
|
||||
<a href="#-反馈与贡献">反馈与贡献</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<sub>🔍 关键词: ASM | 攻击面管理 | 漏洞扫描 | 资产发现 | 资产搜索 | Bug Bounty | 渗透测试 | Nuclei | 子域名枚举 | EASM</sub>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
## 🌐 在线 Demo
|
||||
|
||||
👉 **[https://xingrin.vercel.app/](https://xingrin.vercel.app/)**
|
||||
|
||||
> ⚠️ 仅用于 UI 展示,未接入后端数据库
|
||||
|
||||
---
|
||||
|
||||
<p align="center">
|
||||
<b>🎨 现代化 UI </b>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img src="docs/screenshots/light.png" alt="Light Mode" width="24%">
|
||||
<img src="docs/screenshots/bubblegum.png" alt="Bubblegum" width="24%">
|
||||
<img src="docs/screenshots/cosmic-night.png" alt="Cosmic Night" width="24%">
|
||||
<img src="docs/screenshots/quantum-rose.png" alt="Quantum Rose" width="24%">
|
||||
</p>
|
||||
|
||||
## 📚 文档
|
||||
|
||||
- [📖 技术文档](./docs/README.md) - 技术文档导航(🚧 持续完善中)
|
||||
- [🚀 快速开始](./docs/quick-start.md) - 一键安装和部署指南
|
||||
- [🔄 版本管理](./docs/version-management.md) - Git Tag 驱动的自动化版本管理系统
|
||||
- [📦 Nuclei 模板架构](./docs/nuclei-template-architecture.md) - 模板仓库的存储与同步
|
||||
- [📖 字典文件架构](./docs/wordlist-architecture.md) - 字典文件的存储与同步
|
||||
- [🔍 扫描流程架构](./docs/scan-flow-architecture.md) - 完整扫描流程与工具编排
|
||||
|
||||
|
||||
---
|
||||
|
||||
## ✨ 功能特性
|
||||
|
||||
### 🎯 目标与资产管理
|
||||
- **组织管理** - 多层级目标组织,灵活分组
|
||||
- **目标管理** - 支持域名、IP目标类型
|
||||
- **资产发现** - 子域名、网站、端点、目录自动发现
|
||||
- **资产快照** - 扫描结果快照对比,追踪资产变化
|
||||
|
||||
### 🔍 漏洞扫描
|
||||
- **多引擎支持** - 集成 Nuclei 等主流扫描引擎
|
||||
- **自定义流程** - YAML 配置扫描流程,灵活编排
|
||||
- **定时扫描** - Cron 表达式配置,自动化周期扫描
|
||||
|
||||
### 🔖 指纹识别
|
||||
- **多源指纹库** - 内置 EHole、Goby、Wappalyzer、Fingers、FingerPrintHub、ARL 等 2.7W+ 指纹规则
|
||||
- **自动识别** - 扫描流程自动执行,识别 Web 应用技术栈
|
||||
- **指纹管理** - 支持查询、导入、导出指纹规则
|
||||
|
||||
#### 扫描流程架构
|
||||
|
||||
完整的扫描流程包括:子域名发现、端口扫描、站点发现、指纹识别、URL 收集、目录扫描、漏洞扫描等阶段
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
START["开始扫描"]
|
||||
|
||||
subgraph STAGE1["阶段 1: 资产发现"]
|
||||
direction TB
|
||||
SUB["子域名发现<br/>subfinder, amass, puredns"]
|
||||
PORT["端口扫描<br/>naabu"]
|
||||
SITE["站点识别<br/>httpx"]
|
||||
FINGER["指纹识别<br/>xingfinger"]
|
||||
SUB --> PORT --> SITE --> FINGER
|
||||
end
|
||||
|
||||
subgraph STAGE2["阶段 2: 深度分析"]
|
||||
direction TB
|
||||
URL["URL 收集<br/>waymore, katana"]
|
||||
DIR["目录扫描<br/>ffuf"]
|
||||
end
|
||||
|
||||
subgraph STAGE3["阶段 3: 漏洞检测"]
|
||||
VULN["漏洞扫描<br/>nuclei, dalfox"]
|
||||
end
|
||||
|
||||
FINISH["扫描完成"]
|
||||
|
||||
START --> STAGE1
|
||||
FINGER --> STAGE2
|
||||
STAGE2 --> STAGE3
|
||||
STAGE3 --> FINISH
|
||||
|
||||
style START fill:#34495e,stroke:#2c3e50,stroke-width:2px,color:#fff
|
||||
style FINISH fill:#27ae60,stroke:#229954,stroke-width:2px,color:#fff
|
||||
style STAGE1 fill:#3498db,stroke:#2980b9,stroke-width:2px,color:#fff
|
||||
style STAGE2 fill:#9b59b6,stroke:#8e44ad,stroke-width:2px,color:#fff
|
||||
style STAGE3 fill:#e67e22,stroke:#d35400,stroke-width:2px,color:#fff
|
||||
style SUB fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
|
||||
style PORT fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
|
||||
style SITE fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
|
||||
style FINGER fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
|
||||
style URL fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
|
||||
style DIR fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
|
||||
style VULN fill:#f0b27a,stroke:#e67e22,stroke-width:1px,color:#fff
|
||||
```
|
||||
|
||||
详细说明请查看 [扫描流程架构文档](./docs/scan-flow-architecture.md)
|
||||
|
||||
### 🖥️ 分布式架构
|
||||
- **多节点扫描** - 支持部署多个 Worker 节点,横向扩展扫描能力
|
||||
- **本地节点** - 零配置,安装即自动注册本地 Docker Worker
|
||||
- **远程节点** - SSH 一键部署远程 VPS 作为扫描节点
|
||||
- **负载感知调度** - 实时感知节点负载,自动分发任务到最优节点
|
||||
- **节点监控** - 实时心跳检测,CPU/内存/磁盘状态监控
|
||||
- **断线重连** - 节点离线自动检测,恢复后自动重新接入
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph MASTER["主服务器 (Master Server)"]
|
||||
direction TB
|
||||
|
||||
REDIS["Redis 负载缓存"]
|
||||
|
||||
subgraph SCHEDULER["任务调度器 (Task Distributor)"]
|
||||
direction TB
|
||||
SUBMIT["接收扫描任务"]
|
||||
SELECT["负载感知选择"]
|
||||
DISPATCH["智能分发"]
|
||||
|
||||
SUBMIT --> SELECT
|
||||
SELECT --> DISPATCH
|
||||
end
|
||||
|
||||
REDIS -.负载数据.-> SELECT
|
||||
end
|
||||
|
||||
subgraph WORKERS["Worker 节点集群"]
|
||||
direction TB
|
||||
|
||||
W1["Worker 1 (本地)<br/>CPU: 45% | MEM: 60%"]
|
||||
W2["Worker 2 (远程)<br/>CPU: 30% | MEM: 40%"]
|
||||
W3["Worker N (远程)<br/>CPU: 90% | MEM: 85%"]
|
||||
end
|
||||
|
||||
DISPATCH -->|任务分发| W1
|
||||
DISPATCH -->|任务分发| W2
|
||||
DISPATCH -->|高负载跳过| W3
|
||||
|
||||
W1 -.心跳上报.-> REDIS
|
||||
W2 -.心跳上报.-> REDIS
|
||||
W3 -.心跳上报.-> REDIS
|
||||
```
|
||||
|
||||
### 🔎 全局资产搜索
|
||||
- **多类型搜索** - 支持 Website 和 Endpoint 两种资产类型
|
||||
- **表达式语法** - 支持 `=`(模糊)、`==`(精确)、`!=`(不等于)操作符
|
||||
- **逻辑组合** - 支持 `&&` (AND) 和 `||` (OR) 逻辑组合
|
||||
- **多字段查询** - 支持 host、url、title、tech、status、body、header 字段
|
||||
- **CSV 导出** - 流式导出全部搜索结果,无数量限制
|
||||
|
||||
#### 搜索语法示例
|
||||
|
||||
```bash
|
||||
# 基础搜索
|
||||
host="api" # host 包含 "api"
|
||||
status=="200" # 状态码精确等于 200
|
||||
tech="nginx" # 技术栈包含 nginx
|
||||
|
||||
# 组合搜索
|
||||
host="api" && status=="200" # host 包含 api 且状态码为 200
|
||||
tech="vue" || tech="react" # 技术栈包含 vue 或 react
|
||||
|
||||
# 复杂查询
|
||||
host="admin" && tech="php" && status=="200"
|
||||
url="/api/v1" && status!="404"
|
||||
```
|
||||
|
||||
### 📊 可视化界面
|
||||
- **数据统计** - 资产/漏洞统计仪表盘
|
||||
- **实时通知** - WebSocket 消息推送
|
||||
- **通知推送** - 实时企业微信,tg,discard消息推送服务
|
||||
|
||||
---
|
||||
|
||||
## 📦 快速开始
|
||||
|
||||
### 环境要求
|
||||
|
||||
- **操作系统**: Ubuntu 20.04+ / Debian 11+
|
||||
- **系统架构**: AMD64 (x86_64) / ARM64 (aarch64)
|
||||
- **硬件**: 2核 4G 内存起步,20GB+ 磁盘空间
|
||||
|
||||
### 一键安装
|
||||
|
||||
```bash
|
||||
# 克隆项目
|
||||
git clone https://github.com/yyhuni/xingrin.git
|
||||
cd xingrin
|
||||
|
||||
# 安装并启动(生产模式)
|
||||
sudo ./install.sh
|
||||
|
||||
# 🇨🇳 中国大陆用户推荐使用镜像加速(第三方加速服务可能会失效,不保证长期可用)
|
||||
sudo ./install.sh --mirror
|
||||
```
|
||||
|
||||
> **💡 --mirror 参数说明**
|
||||
> - 自动配置 Docker 镜像加速(国内镜像源)
|
||||
> - 加速 Git 仓库克隆(Nuclei 模板等)
|
||||
> - 大幅提升安装速度,避免网络超时
|
||||
|
||||
### 访问服务
|
||||
|
||||
- **Web 界面**: `https://ip:8083`
|
||||
- **默认账号**: admin / admin(首次登录后请修改密码)
|
||||
|
||||
### 常用命令
|
||||
|
||||
```bash
|
||||
# 启动服务
|
||||
sudo ./start.sh
|
||||
|
||||
# 停止服务
|
||||
sudo ./stop.sh
|
||||
|
||||
# 重启服务
|
||||
sudo ./restart.sh
|
||||
|
||||
# 卸载
|
||||
sudo ./uninstall.sh
|
||||
```
|
||||
|
||||
## 🤝 反馈与贡献
|
||||
|
||||
- 💡 **发现 Bug,有新想法,比如UI设计,功能设计等** 欢迎点击右边链接进行提交建议 [Issue](https://github.com/yyhuni/xingrin/issues) 或者公众号私信
|
||||
|
||||
## 📧 联系
|
||||
- 微信公众号: **塔罗安全学苑**
|
||||
- 微信群去公众号底下的菜单,有个交流群,点击就可以看到了,链接过期可以私信我拉你
|
||||
|
||||
<img src="docs/wechat-qrcode.png" alt="微信公众号" width="200">
|
||||
|
||||
|
||||
## ⚠️ 免责声明
|
||||
|
||||
**重要:请在使用前仔细阅读**
|
||||
|
||||
1. 本工具仅供**授权的安全测试**和**安全研究**使用
|
||||
2. 使用者必须确保已获得目标系统的**合法授权**
|
||||
3. **严禁**将本工具用于未经授权的渗透测试或攻击行为
|
||||
4. 未经授权扫描他人系统属于**违法行为**,可能面临法律责任
|
||||
5. 开发者**不对任何滥用行为负责**
|
||||
|
||||
使用本工具即表示您同意:
|
||||
- 仅在合法授权范围内使用
|
||||
- 遵守所在地区的法律法规
|
||||
- 承担因滥用产生的一切后果
|
||||
|
||||
## 🌟 Star History
|
||||
|
||||
如果这个项目对你有帮助,请给一个 ⭐ Star 支持一下!
|
||||
|
||||
[](https://star-history.com/#yyhuni/xingrin&Date)
|
||||
|
||||
## 📄 许可证
|
||||
|
||||
本项目采用 [GNU General Public License v3.0](LICENSE) 许可证。
|
||||
|
||||
### 允许的用途
|
||||
|
||||
- ✅ 个人学习和研究
|
||||
- ✅ 商业和非商业使用
|
||||
- ✅ 修改和分发
|
||||
- ✅ 专利使用
|
||||
- ✅ 私人使用
|
||||
|
||||
### 义务和限制
|
||||
|
||||
- 📋 **开源义务**:分发时必须提供源代码
|
||||
- 📋 **相同许可**:衍生作品必须使用相同许可证
|
||||
- 📋 **版权声明**:必须保留原始版权和许可证声明
|
||||
- ❌ **责任免除**:不提供任何担保
|
||||
- ❌ 未经授权的渗透测试
|
||||
- ❌ 任何违法行为
|
||||
|
||||
32
agent/go.mod
Normal file
32
agent/go.mod
Normal file
@@ -0,0 +1,32 @@
|
||||
module github.com/yyhuni/orbit/agent
|
||||
|
||||
go 1.24.5
|
||||
|
||||
require (
|
||||
github.com/docker/docker v28.5.2+incompatible
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/shirou/gopsutil/v3 v3.24.5
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect
|
||||
go.opentelemetry.io/otel v1.39.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.39.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.39.0 // indirect
|
||||
golang.org/x/sys v0.39.0 // indirect
|
||||
)
|
||||
78
agent/go.sum
Normal file
78
agent/go.sum
Normal file
@@ -0,0 +1,78 @@
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
|
||||
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
|
||||
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
|
||||
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
|
||||
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
|
||||
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
|
||||
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
|
||||
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
|
||||
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
|
||||
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
@@ -1,710 +0,0 @@
|
||||
"""
|
||||
目录扫描 Flow
|
||||
|
||||
负责编排目录扫描的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 支持并发执行扫描工具(使用 ThreadPoolTaskRunner)
|
||||
- 每个 Task 可独立重试
|
||||
- 配置由 YAML 解析
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
from prefect import flow
|
||||
from prefect.task_runners import ThreadPoolTaskRunner
|
||||
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import List, Tuple
|
||||
|
||||
from apps.scan.tasks.directory_scan import (
|
||||
export_sites_task,
|
||||
run_and_stream_save_directories_task
|
||||
)
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.utils import config_parser, build_scan_command, ensure_wordlist_local, user_log
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# 默认最大并发数
|
||||
DEFAULT_MAX_WORKERS = 5
|
||||
|
||||
|
||||
def calculate_directory_scan_timeout(
|
||||
tool_config: dict,
|
||||
base_per_word: float = 1.0,
|
||||
min_timeout: int = 60,
|
||||
max_timeout: int = 7200
|
||||
) -> int:
|
||||
"""
|
||||
根据字典行数计算目录扫描超时时间
|
||||
|
||||
计算公式:超时时间 = 字典行数 × 每个单词基础时间
|
||||
超时范围:60秒 ~ 2小时(7200秒)
|
||||
|
||||
Args:
|
||||
tool_config: 工具配置字典,包含 wordlist 路径
|
||||
base_per_word: 每个单词的基础时间(秒),默认 1.0秒
|
||||
min_timeout: 最小超时时间(秒),默认 60秒
|
||||
max_timeout: 最大超时时间(秒),默认 7200秒(2小时)
|
||||
|
||||
Returns:
|
||||
int: 计算出的超时时间(秒),范围:60 ~ 7200
|
||||
|
||||
Example:
|
||||
# 1000行字典 × 1.0秒 = 1000秒 → 限制为7200秒中的 1000秒
|
||||
# 10000行字典 × 1.0秒 = 10000秒 → 限制为7200秒(最大值)
|
||||
timeout = calculate_directory_scan_timeout(
|
||||
tool_config={'wordlist': '/path/to/wordlist.txt'}
|
||||
)
|
||||
"""
|
||||
try:
|
||||
# 从 tool_config 中获取 wordlist 路径
|
||||
wordlist_path = tool_config.get('wordlist')
|
||||
if not wordlist_path:
|
||||
logger.warning("工具配置中未指定 wordlist,使用默认超时: %d秒", min_timeout)
|
||||
return min_timeout
|
||||
|
||||
# 展开用户目录(~)
|
||||
wordlist_path = os.path.expanduser(wordlist_path)
|
||||
|
||||
# 检查文件是否存在
|
||||
if not os.path.exists(wordlist_path):
|
||||
logger.warning("字典文件不存在: %s,使用默认超时: %d秒", wordlist_path, min_timeout)
|
||||
return min_timeout
|
||||
|
||||
# 使用 wc -l 快速统计字典行数
|
||||
result = subprocess.run(
|
||||
['wc', '-l', wordlist_path],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True
|
||||
)
|
||||
# wc -l 输出格式:行数 + 空格 + 文件名
|
||||
line_count = int(result.stdout.strip().split()[0])
|
||||
|
||||
# 计算超时时间
|
||||
timeout = int(line_count * base_per_word)
|
||||
|
||||
# 设置合理的下限(不再设置上限)
|
||||
timeout = max(min_timeout, timeout)
|
||||
|
||||
logger.info(
|
||||
"目录扫描超时计算 - 字典: %s, 行数: %d, 基础时间: %.3f秒/词, 计算超时: %d秒",
|
||||
wordlist_path, line_count, base_per_word, timeout
|
||||
)
|
||||
|
||||
return timeout
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
logger.error("统计字典行数失败: %s", e)
|
||||
# 失败时返回默认超时
|
||||
return min_timeout
|
||||
except (ValueError, IndexError) as e:
|
||||
logger.error("解析字典行数失败: %s", e)
|
||||
return min_timeout
|
||||
except Exception as e:
|
||||
logger.error("计算超时时间异常: %s", e)
|
||||
return min_timeout
|
||||
|
||||
|
||||
def _get_max_workers(tool_config: dict, default: int = DEFAULT_MAX_WORKERS) -> int:
|
||||
"""
|
||||
从单个工具配置中获取 max_workers 参数
|
||||
|
||||
Args:
|
||||
tool_config: 单个工具的配置字典,如 {'max_workers': 10, 'threads': 5, ...}
|
||||
default: 默认值,默认为 5
|
||||
|
||||
Returns:
|
||||
int: max_workers 值
|
||||
"""
|
||||
if not isinstance(tool_config, dict):
|
||||
return default
|
||||
|
||||
# 支持 max_workers 和 max-workers(YAML 中划线会被转换)
|
||||
max_workers = tool_config.get('max_workers') or tool_config.get('max-workers')
|
||||
if max_workers is not None and isinstance(max_workers, int) and max_workers > 0:
|
||||
return max_workers
|
||||
return default
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def _export_site_urls(target_id: int, target_name: str, directory_scan_dir: Path) -> tuple[str, int]:
|
||||
"""
|
||||
导出目标下的所有站点 URL 到文件(支持懒加载)
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
target_name: 目标名称(用于懒加载创建默认站点)
|
||||
directory_scan_dir: 目录扫描目录
|
||||
|
||||
Returns:
|
||||
tuple: (sites_file, site_count)
|
||||
|
||||
Raises:
|
||||
ValueError: 站点数量为 0
|
||||
"""
|
||||
logger.info("Step 1: 导出目标的所有站点 URL")
|
||||
|
||||
sites_file = str(directory_scan_dir / 'sites.txt')
|
||||
export_result = export_sites_task(
|
||||
target_id=target_id,
|
||||
output_file=sites_file,
|
||||
batch_size=1000 # 每次读取 1000 条,优化内存占用
|
||||
)
|
||||
|
||||
site_count = export_result['total_count']
|
||||
|
||||
logger.info(
|
||||
"✓ 站点 URL 导出完成 - 文件: %s, 数量: %d",
|
||||
export_result['output_file'],
|
||||
site_count
|
||||
)
|
||||
|
||||
if site_count == 0:
|
||||
logger.warning("目标下没有站点,无法执行目录扫描")
|
||||
# 不抛出异常,由上层决定如何处理
|
||||
# raise ValueError("目标下没有站点,无法执行目录扫描")
|
||||
|
||||
return export_result['output_file'], site_count
|
||||
|
||||
|
||||
def _run_scans_sequentially(
    enabled_tools: dict,
    sites_file: str,
    directory_scan_dir: Path,
    scan_id: int,
    target_id: int,
    site_count: int,
    target_name: str
) -> tuple[int, int, list]:
    """
    Run directory scans serially, tool by tool and site by site.

    DEPRECATED: kept only for backward compatibility; the concurrent variant
    (_run_scans_concurrently) is the active code path.

    Args:
        enabled_tools: mapping of tool name -> tool config dict
        sites_file: path of the exported site-URL file (one URL per line)
        directory_scan_dir: working directory for logs and tool output
        scan_id: scan task ID
        target_id: target ID
        site_count: number of sites (informational; the file is re-read here)
        target_name: target name (for error logging)

    Returns:
        tuple: (total_directories, processed_sites, failed_sites)
    """
    # Load the site list from the exported file, skipping blank lines.
    sites = []
    with open(sites_file, 'r', encoding='utf-8') as f:
        for line in f:
            site_url = line.strip()
            if site_url:
                sites.append(site_url)

    logger.info("准备扫描 %d 个站点,使用工具: %s", len(sites), ', '.join(enabled_tools.keys()))

    total_directories = 0
    processed_sites_set = set()  # set avoids double-counting a site scanned by multiple tools
    failed_sites = []

    # Outer loop: one pass over all sites per enabled tool.
    for tool_name, tool_config in enabled_tools.items():
        logger.info("="*60)
        logger.info("使用工具: %s", tool_name)
        logger.info("="*60)

        # If wordlist_name is configured, make sure the wordlist exists locally
        # (ensure_wordlist_local also performs a hash check) before scanning.
        wordlist_name = tool_config.get('wordlist_name')
        if wordlist_name:
            try:
                local_wordlist_path = ensure_wordlist_local(wordlist_name)
                tool_config['wordlist'] = local_wordlist_path
            except Exception as exc:
                logger.error("为工具 %s 准备字典失败: %s", tool_name, exc)
                # This tool cannot run at all: mark every site failed and
                # move on to the next tool.
                failed_sites.extend(sites)
                continue

        # Inner loop: scan each site one at a time.
        for idx, site_url in enumerate(sites, 1):
            logger.info(
                "[%d/%d] 开始扫描站点: %s (工具: %s)",
                idx, len(sites), site_url, tool_name
            )

            # Build the tool command via the shared command builder.
            try:
                command = build_scan_command(
                    tool_name=tool_name,
                    scan_type='directory_scan',
                    command_params={
                        'url': site_url
                    },
                    tool_config=tool_config
                )
            except Exception as e:
                logger.error(
                    "✗ [%d/%d] 构建 %s 命令失败: %s - 站点: %s",
                    idx, len(sites), tool_name, e, site_url
                )
                failed_sites.append(site_url)
                continue

            # Per-site timeout from config; the literal string 'auto' means
            # "derive it from the wordlist size".
            site_timeout = tool_config.get('timeout', 300)
            if site_timeout == 'auto':
                site_timeout = calculate_directory_scan_timeout(tool_config)
                logger.info(f"✓ 工具 {tool_name} 动态计算 timeout: {site_timeout}秒")

            # Per-run log file named by tool, timestamp and site index.
            from datetime import datetime
            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            log_file = directory_scan_dir / f"{tool_name}_{timestamp}_{idx}.log"

            try:
                # Direct (blocking) task call -- this is the serial path.
                result = run_and_stream_save_directories_task(
                    cmd=command,
                    tool_name=tool_name,
                    scan_id=scan_id,
                    target_id=target_id,
                    site_url=site_url,
                    cwd=str(directory_scan_dir),
                    shell=True,
                    batch_size=1000,
                    timeout=site_timeout,
                    log_file=str(log_file)
                )

                total_directories += result.get('created_directories', 0)
                processed_sites_set.add(site_url)

                logger.info(
                    "✓ [%d/%d] 站点扫描完成: %s - 发现 %d 个目录",
                    idx, len(sites), site_url,
                    result.get('created_directories', 0)
                )

            except subprocess.TimeoutExpired as exc:
                # Timeout is handled separately: partial results streamed
                # before the deadline are already persisted.
                failed_sites.append(site_url)
                logger.warning(
                    "⚠️ [%d/%d] 站点扫描超时: %s - 超时配置: %d秒\n"
                    "注意:超时前已解析的目录数据已保存到数据库,但扫描未完全完成。",
                    idx, len(sites), site_url, site_timeout
                )
            except Exception as exc:
                # Any other failure: record and keep scanning remaining sites.
                failed_sites.append(site_url)
                logger.error(
                    "✗ [%d/%d] 站点扫描失败: %s - 错误: %s",
                    idx, len(sites), site_url, exc
                )

            # Progress line every 10 sites.
            if idx % 10 == 0:
                logger.info(
                    "进度: %d/%d (%.1f%%) - 已发现 %d 个目录",
                    idx, len(sites), idx/len(sites)*100, total_directories
                )

    # Summarize success/failure counts.
    processed_count = len(processed_sites_set)

    if failed_sites:
        logger.warning(
            "部分站点扫描失败: %d/%d",
            len(failed_sites), len(sites)
        )

    logger.info(
        "✓ 串行目录扫描执行完成 - 成功: %d/%d, 失败: %d, 总目录数: %d",
        processed_count, len(sites), len(failed_sites), total_directories
    )

    return total_directories, processed_count, failed_sites
|
||||
|
||||
|
||||
def _generate_log_filename(tool_name: str, site_url: str, directory_scan_dir: Path) -> Path:
|
||||
"""
|
||||
生成唯一的日志文件名
|
||||
|
||||
使用 URL 的 hash 确保并发时不会冲突
|
||||
|
||||
Args:
|
||||
tool_name: 工具名称
|
||||
site_url: 站点 URL
|
||||
directory_scan_dir: 目录扫描目录
|
||||
|
||||
Returns:
|
||||
Path: 日志文件路径
|
||||
"""
|
||||
url_hash = hashlib.md5(site_url.encode()).hexdigest()[:8]
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
|
||||
return directory_scan_dir / f"{tool_name}_{url_hash}_{timestamp}.log"
|
||||
|
||||
|
||||
def _run_scans_concurrently(
    enabled_tools: dict,
    sites_file: str,
    directory_scan_dir: Path,
    scan_id: int,
    target_id: int,
    site_count: int,
    target_name: str
) -> Tuple[int, int, List[str]]:
    """
    Run directory scans concurrently in bounded batches per tool.

    For each enabled tool, sites are scanned in batches of at most
    ``max_workers`` concurrently submitted tasks; a batch must finish before
    the next one starts, which caps the number of live scanner processes.

    Args:
        enabled_tools: mapping of tool name -> tool config dict
        sites_file: path of the exported site-URL file (one URL per line)
        directory_scan_dir: working directory for logs and tool output
        scan_id: scan task ID
        target_id: target ID
        site_count: number of sites (informational; the file is re-read here)
        target_name: target name (for error logging)

    Returns:
        tuple: (total_directories, processed_sites, failed_sites)
    """
    # Load the site list from the exported file, skipping blank lines.
    sites: List[str] = []
    with open(sites_file, 'r', encoding='utf-8') as f:
        for line in f:
            site_url = line.strip()
            if site_url:
                sites.append(site_url)

    if not sites:
        logger.warning("站点列表为空")
        return 0, 0, []

    logger.info(
        "准备并发扫描 %d 个站点,使用工具: %s",
        len(sites), ', '.join(enabled_tools.keys())
    )

    total_directories = 0
    processed_sites_count = 0
    failed_sites: List[str] = []

    # One full pass over all sites per enabled tool.
    for tool_name, tool_config in enabled_tools.items():
        # Each tool reads its own max_workers from its config.
        max_workers = _get_max_workers(tool_config)

        logger.info("="*60)
        logger.info("使用工具: %s (并发模式, max_workers=%d)", tool_name, max_workers)
        logger.info("="*60)
        user_log(scan_id, "directory_scan", f"Running {tool_name}")

        # If wordlist_name is configured, make sure the wordlist exists locally
        # (ensure_wordlist_local also performs a hash check) before scanning.
        wordlist_name = tool_config.get('wordlist_name')
        if wordlist_name:
            try:
                local_wordlist_path = ensure_wordlist_local(wordlist_name)
                tool_config['wordlist'] = local_wordlist_path
            except Exception as exc:
                logger.error("为工具 %s 准备字典失败: %s", tool_name, exc)
                # This tool cannot run at all: mark every site failed and
                # move on to the next tool.
                failed_sites.extend(sites)
                continue

        # Per-site timeout, shared by all sites for this tool; the literal
        # string 'auto' means "derive it from the wordlist size".
        site_timeout = tool_config.get('timeout', 300)
        if site_timeout == 'auto':
            site_timeout = calculate_directory_scan_timeout(tool_config)
            logger.info(f"✓ 工具 {tool_name} 动态计算 timeout: {site_timeout}秒")

        # Pre-build per-site scan parameters (command, log file, timeout);
        # sites whose command fails to build are recorded as failed up front.
        scan_params_list = []
        for idx, site_url in enumerate(sites, 1):
            try:
                command = build_scan_command(
                    tool_name=tool_name,
                    scan_type='directory_scan',
                    command_params={'url': site_url},
                    tool_config=tool_config
                )
                log_file = _generate_log_filename(tool_name, site_url, directory_scan_dir)
                scan_params_list.append({
                    'idx': idx,
                    'site_url': site_url,
                    'command': command,
                    'log_file': str(log_file),
                    'timeout': site_timeout
                })
            except Exception as e:
                logger.error(
                    "✗ [%d/%d] 构建 %s 命令失败: %s - 站点: %s",
                    idx, len(sites), tool_name, e, site_url
                )
                failed_sites.append(site_url)

        if not scan_params_list:
            logger.warning("没有有效的扫描任务")
            continue

        # ============================================================
        # Batched execution: bounds how many scanner processes run at once.
        # ============================================================
        total_tasks = len(scan_params_list)
        logger.info("开始分批执行 %d 个扫描任务(每批 %d 个)...", total_tasks, max_workers)

        # Progress-milestone tracking (user log every ~20%).
        last_progress_percent = 0
        tool_directories = 0
        tool_processed = 0

        batch_num = 0
        for batch_start in range(0, total_tasks, max_workers):
            batch_end = min(batch_start + max_workers, total_tasks)
            batch_params = scan_params_list[batch_start:batch_end]
            batch_num += 1

            logger.info("执行第 %d 批任务(%d-%d/%d)...", batch_num, batch_start + 1, batch_end, total_tasks)

            # Submit the whole batch (non-blocking; returns futures immediately).
            futures = []
            for params in batch_params:
                future = run_and_stream_save_directories_task.submit(
                    cmd=params['command'],
                    tool_name=tool_name,
                    scan_id=scan_id,
                    target_id=target_id,
                    site_url=params['site_url'],
                    cwd=str(directory_scan_dir),
                    shell=True,
                    batch_size=1000,
                    timeout=params['timeout'],
                    log_file=params['log_file']
                )
                futures.append((params['idx'], params['site_url'], future))

            # Block until every task in this batch finishes before starting
            # the next batch.
            for idx, site_url, future in futures:
                try:
                    result = future.result()  # blocks on one task
                    directories_found = result.get('created_directories', 0)
                    total_directories += directories_found
                    tool_directories += directories_found
                    processed_sites_count += 1
                    tool_processed += 1

                    logger.info(
                        "✓ [%d/%d] 站点扫描完成: %s - 发现 %d 个目录",
                        idx, len(sites), site_url, directories_found
                    )

                except Exception as exc:
                    failed_sites.append(site_url)
                    # Timeouts may surface as TimeoutExpired or as a wrapped
                    # exception whose message mentions "timeout".
                    if 'timeout' in str(exc).lower() or isinstance(exc, subprocess.TimeoutExpired):
                        logger.warning(
                            "⚠️ [%d/%d] 站点扫描超时: %s - 错误: %s",
                            idx, len(sites), site_url, exc
                        )
                    else:
                        logger.error(
                            "✗ [%d/%d] 站点扫描失败: %s - 错误: %s",
                            idx, len(sites), site_url, exc
                        )

            # Emit a user-visible progress line roughly every 20%.
            current_progress = int((batch_end / total_tasks) * 100)
            if current_progress >= last_progress_percent + 20:
                user_log(scan_id, "directory_scan", f"Progress: {batch_end}/{total_tasks} sites scanned")
                last_progress_percent = (current_progress // 20) * 20

        # Per-tool completion (developer log + user log).
        logger.info(
            "✓ 工具 %s 执行完成 - 已处理站点: %d/%d, 发现目录: %d",
            tool_name, tool_processed, total_tasks, tool_directories
        )
        user_log(scan_id, "directory_scan", f"{tool_name} completed: found {tool_directories} directories")

    # Final summary across all tools.
    if failed_sites:
        logger.warning(
            "部分站点扫描失败: %d/%d",
            len(failed_sites), len(sites)
        )

    logger.info(
        "✓ 并发目录扫描执行完成 - 成功: %d/%d, 失败: %d, 总目录数: %d",
        processed_sites_count, len(sites), len(failed_sites), total_directories
    )

    return total_directories, processed_sites_count, failed_sites
|
||||
|
||||
|
||||
@flow(
    name="directory_scan",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def directory_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: dict
) -> dict:
    """
    Directory-scan flow.

    Responsibilities:
    1. Collect every site URL under the target
    2. Run a directory scan against each site URL (supports ffuf-style tools)
    3. Stream results into the Directory table as they are produced

    Workflow:
        Step 0: create the working directory
        Step 1: export the site-URL list to a file (input for scanner tools)
        Step 2: log the tool configuration
        Step 3: run the tools concurrently and persist results in real time

    ffuf output fields (as persisted):
        url, length, status, words, lines, content_type, duration

    Args:
        scan_id: scan task ID
        target_name: target name
        target_id: target ID
        scan_workspace_dir: scan workspace directory
        enabled_tools: enabled tool configuration dict

    Returns:
        dict with keys: success, scan_id, target, scan_workspace_dir,
        sites_file, site_count, total_directories (total found),
        processed_sites (successfully scanned), failed_sites_count,
        executed_tasks

    Raises:
        ValueError: invalid arguments
        RuntimeError: execution failure
    """
    try:
        logger.info(
            "="*60 + "\n" +
            "开始目录扫描\n" +
            f" Scan ID: {scan_id}\n" +
            f" Target: {target_name}\n" +
            f" Workspace: {scan_workspace_dir}\n" +
            "="*60
        )

        user_log(scan_id, "directory_scan", "Starting directory scan")

        # Argument validation.
        if scan_id is None:
            raise ValueError("scan_id 不能为空")
        if not target_name:
            raise ValueError("target_name 不能为空")
        if target_id is None:
            raise ValueError("target_id 不能为空")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir 不能为空")
        if not enabled_tools:
            raise ValueError("enabled_tools 不能为空")

        # Step 0: create the working directory.
        from apps.scan.utils import setup_scan_directory
        directory_scan_dir = setup_scan_directory(scan_workspace_dir, 'directory_scan')

        # Step 1: export site URLs (lazy-load aware).
        sites_file, site_count = _export_site_urls(target_id, target_name, directory_scan_dir)

        if site_count == 0:
            # Nothing to scan: return a successful empty result rather than fail.
            logger.warning("跳过目录扫描:没有站点可扫描 - Scan ID: %s", scan_id)
            user_log(scan_id, "directory_scan", "Skipped: no sites to scan", "warning")
            return {
                'success': True,
                'scan_id': scan_id,
                'target': target_name,
                'scan_workspace_dir': scan_workspace_dir,
                'sites_file': sites_file,
                'site_count': 0,
                'total_directories': 0,
                'processed_sites': 0,
                'failed_sites_count': 0,
                'executed_tasks': ['export_sites']
            }

        # Step 2: log the per-tool configuration summary.
        logger.info("Step 2: 工具配置信息")
        tool_info = []
        for tool_name, tool_config in enabled_tools.items():
            mw = _get_max_workers(tool_config)
            tool_info.append(f"{tool_name}(max_workers={mw})")
        logger.info("✓ 启用工具: %s", ', '.join(tool_info))

        # Step 3: run the scanners concurrently, streaming results to the DB.
        logger.info("Step 3: 并发执行扫描工具并实时保存结果")
        total_directories, processed_sites, failed_sites = _run_scans_concurrently(
            enabled_tools=enabled_tools,
            sites_file=sites_file,
            directory_scan_dir=directory_scan_dir,
            scan_id=scan_id,
            target_id=target_id,
            site_count=site_count,
            target_name=target_name
        )

        # If every site failed, log it but keep the overall scan going.
        if processed_sites == 0 and site_count > 0:
            logger.warning("所有站点扫描均失败 - 总站点数: %d, 失败数: %d", site_count, len(failed_sites))

        # Flow completion.
        logger.info("✓ 目录扫描完成 - 发现目录: %d", total_directories)
        user_log(scan_id, "directory_scan", f"directory_scan completed: found {total_directories} directories")

        return {
            'success': True,
            'scan_id': scan_id,
            'target': target_name,
            'scan_workspace_dir': scan_workspace_dir,
            'sites_file': sites_file,
            'site_count': site_count,
            'total_directories': total_directories,
            'processed_sites': processed_sites,
            'failed_sites_count': len(failed_sites),
            'executed_tasks': ['export_sites', 'run_and_stream_save_directories']
        }

    except Exception as e:
        logger.exception("目录扫描失败: %s", e)
        raise
|
||||
@@ -1,390 +0,0 @@
|
||||
"""
|
||||
指纹识别 Flow
|
||||
|
||||
负责编排指纹识别的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 在 site_scan 后串行执行
|
||||
- 使用 xingfinger 工具识别技术栈
|
||||
- 流式处理输出,批量更新数据库
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
import logging
|
||||
import os
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
from prefect import flow
|
||||
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.tasks.fingerprint_detect import (
|
||||
export_urls_for_fingerprint_task,
|
||||
run_xingfinger_and_stream_update_tech_task,
|
||||
)
|
||||
from apps.scan.utils import build_scan_command, user_log
|
||||
from apps.scan.utils.fingerprint_helpers import get_fingerprint_paths
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def calculate_fingerprint_detect_timeout(
    url_count: int,
    base_per_url: float = 10.0,
    min_timeout: int = 300
) -> int:
    """
    Derive the fingerprint-detection timeout from the URL count.

    Formula: url_count * base_per_url, floored at min_timeout.
    There is no upper cap.

    Args:
        url_count: number of URLs to fingerprint
        base_per_url: base seconds budgeted per URL (default 10.0)
        min_timeout: minimum timeout in seconds (default 300)

    Returns:
        int: the computed timeout in seconds
    """
    scaled = int(url_count * base_per_url)
    if scaled < min_timeout:
        return min_timeout
    return scaled
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def _export_urls(
    target_id: int,
    fingerprint_dir: Path,
    source: str = 'website'
) -> tuple[str, int]:
    """
    Export the target's URLs to a file for fingerprint detection.

    Args:
        target_id: target primary key
        fingerprint_dir: fingerprint-detection working directory
        source: data-source type (currently only 'website')

    Returns:
        tuple: (urls_file path, number of exported URLs)
    """
    logger.info("Step 1: 导出 URL 列表 (source=%s)", source)

    output_path = str(fingerprint_dir / 'urls.txt')
    result = export_urls_for_fingerprint_task(
        target_id=target_id,
        output_file=output_path,
        source=source,
        batch_size=1000
    )

    exported_file = result['output_file']
    exported_count = result['total_count']

    logger.info(
        "✓ URL 导出完成 - 文件: %s, 数量: %d",
        exported_file,
        exported_count
    )

    return exported_file, exported_count
|
||||
|
||||
|
||||
def _run_fingerprint_detect(
    enabled_tools: dict,
    urls_file: str,
    url_count: int,
    fingerprint_dir: Path,
    scan_id: int,
    target_id: int,
    source: str
) -> tuple[dict, list]:
    """
    Run fingerprint detection with each enabled tool.

    Per tool: resolve fingerprint-library paths, build the command, compute a
    timeout from the URL count, then run the streaming update task.  A tool
    that fails at any step is recorded in ``failed_tools`` and the loop moves
    on; no exception escapes this function for per-tool failures.

    Args:
        enabled_tools: mapping of tool name -> tool config dict
        urls_file: path of the exported URL file
        url_count: total number of URLs (drives the timeout)
        fingerprint_dir: fingerprint-detection working directory
        scan_id: scan task ID
        target_id: target ID
        source: data-source type

    Returns:
        tuple: (tool_stats, failed_tools) where tool_stats maps tool name to
        {'command', 'result', 'timeout', 'fingerprint_libs'} and failed_tools
        is a list of {'tool', 'reason'} dicts
    """
    tool_stats = {}
    failed_tools = []

    for tool_name, tool_config in enabled_tools.items():
        # 1. Resolve fingerprint-library paths (default library: ehole).
        lib_names = tool_config.get('fingerprint_libs', ['ehole'])
        fingerprint_paths = get_fingerprint_paths(lib_names)

        if not fingerprint_paths:
            reason = f"没有可用的指纹库: {lib_names}"
            logger.warning(reason)
            failed_tools.append({'tool': tool_name, 'reason': reason})
            continue

        # 2. Merge the library paths into the tool config for command building.
        tool_config_with_paths = {**tool_config, **fingerprint_paths}

        # 3. Build the command.
        try:
            command = build_scan_command(
                tool_name=tool_name,
                scan_type='fingerprint_detect',
                command_params={
                    'urls_file': urls_file
                },
                tool_config=tool_config_with_paths
            )
        except Exception as e:
            reason = f"命令构建失败: {str(e)}"
            logger.error("构建 %s 命令失败: %s", tool_name, e)
            failed_tools.append({'tool': tool_name, 'reason': reason})
            continue

        # 4. Compute the timeout from the URL count.
        timeout = calculate_fingerprint_detect_timeout(url_count)

        # 5. Build the per-run log file path.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        log_file = fingerprint_dir / f"{tool_name}_{timestamp}.log"

        logger.info(
            "开始执行 %s 指纹识别 - URL数: %d, 超时: %ds, 指纹库: %s",
            tool_name, url_count, timeout, list(fingerprint_paths.keys())
        )
        user_log(scan_id, "fingerprint_detect", f"Running {tool_name}: {command}")

        # 6. Run the scan task (streams tech updates into the DB).
        try:
            result = run_xingfinger_and_stream_update_tech_task(
                cmd=command,
                tool_name=tool_name,
                scan_id=scan_id,
                target_id=target_id,
                source=source,
                cwd=str(fingerprint_dir),
                timeout=timeout,
                log_file=str(log_file),
                batch_size=100
            )

            tool_stats[tool_name] = {
                'command': command,
                'result': result,
                'timeout': timeout,
                'fingerprint_libs': list(fingerprint_paths.keys())
            }

            tool_updated = result.get('updated_count', 0)
            logger.info(
                "✓ 工具 %s 执行完成 - 处理记录: %d, 更新: %d, 未找到: %d",
                tool_name,
                result.get('processed_records', 0),
                tool_updated,
                result.get('not_found_count', 0)
            )
            user_log(scan_id, "fingerprint_detect", f"{tool_name} completed: identified {tool_updated} fingerprints")

        except Exception as exc:
            reason = str(exc)
            failed_tools.append({'tool': tool_name, 'reason': reason})
            logger.error("工具 %s 执行失败: %s", tool_name, exc, exc_info=True)
            user_log(scan_id, "fingerprint_detect", f"{tool_name} failed: {reason}", "error")

    if failed_tools:
        logger.warning(
            "以下指纹识别工具执行失败: %s",
            ', '.join([f['tool'] for f in failed_tools])
        )

    return tool_stats, failed_tools
|
||||
|
||||
|
||||
@flow(
    name="fingerprint_detect",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def fingerprint_detect_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: dict
) -> dict:
    """
    Fingerprint-detection flow.

    Responsibilities:
    1. Export every WebSite URL under the target to a file
    2. Run xingfinger to identify the technology stack
    3. Parse results and update WebSite.tech (merged and de-duplicated)

    Workflow:
        Step 0: create the working directory
        Step 1: export the URL list
        Step 2: log the enabled-tool configuration
        Step 3: run xingfinger and parse results

    Args:
        scan_id: scan task ID
        target_name: target name
        target_id: target ID
        scan_workspace_dir: scan workspace directory
        enabled_tools: enabled tool configuration (xingfinger)

    Returns:
        dict with keys: success, scan_id, target, scan_workspace_dir,
        urls_file, url_count, processed_records, updated_count,
        created_count, snapshot_count, executed_tasks, tool_stats

    Raises:
        ValueError: configuration error
        RuntimeError: runtime error
    """
    try:
        logger.info(
            "="*60 + "\n" +
            "开始指纹识别\n" +
            f" Scan ID: {scan_id}\n" +
            f" Target: {target_name}\n" +
            f" Workspace: {scan_workspace_dir}\n" +
            "="*60
        )

        user_log(scan_id, "fingerprint_detect", "Starting fingerprint detection")

        # Argument validation.
        if scan_id is None:
            raise ValueError("scan_id 不能为空")
        if not target_name:
            raise ValueError("target_name 不能为空")
        if target_id is None:
            raise ValueError("target_id 不能为空")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir 不能为空")

        # Data-source type (only 'website' is supported for now).
        source = 'website'

        # Step 0: create the working directory.
        from apps.scan.utils import setup_scan_directory
        fingerprint_dir = setup_scan_directory(scan_workspace_dir, 'fingerprint_detect')

        # Step 1: export URLs (lazy-load aware).
        urls_file, url_count = _export_urls(target_id, fingerprint_dir, source)

        if url_count == 0:
            # Nothing to scan: return a successful empty result rather than fail.
            logger.warning("跳过指纹识别:没有 URL 可扫描 - Scan ID: %s", scan_id)
            user_log(scan_id, "fingerprint_detect", "Skipped: no URLs to scan", "warning")
            return {
                'success': True,
                'scan_id': scan_id,
                'target': target_name,
                'scan_workspace_dir': scan_workspace_dir,
                'urls_file': urls_file,
                'url_count': 0,
                'processed_records': 0,
                'updated_count': 0,
                'created_count': 0,
                'snapshot_count': 0,
                'executed_tasks': ['export_urls_for_fingerprint'],
                'tool_stats': {
                    'total': 0,
                    'successful': 0,
                    'failed': 0,
                    'successful_tools': [],
                    'failed_tools': [],
                    'details': {}
                }
            }

        # Step 2: log the enabled-tool configuration.
        logger.info("Step 2: 工具配置信息")
        logger.info("✓ 启用工具: %s", ', '.join(enabled_tools.keys()))

        # Step 3: run fingerprint detection with each enabled tool.
        logger.info("Step 3: 执行指纹识别")
        tool_stats, failed_tools = _run_fingerprint_detect(
            enabled_tools=enabled_tools,
            urls_file=urls_file,
            url_count=url_count,
            fingerprint_dir=fingerprint_dir,
            scan_id=scan_id,
            target_id=target_id,
            source=source
        )

        # Build the executed-task list from the tools that actually ran.
        executed_tasks = ['export_urls_for_fingerprint']
        executed_tasks.extend([f'run_xingfinger ({tool})' for tool in tool_stats.keys()])

        # Aggregate results across all tools.
        total_processed = sum(stats['result'].get('processed_records', 0) for stats in tool_stats.values())
        total_updated = sum(stats['result'].get('updated_count', 0) for stats in tool_stats.values())
        total_created = sum(stats['result'].get('created_count', 0) for stats in tool_stats.values())
        total_snapshots = sum(stats['result'].get('snapshot_count', 0) for stats in tool_stats.values())

        # Flow completion.
        logger.info("✓ 指纹识别完成 - 识别指纹: %d", total_updated)
        user_log(scan_id, "fingerprint_detect", f"fingerprint_detect completed: identified {total_updated} fingerprints")

        successful_tools = [name for name in enabled_tools.keys()
                            if name not in [f['tool'] for f in failed_tools]]

        return {
            'success': True,
            'scan_id': scan_id,
            'target': target_name,
            'scan_workspace_dir': scan_workspace_dir,
            'urls_file': urls_file,
            'url_count': url_count,
            'processed_records': total_processed,
            'updated_count': total_updated,
            'created_count': total_created,
            'snapshot_count': total_snapshots,
            'executed_tasks': executed_tasks,
            'tool_stats': {
                'total': len(enabled_tools),
                'successful': len(successful_tools),
                'failed': len(failed_tools),
                'successful_tools': successful_tools,
                'failed_tools': failed_tools,
                'details': tool_stats
            }
        }

    except ValueError as e:
        logger.error("配置错误: %s", e)
        raise
    except RuntimeError as e:
        logger.error("运行时错误: %s", e)
        raise
    except Exception as e:
        logger.exception("指纹识别失败: %s", e)
        raise
|
||||
@@ -1,282 +0,0 @@
|
||||
"""
|
||||
扫描初始化 Flow
|
||||
|
||||
负责编排扫描任务的初始化流程
|
||||
|
||||
职责:
|
||||
- 使用 FlowOrchestrator 解析 YAML 配置
|
||||
- 在 Prefect Flow 中执行子 Flow(Subflow)
|
||||
- 按照 YAML 顺序编排工作流
|
||||
- 不包含具体业务逻辑(由 Tasks 和 FlowOrchestrator 实现)
|
||||
|
||||
架构:
|
||||
- Flow: Prefect 编排层(本文件)
|
||||
- FlowOrchestrator: 配置解析和执行计划(apps/scan/services/)
|
||||
- Tasks: 执行层(apps/scan/tasks/)
|
||||
- Handlers: 状态管理(apps/scan/handlers/)
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
# 注意:动态扫描容器应使用 run_initiate_scan.py 启动,以便在导入前设置环境变量
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
from prefect import flow, task
|
||||
from pathlib import Path
|
||||
import logging
|
||||
|
||||
from apps.scan.handlers import (
|
||||
on_initiate_scan_flow_running,
|
||||
on_initiate_scan_flow_completed,
|
||||
on_initiate_scan_flow_failed,
|
||||
)
|
||||
from prefect.futures import wait
|
||||
from apps.scan.utils import setup_scan_workspace
|
||||
from apps.scan.orchestrators import FlowOrchestrator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(name="run_subflow")
def _run_subflow_task(scan_type: str, flow_func, flow_kwargs: dict):
    """Task wrapper around a sub-flow so parallel stages can run sub-flows concurrently."""
    logger.info("开始执行子 Flow: %s", scan_type)
    result = flow_func(**flow_kwargs)
    return result
|
||||
|
||||
|
||||
@flow(
|
||||
name='initiate_scan',
|
||||
description='扫描任务初始化流程',
|
||||
log_prints=True,
|
||||
on_running=[on_initiate_scan_flow_running],
|
||||
on_completion=[on_initiate_scan_flow_completed],
|
||||
on_failure=[on_initiate_scan_flow_failed],
|
||||
)
|
||||
def initiate_scan_flow(
|
||||
scan_id: int,
|
||||
target_name: str,
|
||||
target_id: int,
|
||||
scan_workspace_dir: str,
|
||||
engine_name: str,
|
||||
scheduled_scan_name: str | None = None,
|
||||
) -> dict:
|
||||
"""
|
||||
初始化扫描任务(动态工作流编排)
|
||||
|
||||
根据 YAML 配置动态编排工作流:
|
||||
- 从数据库获取 engine_config (YAML)
|
||||
- 检测启用的扫描类型
|
||||
- 按照定义的阶段执行:
|
||||
Stage 1: Discovery (顺序执行)
|
||||
- subdomain_discovery
|
||||
- port_scan
|
||||
- site_scan
|
||||
Stage 2: Analysis (并行执行)
|
||||
- url_fetch
|
||||
- directory_scan
|
||||
|
||||
Args:
|
||||
scan_id: 扫描任务 ID
|
||||
target_name: 目标名称
|
||||
target_id: 目标 ID
|
||||
scan_workspace_dir: Scan 工作空间目录路径
|
||||
engine_name: 引擎名称(用于显示)
|
||||
scheduled_scan_name: 定时扫描任务名称(可选,用于通知显示)
|
||||
|
||||
Returns:
|
||||
dict: 执行结果摘要
|
||||
|
||||
Raises:
|
||||
ValueError: 参数验证失败或配置无效
|
||||
RuntimeError: 执行失败
|
||||
"""
|
||||
try:
|
||||
# ==================== 参数验证 ====================
|
||||
if not scan_id:
|
||||
raise ValueError("scan_id is required")
|
||||
if not scan_workspace_dir:
|
||||
raise ValueError("scan_workspace_dir is required")
|
||||
if not engine_name:
|
||||
raise ValueError("engine_name is required")
|
||||
|
||||
|
||||
logger.info(
|
||||
"="*60 + "\n" +
|
||||
"开始初始化扫描任务\n" +
|
||||
f" Scan ID: {scan_id}\n" +
|
||||
f" Target: {target_name}\n" +
|
||||
f" Engine: {engine_name}\n" +
|
||||
f" Workspace: {scan_workspace_dir}\n" +
|
||||
"="*60
|
||||
)
|
||||
|
||||
# ==================== Task 1: 创建 Scan 工作空间 ====================
|
||||
scan_workspace_path = setup_scan_workspace(scan_workspace_dir)
|
||||
|
||||
# ==================== Task 2: 获取引擎配置 ====================
|
||||
from apps.scan.models import Scan
|
||||
scan = Scan.objects.get(id=scan_id)
|
||||
engine_config = scan.yaml_configuration
|
||||
|
||||
# 使用 engine_names 进行显示
|
||||
display_engine_name = ', '.join(scan.engine_names) if scan.engine_names else engine_name
|
||||
|
||||
# ==================== Task 3: 解析配置,生成执行计划 ====================
|
||||
orchestrator = FlowOrchestrator(engine_config)
|
||||
|
||||
# FlowOrchestrator 已经解析了所有工具配置
|
||||
enabled_tools_by_type = orchestrator.enabled_tools_by_type
|
||||
|
||||
logger.info(
|
||||
f"执行计划生成成功:\n"
|
||||
f" 扫描类型: {' → '.join(orchestrator.scan_types)}\n"
|
||||
f" 总共 {len(orchestrator.scan_types)} 个 Flow"
|
||||
)
|
||||
|
||||
# ==================== 初始化阶段进度 ====================
|
||||
# 在解析完配置后立即初始化,此时已有完整的 scan_types 列表
|
||||
from apps.scan.services import ScanService
|
||||
scan_service = ScanService()
|
||||
scan_service.init_stage_progress(scan_id, orchestrator.scan_types)
|
||||
logger.info(f"✓ 初始化阶段进度 - Stages: {orchestrator.scan_types}")
|
||||
|
||||
# ==================== 更新 Target 最后扫描时间 ====================
|
||||
# 在开始扫描时更新,表示"最后一次扫描开始时间"
|
||||
from apps.targets.services import TargetService
|
||||
target_service = TargetService()
|
||||
target_service.update_last_scanned_at(target_id)
|
||||
logger.info(f"✓ 更新 Target 最后扫描时间 - Target ID: {target_id}")
|
||||
|
||||
# ==================== Task 3: 执行 Flow(动态阶段执行)====================
|
||||
# 注意:各阶段状态更新由 scan_flow_handlers.py 自动处理(running/completed/failed)
|
||||
executed_flows = []
|
||||
results = {}
|
||||
|
||||
# 通用执行参数
|
||||
flow_kwargs = {
|
||||
'scan_id': scan_id,
|
||||
'target_name': target_name,
|
||||
'target_id': target_id,
|
||||
'scan_workspace_dir': str(scan_workspace_path)
|
||||
}
|
||||
|
||||
def record_flow_result(scan_type, result=None, error=None):
|
||||
"""
|
||||
统一的结果记录函数
|
||||
|
||||
Args:
|
||||
scan_type: 扫描类型名称
|
||||
result: 执行结果(成功时)
|
||||
error: 异常对象(失败时)
|
||||
"""
|
||||
if error:
|
||||
# 失败处理:记录错误但不抛出异常,让扫描继续执行后续阶段
|
||||
error_msg = f"{scan_type} 执行失败: {str(error)}"
|
||||
logger.warning(error_msg)
|
||||
executed_flows.append(f"{scan_type} (失败)")
|
||||
results[scan_type] = {'success': False, 'error': str(error)}
|
||||
# 不再抛出异常,让扫描继续
|
||||
else:
|
||||
# 成功处理
|
||||
executed_flows.append(scan_type)
|
||||
results[scan_type] = result
|
||||
logger.info(f"✓ {scan_type} 执行成功")
|
||||
|
||||
def get_valid_flows(flow_names):
|
||||
"""
|
||||
获取有效的 Flow 函数列表,并为每个 Flow 准备专属参数
|
||||
|
||||
Args:
|
||||
flow_names: 扫描类型名称列表
|
||||
|
||||
Returns:
|
||||
list: [(scan_type, flow_func, flow_specific_kwargs), ...] 有效的函数列表
|
||||
"""
|
||||
valid_flows = []
|
||||
for scan_type in flow_names:
|
||||
flow_func = orchestrator.get_flow_function(scan_type)
|
||||
if flow_func:
|
||||
# 为每个 Flow 准备专属的参数(包含对应的 enabled_tools)
|
||||
flow_specific_kwargs = dict(flow_kwargs)
|
||||
flow_specific_kwargs['enabled_tools'] = enabled_tools_by_type.get(scan_type, {})
|
||||
valid_flows.append((scan_type, flow_func, flow_specific_kwargs))
|
||||
else:
|
||||
logger.warning(f"跳过未实现的 Flow: {scan_type}")
|
||||
return valid_flows
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# 动态阶段执行(基于 FlowOrchestrator 定义)
|
||||
# ---------------------------------------------------------
|
||||
for mode, enabled_flows in orchestrator.get_execution_stages():
|
||||
if mode == 'sequential':
|
||||
# 顺序执行
|
||||
logger.info(f"\n{'='*60}\n顺序执行阶段: {', '.join(enabled_flows)}\n{'='*60}")
|
||||
for scan_type, flow_func, flow_specific_kwargs in get_valid_flows(enabled_flows):
|
||||
logger.info(f"\n{'='*60}\n执行 Flow: {scan_type}\n{'='*60}")
|
||||
try:
|
||||
result = flow_func(**flow_specific_kwargs)
|
||||
record_flow_result(scan_type, result=result)
|
||||
except Exception as e:
|
||||
record_flow_result(scan_type, error=e)
|
||||
|
||||
elif mode == 'parallel':
|
||||
# 并行执行阶段:通过 Task 包装子 Flow,并使用 Prefect TaskRunner 并发运行
|
||||
logger.info(f"\n{'='*60}\n并行执行阶段: {', '.join(enabled_flows)}\n{'='*60}")
|
||||
futures = []
|
||||
|
||||
# 提交所有并行子 Flow 任务
|
||||
for scan_type, flow_func, flow_specific_kwargs in get_valid_flows(enabled_flows):
|
||||
logger.info(f"\n{'='*60}\n提交并行子 Flow 任务: {scan_type}\n{'='*60}")
|
||||
future = _run_subflow_task.submit(
|
||||
scan_type=scan_type,
|
||||
flow_func=flow_func,
|
||||
flow_kwargs=flow_specific_kwargs,
|
||||
)
|
||||
futures.append((scan_type, future))
|
||||
|
||||
# 等待所有并行子 Flow 完成
|
||||
if futures:
|
||||
wait([f for _, f in futures])
|
||||
|
||||
# 检查结果(复用统一的结果处理逻辑)
|
||||
for scan_type, future in futures:
|
||||
try:
|
||||
result = future.result()
|
||||
record_flow_result(scan_type, result=result)
|
||||
except Exception as e:
|
||||
record_flow_result(scan_type, error=e)
|
||||
|
||||
# ==================== 完成 ====================
|
||||
logger.info(
|
||||
"="*60 + "\n" +
|
||||
"✓ 扫描任务初始化完成\n" +
|
||||
f" 执行的 Flow: {', '.join(executed_flows)}\n" +
|
||||
"="*60
|
||||
)
|
||||
|
||||
# ==================== 返回结果 ====================
|
||||
return {
|
||||
'success': True,
|
||||
'scan_id': scan_id,
|
||||
'target': target_name,
|
||||
'scan_workspace_dir': str(scan_workspace_path),
|
||||
'executed_flows': executed_flows,
|
||||
'results': results
|
||||
}
|
||||
|
||||
except ValueError as e:
|
||||
# 参数错误
|
||||
logger.error("参数错误: %s", e)
|
||||
raise
|
||||
except RuntimeError as e:
|
||||
# 执行失败
|
||||
logger.error("运行时错误: %s", e)
|
||||
raise
|
||||
except OSError as e:
|
||||
# 文件系统错误(工作空间创建失败)
|
||||
logger.error("文件系统错误: %s", e)
|
||||
raise
|
||||
except Exception as e:
|
||||
# 其他未预期错误
|
||||
logger.exception("初始化扫描任务失败: %s", e)
|
||||
# 注意:失败状态更新由 Prefect State Handlers 自动处理
|
||||
raise
|
||||
@@ -1,490 +0,0 @@
|
||||
|
||||
"""
|
||||
站点扫描 Flow
|
||||
|
||||
负责编排站点扫描的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 支持串行执行扫描工具(流式处理)
|
||||
- 每个 Task 可独立重试
|
||||
- 配置由 YAML 解析
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
from prefect import flow
|
||||
from apps.scan.tasks.site_scan import export_site_urls_task, run_and_stream_save_websites_task
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.utils import config_parser, build_scan_command, user_log
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def calculate_timeout_by_line_count(
    tool_config: dict,
    file_path: str,
    base_per_time: int = 1,
    min_timeout: int = 60
) -> int:
    """
    Derive a scan timeout from the number of lines in a file.

    Shells out to ``wc -l`` to count lines, then scales the count by
    ``base_per_time`` seconds per line, flooring the result at
    ``min_timeout``. Any failure (missing file, unparsable output, …)
    falls back to ``min_timeout``.

    Args:
        tool_config: Tool configuration dict (unused here; kept so the
            signature matches sibling timeout helpers).
        file_path: Path of the file whose lines are counted.
        base_per_time: Seconds allotted per line, default 1.
        min_timeout: Lower bound for the returned timeout, default 60.

    Returns:
        int: Timeout in seconds, never below ``min_timeout``.

    Example:
        timeout = calculate_timeout_by_line_count(
            tool_config={},
            file_path='/path/to/urls.txt',
            base_per_time=2
        )
    """
    try:
        # wc -l counts lines quickly without reading the file into Python.
        proc = subprocess.run(
            ['wc', '-l', file_path],
            capture_output=True,
            text=True,
            check=True
        )
        # Output format is "<count> <filename>"; take the leading number.
        num_lines = int(proc.stdout.strip().split()[0])

        # Scale by the per-line budget, but never drop below the floor.
        computed = max(num_lines * base_per_time, min_timeout)

        logger.info(
            f"timeout 自动计算: 文件={file_path}, "
            f"行数={num_lines}, 每行时间={base_per_time}秒, 最小值={min_timeout}秒, timeout={computed}秒"
        )

        return computed

    except Exception as exc:
        # Best-effort: if counting fails for any reason, use the floor value.
        logger.warning(f"wc -l 计算行数失败: {exc},使用默认 timeout: {min_timeout}秒")
        return min_timeout
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def _export_site_urls(target_id: int, site_scan_dir: Path, target_name: str = None) -> tuple[str, int, int]:
    """
    Export the target's site URLs to a text file.

    Args:
        target_id: Target ID.
        site_scan_dir: Site-scan working directory.
        target_name: Target name (used for default values during lazy loading).

    Returns:
        tuple: (urls_file, total_urls, association_count)

    Note:
        A zero URL count is only logged as a warning; the caller decides
        how to handle it (no exception is raised here).
    """
    logger.info("Step 1: 导出站点URL列表")

    destination = str(site_scan_dir / 'site_urls.txt')
    task_result = export_site_urls_task(
        target_id=target_id,
        output_file=destination,
        batch_size=1000  # process 1000 subdomains per batch
    )

    url_total = task_result['total_urls']
    assoc_total = task_result['association_count']  # host/port association count

    logger.info(
        "✓ 站点URL导出完成 - 文件: %s, URL数量: %d, 关联数: %d",
        task_result['output_file'],
        url_total,
        assoc_total
    )

    if url_total == 0:
        # Do not raise; the caller is responsible for deciding what to do.
        logger.warning("目标下没有可用的站点URL,无法执行站点扫描")

    return task_result['output_file'], url_total, assoc_total
|
||||
|
||||
|
||||
def _run_scans_sequentially(
    enabled_tools: dict,
    urls_file: str,
    total_urls: int,
    site_scan_dir: Path,
    scan_id: int,
    target_id: int,
    target_name: str
) -> tuple[dict, int, list, list]:
    """
    Run the site-scan tools one after another, streaming results to the DB.

    Fix: the original re-imported ``datetime`` inside the tool loop on every
    iteration; the import is now done once at the top of the function.

    Args:
        enabled_tools: Enabled tool configuration dict.
        urls_file: Path of the URL list file.
        total_urls: Total number of URLs.
        site_scan_dir: Site-scan working directory.
        scan_id: Scan task ID.
        target_id: Target ID.
        target_name: Target name (used in error logs).

    Returns:
        tuple: (tool_stats, processed_records, successful_tool_names, failed_tools).
            When every tool fails this returns ``({}, 0, [], failed_tools)``
            instead of raising, so the overall scan can continue.
    """
    from datetime import datetime  # hoisted out of the loop (was re-imported per tool)

    tool_stats = {}
    processed_records = 0
    failed_tools = []

    for tool_name, tool_config in enabled_tools.items():
        # 1. Build the full command (variable substitution).
        try:
            command = build_scan_command(
                tool_name=tool_name,
                scan_type='site_scan',
                command_params={
                    'url_file': urls_file
                },
                tool_config=tool_config
            )
        except Exception as e:
            reason = f"命令构建失败: {str(e)}"
            logger.error(f"构建 {tool_name} 命令失败: {e}")
            failed_tools.append({'tool': tool_name, 'reason': reason})
            continue

        # 2. Resolve the timeout (supports 'auto' for dynamic calculation).
        config_timeout = tool_config.get('timeout', 300)
        if config_timeout == 'auto':
            # Derive the timeout from the URL file size.
            timeout = calculate_timeout_by_line_count(tool_config, urls_file, base_per_time=1)
            logger.info(f"✓ 工具 {tool_name} 动态计算 timeout: {timeout}秒")
        else:
            # Use the larger of the configured and dynamically computed timeouts.
            dynamic_timeout = calculate_timeout_by_line_count(tool_config, urls_file, base_per_time=1)
            timeout = max(dynamic_timeout, config_timeout)

        # 2.1 Per-tool log file path (mirrors the port-scan flow).
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        log_file = site_scan_dir / f"{tool_name}_{timestamp}.log"

        logger.info(
            "开始执行 %s 站点扫描 - URL数: %d, 最终超时: %ds",
            tool_name, total_urls, timeout
        )
        user_log(scan_id, "site_scan", f"Running {tool_name}: {command}")

        # 3. Execute the scan task.
        try:
            # Stream the tool output and persist results as they arrive.
            result = run_and_stream_save_websites_task(
                cmd=command,
                tool_name=tool_name,
                scan_id=scan_id,
                target_id=target_id,
                cwd=str(site_scan_dir),
                shell=True,
                timeout=timeout,
                log_file=str(log_file)
            )

            tool_stats[tool_name] = {
                'command': command,
                'result': result,
                'timeout': timeout
            }
            tool_records = result.get('processed_records', 0)
            tool_created = result.get('created_websites', 0)
            processed_records += tool_records

            logger.info(
                "✓ 工具 %s 流式处理完成 - 处理记录: %d, 创建站点: %d, 跳过: %d",
                tool_name,
                tool_records,
                tool_created,
                result.get('skipped_no_subdomain', 0) + result.get('skipped_failed', 0)
            )
            user_log(scan_id, "site_scan", f"{tool_name} completed: found {tool_created} websites")

        except subprocess.TimeoutExpired as exc:
            # Timeouts are handled separately; partial results are already saved.
            reason = f"timeout after {timeout}s"
            failed_tools.append({'tool': tool_name, 'reason': reason})
            logger.warning(
                "⚠️ 工具 %s 执行超时 - 超时配置: %d秒\n"
                "注意:超时前已解析的站点数据已保存到数据库,但扫描未完全完成。",
                tool_name, timeout
            )
            user_log(scan_id, "site_scan", f"{tool_name} failed: {reason}", "error")
        except Exception as exc:
            # Any other failure: record and move on to the next tool.
            reason = str(exc)
            failed_tools.append({'tool': tool_name, 'reason': reason})
            logger.error("工具 %s 执行失败: %s", tool_name, exc, exc_info=True)
            user_log(scan_id, "site_scan", f"{tool_name} failed: {reason}", "error")

    if failed_tools:
        logger.warning(
            "以下扫描工具执行失败: %s",
            ', '.join([f['tool'] for f in failed_tools])
        )

    if not tool_stats:
        error_details = "; ".join([f"{f['tool']}: {f['reason']}" for f in failed_tools])
        logger.warning("所有站点扫描工具均失败 - 目标: %s, 失败工具: %s", target_name, error_details)
        # Return empty results rather than raising, so the scan continues.
        return {}, 0, [], failed_tools

    # Derive the list of successful tools from what did NOT fail.
    successful_tool_names = [name for name in enabled_tools.keys()
                             if name not in [f['tool'] for f in failed_tools]]

    logger.info(
        "✓ 串行站点扫描执行完成 - 成功: %d/%d (成功: %s, 失败: %s)",
        len(tool_stats), len(enabled_tools),
        ', '.join(successful_tool_names) if successful_tool_names else '无',
        ', '.join([f['tool'] for f in failed_tools]) if failed_tools else '无'
    )

    return tool_stats, processed_records, successful_tool_names, failed_tools
|
||||
|
||||
|
||||
def calculate_timeout(url_count: int, base: int = 600, per_url: int = 1) -> int:
    """
    Compute a scan timeout from the number of URLs.

    Rule: ``base`` seconds plus ``per_url`` seconds for every URL. No upper
    bound is applied — the caller caps the value if needed.

    Args:
        url_count: Number of URLs; must be a positive integer.
        base: Base timeout in seconds, default 600 (10 minutes).
        per_url: Additional seconds per URL, default 1.

    Returns:
        int: Computed timeout in seconds.

    Raises:
        ValueError: If ``url_count`` is zero or negative.
    """
    if url_count == 0:
        raise ValueError("URL数量不能为0")
    if url_count < 0:
        raise ValueError(f"URL数量不能为负数: {url_count}")

    return base + int(url_count * per_url)
|
||||
|
||||
|
||||
@flow(
    name="site_scan",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def site_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: dict
) -> dict:
    """
    Site scan flow.

    Main responsibilities:
    1. Export all subdomain/port combinations of the target as URLs into a file.
    2. Run the enabled tools against that URL list and stream-save the
       results to the database.

    Workflow:
        Step 0: create the working directory
        Step 1: export the site URL list
        Step 2: log the enabled tool configuration
        Step 3: run the scan tools sequentially, saving results as they stream in

    Args:
        scan_id: Scan task ID
        target_name: Target name
        target_id: Target ID
        scan_workspace_dir: Scan workspace directory
        enabled_tools: Enabled tool configuration dict

    Returns:
        dict: {
            'success': bool,
            'scan_id': int,
            'target': str,
            'scan_workspace_dir': str,
            'urls_file': str,
            'total_urls': int,
            'association_count': int,
            'processed_records': int,
            'created_websites': int,
            'skipped_no_subdomain': int,
            'skipped_failed': int,
            'executed_tasks': list,
            'tool_stats': {
                'total': int,
                'successful': int,
                'failed': int,
                'successful_tools': list[str],
                'failed_tools': list[dict]
            }
        }

    Raises:
        ValueError: configuration/parameter error
        RuntimeError: execution failure
    """
    try:
        logger.info(
            "="*60 + "\n" +
            "开始站点扫描\n" +
            f" Scan ID: {scan_id}\n" +
            f" Target: {target_name}\n" +
            f" Workspace: {scan_workspace_dir}\n" +
            "="*60
        )

        # Parameter validation (``is None`` so falsy-but-valid IDs like 0 pass).
        if scan_id is None:
            raise ValueError("scan_id 不能为空")
        if not target_name:
            raise ValueError("target_name 不能为空")
        if target_id is None:
            raise ValueError("target_id 不能为空")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir 不能为空")

        user_log(scan_id, "site_scan", "Starting site scan")

        # Step 0: create the working directory.
        from apps.scan.utils import setup_scan_directory
        site_scan_dir = setup_scan_directory(scan_workspace_dir, 'site_scan')

        # Step 1: export the site URL list.
        urls_file, total_urls, association_count = _export_site_urls(
            target_id, site_scan_dir, target_name
        )

        if total_urls == 0:
            # Nothing to scan — return an empty-but-successful result so the
            # overall scan pipeline keeps going.
            logger.warning("跳过站点扫描:没有站点 URL 可扫描 - Scan ID: %s", scan_id)
            user_log(scan_id, "site_scan", "Skipped: no site URLs to scan", "warning")
            return {
                'success': True,
                'scan_id': scan_id,
                'target': target_name,
                'scan_workspace_dir': scan_workspace_dir,
                'urls_file': urls_file,
                'total_urls': 0,
                'association_count': association_count,
                'processed_records': 0,
                'created_websites': 0,
                'skipped_no_subdomain': 0,
                'skipped_failed': 0,
                'executed_tasks': ['export_site_urls'],
                'tool_stats': {
                    'total': 0,
                    'successful': 0,
                    'failed': 0,
                    'successful_tools': [],
                    'failed_tools': [],
                    'details': {}
                }
            }

        # Step 2: log the tool configuration.
        logger.info("Step 2: 工具配置信息")
        logger.info(
            "✓ 启用工具: %s",
            ', '.join(enabled_tools.keys())
        )

        # Step 3: run the scan tools sequentially with streaming persistence.
        logger.info("Step 3: 串行执行扫描工具并实时保存结果")
        tool_stats, processed_records, successful_tool_names, failed_tools = _run_scans_sequentially(
            enabled_tools=enabled_tools,
            urls_file=urls_file,
            total_urls=total_urls,
            site_scan_dir=site_scan_dir,
            scan_id=scan_id,
            target_id=target_id,
            target_name=target_name
        )

        # Build the list of executed tasks from what actually ran.
        executed_tasks = ['export_site_urls', 'parse_config']
        executed_tasks.extend([f'run_and_stream_save_websites ({tool})' for tool in tool_stats.keys()])

        # Aggregate per-tool results into scan-level totals.
        total_created = sum(stats['result'].get('created_websites', 0) for stats in tool_stats.values())
        total_skipped_no_subdomain = sum(stats['result'].get('skipped_no_subdomain', 0) for stats in tool_stats.values())
        total_skipped_failed = sum(stats['result'].get('skipped_failed', 0) for stats in tool_stats.values())

        # Record flow completion.
        logger.info("✓ 站点扫描完成 - 创建站点: %d", total_created)
        user_log(scan_id, "site_scan", f"site_scan completed: found {total_created} websites")

        return {
            'success': True,
            'scan_id': scan_id,
            'target': target_name,
            'scan_workspace_dir': scan_workspace_dir,
            'urls_file': urls_file,
            'total_urls': total_urls,
            'association_count': association_count,
            'processed_records': processed_records,
            'created_websites': total_created,
            'skipped_no_subdomain': total_skipped_no_subdomain,
            'skipped_failed': total_skipped_failed,
            'executed_tasks': executed_tasks,
            'tool_stats': {
                'total': len(enabled_tools),
                'successful': len(successful_tool_names),
                'failed': len(failed_tools),
                'successful_tools': successful_tool_names,
                'failed_tools': failed_tools,
                'details': tool_stats
            }
        }

    except ValueError as e:
        # Configuration/parameter error — rethrow for the flow handlers.
        logger.error("配置错误: %s", e)
        raise
    except RuntimeError as e:
        logger.error("运行时错误: %s", e)
        raise
    except Exception as e:
        # Unexpected failure; state updates are handled by the flow handlers.
        logger.exception("站点扫描失败: %s", e)
        raise
|
||||
@@ -1,782 +0,0 @@
|
||||
"""
|
||||
子域名发现扫描 Flow
|
||||
|
||||
负责编排子域名发现扫描的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 支持并行执行扫描工具
|
||||
- 每个 Task 可独立重试
|
||||
- 配置由 YAML 解析
|
||||
|
||||
增强流程(4 阶段):
|
||||
Stage 1: 被动收集(并行) - 必选
|
||||
Stage 2: 字典爆破(可选) - 子域名字典爆破
|
||||
Stage 3: 变异生成 + 验证(可选) - dnsgen + 通用存活验证
|
||||
Stage 4: DNS 存活验证(可选) - 通用存活验证
|
||||
|
||||
各阶段可灵活开关,最终结果根据实际执行的阶段动态决定
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
from prefect import flow
|
||||
from pathlib import Path
|
||||
import logging
|
||||
import os
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.utils import build_scan_command, ensure_wordlist_local, user_log
|
||||
from apps.engine.services.wordlist_service import WordlistService
|
||||
from apps.common.normalizer import normalize_domain
|
||||
from apps.common.validators import validate_domain
|
||||
from datetime import datetime
|
||||
import uuid
|
||||
import subprocess
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def _validate_and_normalize_target(target_name: str) -> str:
    """
    Normalize a target domain name and verify it is valid.

    Args:
        target_name: Raw target domain.

    Returns:
        str: The normalized domain.

    Raises:
        ValueError: When the domain is invalid.

    Example:
        >>> _validate_and_normalize_target('EXAMPLE.COM')
        'example.com'
        >>> _validate_and_normalize_target('http://example.com')
        'example.com'
    """
    try:
        cleaned = normalize_domain(target_name)
        validate_domain(cleaned)
    except ValueError as e:
        error_msg = f"无效的目标域名: {target_name} - {e}"
        logger.error(error_msg)
        raise ValueError(error_msg) from e

    logger.debug("域名验证通过: %s -> %s", target_name, cleaned)
    return cleaned
|
||||
|
||||
|
||||
def _run_scans_parallel(
    enabled_tools: dict,
    domain_name: str,
    result_dir: Path,
    scan_id: int,
    provider_config_path: str = None
) -> tuple[list, list, list]:
    """
    Run all enabled subdomain-discovery tools in parallel.

    Fix: ``tool_config['timeout']`` raised ``KeyError`` when a tool config
    omitted ``timeout``; it now falls back to 600 seconds via ``.get()``,
    matching this function's own 'auto' default and the ``.get``-based
    handling in ``_run_single_tool``.

    Args:
        enabled_tools: Enabled tool config dict, e.g. {'tool_name': {'timeout': 600, ...}}.
        domain_name: Target domain.
        result_dir: Output directory for per-tool result files.
        scan_id: Scan task ID (for user-facing logs).
        provider_config_path: Optional provider config path (used by subfinder).

    Returns:
        tuple: (result_files, failed_tools, successful_tool_names).
            When nothing can start or everything fails, empty results are
            returned instead of raising so the overall scan continues.
    """
    # Imported lazily to avoid import cycles at module load time.
    from apps.scan.tasks.subdomain_discovery import run_subdomain_discovery_task

    # One timestamp shared by every tool's output file.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

    failures = []  # human-readable failure descriptions (build + run)
    futures = {}

    # 1. Build commands and submit the parallel tasks.
    for tool_name, tool_config in enabled_tools.items():
        # 1.1 Unique absolute output path per tool.
        short_uuid = uuid.uuid4().hex[:4]
        output_file = str(result_dir / f"{tool_name}_{timestamp}_{short_uuid}.txt")

        # 1.2 Build the full command (variable substitution).
        try:
            command_params = {
                'domain': domain_name,       # maps to {domain}
                'output_file': output_file   # maps to {output_file}
            }

            # subfinder optionally takes a third-party provider config.
            if tool_name == 'subfinder' and provider_config_path:
                command_params['provider_config'] = provider_config_path

            command = build_scan_command(
                tool_name=tool_name,
                scan_type='subdomain_discovery',
                command_params=command_params,
                tool_config=tool_config
            )
        except Exception as e:
            failure_msg = f"{tool_name}: 命令构建失败 - {e}"
            failures.append(failure_msg)
            logger.error(f"构建 {tool_name} 命令失败: {e}")
            continue

        # 1.3 Resolve the timeout (supports 'auto'; missing key → 600s default).
        timeout = tool_config.get('timeout', 600)
        if timeout == 'auto':
            # Passive tools tend to run long; use the 600s default.
            timeout = 600
            logger.info(f"✓ 工具 {tool_name} 使用默认 timeout: {timeout}秒")

        # 1.4 Submit the task.
        logger.debug(
            f"提交任务 - 工具: {tool_name}, 超时: {timeout}s, 输出: {output_file}"
        )

        user_log(scan_id, "subdomain_discovery", f"Running {tool_name}: {command}")

        future = run_subdomain_discovery_task.submit(
            tool=tool_name,
            command=command,
            timeout=timeout,
            output_file=output_file
        )
        futures[tool_name] = future

    # 2. Nothing could even be submitted.
    if not futures:
        logger.warning(
            "所有扫描工具均无法启动 - 目标: %s, 失败详情: %s",
            domain_name, "; ".join(failures)
        )
        # Return empty results rather than raising, so the scan continues.
        return [], [{'tool': 'all', 'reason': '所有工具均无法启动'}], []

    # 3. Wait for the parallel tasks and collect their results.
    result_files = []
    failed_tools = []

    for tool_name, future in futures.items():
        try:
            result = future.result()  # file path (str) on success, "" on failure
            if result:
                result_files.append(result)
                logger.info("✓ 扫描工具 %s 执行成功: %s", tool_name, result)
                user_log(scan_id, "subdomain_discovery", f"{tool_name} completed")
            else:
                failure_msg = f"{tool_name}: 未生成结果文件"
                failures.append(failure_msg)
                failed_tools.append({'tool': tool_name, 'reason': '未生成结果文件'})
                logger.warning("⚠️ 扫描工具 %s 未生成结果文件", tool_name)
                user_log(scan_id, "subdomain_discovery", f"{tool_name} failed: no output file", "error")
        except Exception as e:
            failure_msg = f"{tool_name}: {str(e)}"
            failures.append(failure_msg)
            failed_tools.append({'tool': tool_name, 'reason': str(e)})
            logger.warning("⚠️ 扫描工具 %s 执行失败: %s", tool_name, str(e))
            user_log(scan_id, "subdomain_discovery", f"{tool_name} failed: {str(e)}", "error")

    # 4. Everything ran but nothing produced output.
    if not result_files:
        logger.warning(
            "所有扫描工具均失败 - 目标: %s, 失败详情: %s",
            domain_name, "; ".join(failures)
        )
        # Return empty results rather than raising, so the scan continues.
        return [], failed_tools, []

    # 5. Derive the successful-tool list from what did NOT fail.
    successful_tool_names = [name for name in futures.keys()
                             if name not in [f['tool'] for f in failed_tools]]

    logger.info(
        "✓ 扫描工具并行执行完成 - 成功: %d/%d (成功: %s, 失败: %s)",
        len(result_files), len(futures),
        ', '.join(successful_tool_names) if successful_tool_names else '无',
        ', '.join([f['tool'] for f in failed_tools]) if failed_tools else '无'
    )

    return result_files, failed_tools, successful_tool_names
|
||||
|
||||
|
||||
def _run_single_tool(
    tool_name: str,
    tool_config: dict,
    command_params: dict,
    result_dir: Path,
    scan_type: str = 'subdomain_discovery',
    scan_id: int = None
) -> str:
    """
    Run a single scan tool synchronously.

    Fix: the original injected ``output_file`` directly into the caller's
    ``command_params`` dict, mutating caller-owned state as a side effect;
    the parameters are now copied before the injection.

    Args:
        tool_name: Tool name.
        tool_config: Tool configuration.
        command_params: Command parameters (not mutated).
        result_dir: Result directory.
        scan_type: Scan type, default 'subdomain_discovery'.
        scan_id: Scan ID (for user-facing logs); logging skipped when None.

    Returns:
        str: Output file path on success, empty string on any failure.
    """
    from apps.scan.tasks.subdomain_discovery import run_subdomain_discovery_task

    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    short_uuid = uuid.uuid4().hex[:4]
    output_file = str(result_dir / f"{tool_name}_{timestamp}_{short_uuid}.txt")

    # Work on a shallow copy so the caller's dict is left untouched.
    params = dict(command_params)
    params['output_file'] = output_file

    try:
        command = build_scan_command(
            tool_name=tool_name,
            scan_type=scan_type,
            command_params=params,
            tool_config=tool_config
        )
    except Exception as e:
        logger.error(f"构建 {tool_name} 命令失败: {e}")
        return ""

    # 'auto' (and missing) timeouts fall back to one hour.
    timeout = tool_config.get('timeout', 3600)
    if timeout == 'auto':
        timeout = 3600

    logger.info(f"执行 {tool_name}: {command}")
    if scan_id:
        user_log(scan_id, scan_type, f"Running {tool_name}: {command}")

    try:
        result = run_subdomain_discovery_task(
            tool=tool_name,
            command=command,
            timeout=timeout,
            output_file=output_file
        )
        return result if result else ""
    except Exception as e:
        # Best-effort: a failed tool yields "" so the caller can continue.
        logger.warning(f"{tool_name} 执行失败: {e}")
        return ""
|
||||
|
||||
|
||||
def _count_lines(file_path: str) -> int:
|
||||
"""
|
||||
统计文件非空行数
|
||||
|
||||
Args:
|
||||
file_path: 文件路径
|
||||
|
||||
Returns:
|
||||
int: 非空行数量
|
||||
"""
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
||||
return sum(1 for line in f if line.strip())
|
||||
except Exception as e:
|
||||
logger.warning(f"统计文件行数失败: {file_path} - {e}")
|
||||
return 0
|
||||
|
||||
|
||||
def _merge_files(file_list: list, output_file: str) -> str:
    """
    Merge several files into one, deduplicating their lines.

    Falsy entries and non-existent paths in ``file_list`` are skipped.
    Blank lines are dropped; the remaining unique lines are written to
    ``output_file`` in sorted order.

    Args:
        file_list: List of input file paths.
        output_file: Destination file path.

    Returns:
        str: ``output_file`` (for chaining).
    """
    unique_domains = set()
    for source in file_list:
        if not source or not Path(source).exists():
            continue
        with open(source, 'r', encoding='utf-8', errors='ignore') as handle:
            for raw in handle:
                entry = raw.strip()
                if entry:
                    unique_domains.add(entry)

    with open(output_file, 'w', encoding='utf-8') as handle:
        for name in sorted(unique_domains):
            handle.write(name + '\n')

    logger.info(f"合并完成: {len(unique_domains)} 个域名 -> {output_file}")
    return output_file
|
||||
|
||||
|
||||
@flow(
|
||||
name="subdomain_discovery",
|
||||
log_prints=True,
|
||||
on_running=[on_scan_flow_running],
|
||||
on_completion=[on_scan_flow_completed],
|
||||
on_failure=[on_scan_flow_failed],
|
||||
)
|
||||
def subdomain_discovery_flow(
|
||||
scan_id: int,
|
||||
target_name: str,
|
||||
target_id: int,
|
||||
scan_workspace_dir: str,
|
||||
enabled_tools: dict
|
||||
) -> dict:
|
||||
"""子域名发现扫描流程
|
||||
|
||||
工作流程(4 阶段):
|
||||
Stage 1: 被动收集(并行) - 必选
|
||||
Stage 2: 字典爆破(可选) - 子域名字典爆破
|
||||
Stage 3: 变异生成 + 验证(可选) - dnsgen + 通用存活验证
|
||||
Stage 4: DNS 存活验证(可选) - 通用存活验证
|
||||
Final: 保存到数据库
|
||||
|
||||
注意:
|
||||
- 子域名发现只对 DOMAIN 类型目标有意义
|
||||
- IP 和 CIDR 类型目标会自动跳过
|
||||
|
||||
Args:
|
||||
scan_id: 扫描任务 ID
|
||||
target_name: 目标名称(域名)
|
||||
target_id: 目标 ID
|
||||
scan_workspace_dir: Scan 工作空间目录(由 Service 层创建)
|
||||
enabled_tools: 扫描配置字典:
|
||||
{
|
||||
'passive_tools': {...},
|
||||
'bruteforce': {...},
|
||||
'permutation': {...},
|
||||
'resolve': {...}
|
||||
}
|
||||
|
||||
Returns:
|
||||
dict: 扫描结果
|
||||
|
||||
Raises:
|
||||
ValueError: 配置错误
|
||||
RuntimeError: 执行失败
|
||||
"""
|
||||
try:
|
||||
# ==================== 参数验证 ====================
|
||||
if scan_id is None:
|
||||
raise ValueError("scan_id 不能为空")
|
||||
if target_id is None:
|
||||
raise ValueError("target_id 不能为空")
|
||||
if not scan_workspace_dir:
|
||||
raise ValueError("scan_workspace_dir 不能为空")
|
||||
if enabled_tools is None:
|
||||
raise ValueError("enabled_tools 不能为空")
|
||||
|
||||
scan_config = enabled_tools
|
||||
|
||||
# 如果未提供目标域名,跳过扫描
|
||||
if not target_name:
|
||||
logger.warning("未提供目标域名,跳过子域名发现扫描")
|
||||
return _empty_result(scan_id, '', scan_workspace_dir)
|
||||
|
||||
# ==================== 检查 Target 类型 ====================
|
||||
# 子域名发现只对 DOMAIN 类型有意义,IP 和 CIDR 类型跳过
|
||||
from apps.targets.services import TargetService
|
||||
from apps.targets.models import Target
|
||||
|
||||
target_service = TargetService()
|
||||
target = target_service.get_target(target_id)
|
||||
|
||||
if target and target.type != Target.TargetType.DOMAIN:
|
||||
logger.info(
|
||||
"跳过子域名发现扫描: Target 类型为 %s (ID=%d, Name=%s),子域名发现仅适用于域名类型",
|
||||
target.type, target_id, target_name
|
||||
)
|
||||
return _empty_result(scan_id, target_name, scan_workspace_dir)
|
||||
|
||||
# 导入任务函数
|
||||
from apps.scan.tasks.subdomain_discovery import (
|
||||
run_subdomain_discovery_task,
|
||||
merge_and_validate_task,
|
||||
save_domains_task
|
||||
)
|
||||
|
||||
# Step 0: 准备工作
|
||||
from apps.scan.utils import setup_scan_directory
|
||||
result_dir = setup_scan_directory(scan_workspace_dir, 'subdomain_discovery')
|
||||
|
||||
# 验证并规范化目标域名
|
||||
try:
|
||||
domain_name = _validate_and_normalize_target(target_name)
|
||||
except ValueError as e:
|
||||
logger.warning("目标域名无效,跳过子域名发现扫描: %s", e)
|
||||
return _empty_result(scan_id, target_name, scan_workspace_dir)
|
||||
|
||||
logger.info(
|
||||
"="*60 + "\n" +
|
||||
"开始子域名发现扫描\n" +
|
||||
f" Scan ID: {scan_id}\n" +
|
||||
f" Domain: {domain_name}\n" +
|
||||
f" Workspace: {scan_workspace_dir}\n" +
|
||||
"="*60
|
||||
)
|
||||
user_log(scan_id, "subdomain_discovery", f"Starting subdomain discovery for {domain_name}")
|
||||
|
||||
# 解析配置
|
||||
passive_tools = scan_config.get('passive_tools', {})
|
||||
bruteforce_config = scan_config.get('bruteforce', {})
|
||||
permutation_config = scan_config.get('permutation', {})
|
||||
resolve_config = scan_config.get('resolve', {})
|
||||
|
||||
# 过滤出启用的被动工具
|
||||
enabled_passive_tools = {
|
||||
k: v for k, v in passive_tools.items()
|
||||
if v.get('enabled', True)
|
||||
}
|
||||
|
||||
executed_tasks = []
|
||||
all_result_files = []
|
||||
failed_tools = []
|
||||
successful_tool_names = []
|
||||
|
||||
# ==================== 生成 Provider 配置文件 ====================
|
||||
# 为 subfinder 生成第三方数据源配置
|
||||
provider_config_path = None
|
||||
try:
|
||||
from apps.scan.services.subfinder_provider_config_service import SubfinderProviderConfigService
|
||||
provider_config_service = SubfinderProviderConfigService()
|
||||
provider_config_path = provider_config_service.generate(str(result_dir))
|
||||
if provider_config_path:
|
||||
logger.info(f"Provider 配置文件已生成: {provider_config_path}")
|
||||
user_log(scan_id, "subdomain_discovery", "Provider config generated for subfinder")
|
||||
except Exception as e:
|
||||
logger.warning(f"生成 Provider 配置文件失败: {e}")
|
||||
|
||||
# ==================== Stage 1: 被动收集(并行)====================
|
||||
if enabled_passive_tools:
|
||||
logger.info("=" * 40)
|
||||
logger.info("Stage 1: 被动收集(并行)")
|
||||
logger.info("=" * 40)
|
||||
logger.info("启用工具: %s", ', '.join(enabled_passive_tools.keys()))
|
||||
user_log(scan_id, "subdomain_discovery", f"Stage 1: passive collection ({', '.join(enabled_passive_tools.keys())})")
|
||||
result_files, stage1_failed, stage1_success = _run_scans_parallel(
|
||||
enabled_tools=enabled_passive_tools,
|
||||
domain_name=domain_name,
|
||||
result_dir=result_dir,
|
||||
scan_id=scan_id,
|
||||
provider_config_path=provider_config_path
|
||||
)
|
||||
all_result_files.extend(result_files)
|
||||
failed_tools.extend(stage1_failed)
|
||||
successful_tool_names.extend(stage1_success)
|
||||
executed_tasks.extend([f'passive ({tool})' for tool in stage1_success])
|
||||
|
||||
# 合并 Stage 1 结果
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
||||
current_result = str(result_dir / f"subs_passive_{timestamp}.txt")
|
||||
if all_result_files:
|
||||
current_result = _merge_files(all_result_files, current_result)
|
||||
executed_tasks.append('merge_passive')
|
||||
else:
|
||||
# 创建空文件
|
||||
Path(current_result).touch()
|
||||
|
||||
# ==================== Stage 2: 字典爆破(可选)====================
|
||||
bruteforce_enabled = bruteforce_config.get('enabled', False)
|
||||
if bruteforce_enabled:
|
||||
logger.info("=" * 40)
|
||||
logger.info("Stage 2: 字典爆破")
|
||||
logger.info("=" * 40)
|
||||
user_log(scan_id, "subdomain_discovery", "Stage 2: bruteforce")
|
||||
|
||||
bruteforce_tool_config = bruteforce_config.get('subdomain_bruteforce', {})
|
||||
wordlist_name = bruteforce_tool_config.get('wordlist_name', 'dns_wordlist.txt')
|
||||
|
||||
try:
|
||||
# 确保本地存在字典文件(含 hash 校验)
|
||||
local_wordlist_path = ensure_wordlist_local(wordlist_name)
|
||||
|
||||
# 获取字典记录用于计算 timeout
|
||||
wordlist_service = WordlistService()
|
||||
wordlist = wordlist_service.get_wordlist_by_name(wordlist_name)
|
||||
|
||||
timeout_value = bruteforce_tool_config.get('timeout', 3600)
|
||||
if timeout_value == 'auto' and wordlist:
|
||||
line_count = getattr(wordlist, 'line_count', None)
|
||||
if line_count is None:
|
||||
try:
|
||||
with open(local_wordlist_path, 'rb') as f:
|
||||
line_count = sum(1 for _ in f)
|
||||
except OSError:
|
||||
line_count = 0
|
||||
|
||||
try:
|
||||
line_count_int = int(line_count)
|
||||
except (TypeError, ValueError):
|
||||
line_count_int = 0
|
||||
|
||||
timeout_value = line_count_int * 3 if line_count_int > 0 else 3600
|
||||
bruteforce_tool_config = {
|
||||
**bruteforce_tool_config,
|
||||
'timeout': timeout_value,
|
||||
}
|
||||
|
||||
brute_result = _run_single_tool(
|
||||
tool_name='subdomain_bruteforce',
|
||||
tool_config=bruteforce_tool_config,
|
||||
command_params={
|
||||
'domain': domain_name,
|
||||
'wordlist': local_wordlist_path,
|
||||
},
|
||||
result_dir=result_dir,
|
||||
scan_id=scan_id
|
||||
)
|
||||
|
||||
if brute_result:
|
||||
# 合并 Stage 1 + Stage 2
|
||||
current_result = _merge_files(
|
||||
[current_result, brute_result],
|
||||
str(result_dir / f"subs_merged_{timestamp}.txt")
|
||||
)
|
||||
successful_tool_names.append('subdomain_bruteforce')
|
||||
executed_tasks.append('bruteforce')
|
||||
logger.info("✓ subdomain_bruteforce 执行完成")
|
||||
user_log(scan_id, "subdomain_discovery", "subdomain_bruteforce completed")
|
||||
else:
|
||||
failed_tools.append({'tool': 'subdomain_bruteforce', 'reason': '执行失败'})
|
||||
logger.warning("⚠️ subdomain_bruteforce 执行失败")
|
||||
user_log(scan_id, "subdomain_discovery", "subdomain_bruteforce failed: execution failed", "error")
|
||||
except Exception as exc:
|
||||
failed_tools.append({'tool': 'subdomain_bruteforce', 'reason': str(exc)})
|
||||
logger.warning("字典准备失败,跳过字典爆破: %s", exc)
|
||||
user_log(scan_id, "subdomain_discovery", f"subdomain_bruteforce failed: {str(exc)}", "error")
|
||||
|
||||
# ==================== Stage 3: 变异生成 + 验证(可选)====================
|
||||
permutation_enabled = permutation_config.get('enabled', False)
|
||||
if permutation_enabled:
|
||||
logger.info("=" * 40)
|
||||
logger.info("Stage 3: 变异生成 + 存活验证(流式管道)")
|
||||
logger.info("=" * 40)
|
||||
user_log(scan_id, "subdomain_discovery", "Stage 3: permutation + resolve")
|
||||
|
||||
permutation_tool_config = permutation_config.get('subdomain_permutation_resolve', {})
|
||||
|
||||
# === Step 3.1: 泛解析采样检测 ===
|
||||
# 生成原文件 100 倍的变异样本,检查解析结果是否超过 50 倍
|
||||
before_count = _count_lines(current_result)
|
||||
|
||||
# 配置参数
|
||||
SAMPLE_MULTIPLIER = 100 # 采样数量 = 原文件 × 100
|
||||
EXPANSION_THRESHOLD = 50 # 膨胀阈值 = 原文件 × 50
|
||||
SAMPLE_TIMEOUT = 7200 # 采样超时 2 小时
|
||||
|
||||
sample_size = before_count * SAMPLE_MULTIPLIER
|
||||
max_allowed = before_count * EXPANSION_THRESHOLD
|
||||
|
||||
sample_output = str(result_dir / f"subs_permuted_sample_{timestamp}.txt")
|
||||
sample_cmd = (
|
||||
f"cat {current_result} | dnsgen - | head -n {sample_size} | "
|
||||
f"puredns resolve -r /app/backend/resources/resolvers.txt "
|
||||
f"--write {sample_output} --wildcard-tests 50 --wildcard-batch 1000000 --quiet"
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"泛解析采样检测: 原文件 {before_count} 个, "
|
||||
f"采样 {sample_size} 个, 阈值 {max_allowed} 个"
|
||||
)
|
||||
|
||||
try:
|
||||
subprocess.run(
|
||||
sample_cmd,
|
||||
shell=True,
|
||||
timeout=SAMPLE_TIMEOUT,
|
||||
check=False,
|
||||
capture_output=True
|
||||
)
|
||||
sample_result_count = _count_lines(sample_output) if Path(sample_output).exists() else 0
|
||||
|
||||
logger.info(
|
||||
f"采样结果: {sample_result_count} 个域名存活 "
|
||||
f"(原文件: {before_count}, 阈值: {max_allowed})"
|
||||
)
|
||||
|
||||
if sample_result_count > max_allowed:
|
||||
# 采样结果超过阈值,说明存在泛解析,跳过完整变异
|
||||
ratio = sample_result_count / before_count if before_count > 0 else sample_result_count
|
||||
logger.warning(
|
||||
f"跳过变异: 采样检测到泛解析 "
|
||||
f"({sample_result_count} > {max_allowed}, 膨胀率 {ratio:.1f}x)"
|
||||
)
|
||||
failed_tools.append({
|
||||
'tool': 'subdomain_permutation_resolve',
|
||||
'reason': f"采样检测到泛解析 (膨胀率 {ratio:.1f}x)"
|
||||
})
|
||||
user_log(scan_id, "subdomain_discovery", f"subdomain_permutation_resolve skipped: wildcard detected (ratio {ratio:.1f}x)", "warning")
|
||||
else:
|
||||
# === Step 3.2: 采样通过,执行完整变异 ===
|
||||
logger.info("采样检测通过,执行完整变异...")
|
||||
|
||||
permuted_result = _run_single_tool(
|
||||
tool_name='subdomain_permutation_resolve',
|
||||
tool_config=permutation_tool_config,
|
||||
command_params={
|
||||
'input_file': current_result,
|
||||
},
|
||||
result_dir=result_dir,
|
||||
scan_id=scan_id
|
||||
)
|
||||
|
||||
if permuted_result:
|
||||
# 合并原结果 + 变异验证结果
|
||||
current_result = _merge_files(
|
||||
[current_result, permuted_result],
|
||||
str(result_dir / f"subs_with_permuted_{timestamp}.txt")
|
||||
)
|
||||
successful_tool_names.append('subdomain_permutation_resolve')
|
||||
executed_tasks.append('permutation')
|
||||
logger.info("✓ subdomain_permutation_resolve 执行完成")
|
||||
user_log(scan_id, "subdomain_discovery", "subdomain_permutation_resolve completed")
|
||||
else:
|
||||
failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': '执行失败'})
|
||||
logger.warning("⚠️ subdomain_permutation_resolve 执行失败")
|
||||
user_log(scan_id, "subdomain_discovery", "subdomain_permutation_resolve failed: execution failed", "error")
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': '采样检测超时'})
|
||||
logger.warning(f"采样检测超时 ({SAMPLE_TIMEOUT}秒),跳过变异")
|
||||
user_log(scan_id, "subdomain_discovery", "subdomain_permutation_resolve failed: sample detection timeout", "error")
|
||||
except Exception as e:
|
||||
failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': f'采样检测失败: {e}'})
|
||||
logger.warning(f"采样检测失败: {e},跳过变异")
|
||||
user_log(scan_id, "subdomain_discovery", f"subdomain_permutation_resolve failed: {str(e)}", "error")
|
||||
|
||||
# ==================== Stage 4: DNS 存活验证(可选)====================
|
||||
# 无论是否启用 Stage 3,只要 resolve.enabled 为 true 就会执行,对当前所有候选子域做统一 DNS 验证
|
||||
resolve_enabled = resolve_config.get('enabled', False)
|
||||
if resolve_enabled:
|
||||
logger.info("=" * 40)
|
||||
logger.info("Stage 4: DNS 存活验证")
|
||||
logger.info("=" * 40)
|
||||
user_log(scan_id, "subdomain_discovery", "Stage 4: DNS resolve")
|
||||
|
||||
resolve_tool_config = resolve_config.get('subdomain_resolve', {})
|
||||
|
||||
# 根据当前候选子域数量动态计算 timeout(支持 timeout: auto)
|
||||
timeout_value = resolve_tool_config.get('timeout', 3600)
|
||||
if timeout_value == 'auto':
|
||||
line_count = 0
|
||||
try:
|
||||
with open(current_result, 'rb') as f:
|
||||
line_count = sum(1 for _ in f)
|
||||
except OSError:
|
||||
line_count = 0
|
||||
|
||||
try:
|
||||
line_count_int = int(line_count)
|
||||
except (TypeError, ValueError):
|
||||
line_count_int = 0
|
||||
|
||||
timeout_value = line_count_int * 3 if line_count_int > 0 else 3600
|
||||
resolve_tool_config = {
|
||||
**resolve_tool_config,
|
||||
'timeout': timeout_value,
|
||||
}
|
||||
|
||||
alive_result = _run_single_tool(
|
||||
tool_name='subdomain_resolve',
|
||||
tool_config=resolve_tool_config,
|
||||
command_params={
|
||||
'input_file': current_result,
|
||||
},
|
||||
result_dir=result_dir,
|
||||
scan_id=scan_id
|
||||
)
|
||||
|
||||
if alive_result:
|
||||
current_result = alive_result
|
||||
successful_tool_names.append('subdomain_resolve')
|
||||
executed_tasks.append('resolve')
|
||||
logger.info("✓ subdomain_resolve 执行完成")
|
||||
user_log(scan_id, "subdomain_discovery", "subdomain_resolve completed")
|
||||
else:
|
||||
failed_tools.append({'tool': 'subdomain_resolve', 'reason': '执行失败'})
|
||||
logger.warning("⚠️ subdomain_resolve 执行失败")
|
||||
user_log(scan_id, "subdomain_discovery", "subdomain_resolve failed: execution failed", "error")
|
||||
|
||||
# ==================== Final: 保存到数据库 ====================
|
||||
logger.info("=" * 40)
|
||||
logger.info("Final: 保存到数据库")
|
||||
logger.info("=" * 40)
|
||||
|
||||
# 最终验证和保存
|
||||
final_file = merge_and_validate_task(
|
||||
result_files=[current_result],
|
||||
result_dir=str(result_dir)
|
||||
)
|
||||
|
||||
save_result = save_domains_task(
|
||||
domains_file=final_file,
|
||||
scan_id=scan_id,
|
||||
target_id=target_id
|
||||
)
|
||||
processed_domains = save_result.get('processed_records', 0)
|
||||
executed_tasks.append('save_domains')
|
||||
|
||||
# 记录 Flow 完成
|
||||
logger.info("="*60 + "\n✓ 子域名发现扫描完成\n" + "="*60)
|
||||
user_log(scan_id, "subdomain_discovery", f"subdomain_discovery completed: found {processed_domains} subdomains")
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'scan_id': scan_id,
|
||||
'target': domain_name,
|
||||
'scan_workspace_dir': scan_workspace_dir,
|
||||
'total': processed_domains,
|
||||
'executed_tasks': executed_tasks,
|
||||
'tool_stats': {
|
||||
'total': len(enabled_passive_tools) + (1 if bruteforce_enabled else 0) +
|
||||
(1 if permutation_enabled else 0) + (1 if resolve_enabled else 0),
|
||||
'successful': len(successful_tool_names),
|
||||
'failed': len(failed_tools),
|
||||
'successful_tools': successful_tool_names,
|
||||
'failed_tools': failed_tools
|
||||
}
|
||||
}
|
||||
|
||||
except ValueError as e:
|
||||
logger.error("配置错误: %s", e)
|
||||
raise
|
||||
except RuntimeError as e:
|
||||
logger.error("运行时错误: %s", e)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception("子域名发现扫描失败: %s", e)
|
||||
raise
|
||||
|
||||
|
||||
def _empty_result(scan_id: int, target: str, scan_workspace_dir: str) -> dict:
|
||||
"""返回空结果"""
|
||||
return {
|
||||
'success': True,
|
||||
'scan_id': scan_id,
|
||||
'target': target,
|
||||
'scan_workspace_dir': scan_workspace_dir,
|
||||
'total': 0,
|
||||
'executed_tasks': [],
|
||||
'tool_stats': {
|
||||
'total': 0,
|
||||
'successful': 0,
|
||||
'failed': 0,
|
||||
'successful_tools': [],
|
||||
'failed_tools': []
|
||||
}
|
||||
}
|
||||
@@ -1,251 +0,0 @@
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from prefect import flow
|
||||
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.utils import build_scan_command, ensure_nuclei_templates_local, user_log
|
||||
from apps.scan.tasks.vuln_scan import (
|
||||
export_endpoints_task,
|
||||
run_vuln_tool_task,
|
||||
run_and_stream_save_dalfox_vulns_task,
|
||||
run_and_stream_save_nuclei_vulns_task,
|
||||
)
|
||||
from .utils import calculate_timeout_by_line_count
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@flow(
    name="endpoints_vuln_scan_flow",
    log_prints=True,
)
def endpoints_vuln_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: Dict[str, dict],
) -> dict:
    """Endpoint-based vulnerability scan flow (Dalfox, Nuclei, etc.).

    Exports the target's endpoint URLs to a file, submits one Prefect task
    per enabled tool (streaming tasks for dalfox_xss/nuclei, a plain runner
    for anything else), then collects every task's result.

    Args:
        scan_id: Scan record ID; must not be None.
        target_name: Target display name; must be non-empty.
        target_id: Target record ID; must not be None.
        scan_workspace_dir: Workspace directory for this scan; must be non-empty.
        enabled_tools: Mapping of tool name -> tool config; must be non-empty.

    Returns:
        dict containing the success flag, endpoint count, the list of tools
        that were submitted, and per-tool results.

    Raises:
        ValueError: on missing required arguments, or when a tool's timeout
            config is neither an integer nor the string "auto".
    """
    try:
        # Argument validation: fail fast before touching the filesystem.
        if scan_id is None:
            raise ValueError("scan_id 不能为空")
        if not target_name:
            raise ValueError("target_name 不能为空")
        if target_id is None:
            raise ValueError("target_id 不能为空")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir 不能为空")
        if not enabled_tools:
            raise ValueError("enabled_tools 不能为空")

        from apps.scan.utils import setup_scan_directory
        vuln_scan_dir = setup_scan_directory(scan_workspace_dir, 'vuln_scan')
        endpoints_file = vuln_scan_dir / "input_endpoints.txt"

        # Step 1: export the target's endpoint URLs into endpoints_file.
        export_result = export_endpoints_task(
            target_id=target_id,
            output_file=str(endpoints_file),
        )
        total_endpoints = export_result.get("total_count", 0)

        # No endpoints (or an empty/missing file): skip with a success payload.
        if total_endpoints == 0 or not endpoints_file.exists() or endpoints_file.stat().st_size == 0:
            logger.warning("目标下没有可用 Endpoint,跳过漏洞扫描")
            return {
                "success": True,
                "scan_id": scan_id,
                "target": target_name,
                "scan_workspace_dir": scan_workspace_dir,
                "endpoints_file": str(endpoints_file),
                "endpoint_count": 0,
                "executed_tools": [],
                "tool_results": {},
            }

        logger.info("Endpoint 导出完成,共 %d 条,开始执行漏洞扫描", total_endpoints)

        tool_results: Dict[str, dict] = {}

        # Step 2: run the vuln-scan tools in parallel:
        #   1) submit a Prefect task per tool so workers can schedule them
        #      concurrently
        #   2) afterwards gather each future's result into tool_results
        tool_futures: Dict[str, dict] = {}

        for tool_name, tool_config in enabled_tools.items():
            # Nuclei needs its template repositories present locally first
            # (multiple template repos are supported).
            template_args = ""
            if tool_name == "nuclei":
                repo_names = tool_config.get("template_repo_names")
                if not repo_names or not isinstance(repo_names, (list, tuple)):
                    logger.error("Nuclei 配置缺少 template_repo_names(数组),跳过")
                    continue
                template_paths = []
                try:
                    for repo_name in repo_names:
                        path = ensure_nuclei_templates_local(repo_name)
                        template_paths.append(path)
                        logger.info("Nuclei 模板路径 [%s]: %s", repo_name, path)
                except Exception as e:
                    logger.error("获取 Nuclei 模板失败: %s,跳过 nuclei 扫描", e)
                    continue
                template_args = " ".join(f"-t {p}" for p in template_paths)

            # Build the command-line parameters for this tool.
            command_params = {"endpoints_file": str(endpoints_file)}
            if template_args:
                command_params["template_args"] = template_args

            command = build_scan_command(
                tool_name=tool_name,
                scan_type="vuln_scan",
                command_params=command_params,
                tool_config=tool_config,
            )

            raw_timeout = tool_config.get("timeout", 600)

            if isinstance(raw_timeout, str) and raw_timeout == "auto":
                # timeout=auto: derive the timeout from the line count of
                # endpoints_file. Dalfox: 100s per line, Nuclei: 30s per line.
                base_per_time = 30 if tool_name == "nuclei" else 100
                timeout = calculate_timeout_by_line_count(
                    tool_config=tool_config,
                    file_path=str(endpoints_file),
                    base_per_time=base_per_time,
                )
            else:
                try:
                    timeout = int(raw_timeout)
                except (TypeError, ValueError) as e:
                    raise ValueError(
                        f"工具 {tool_name} 的 timeout 配置无效: {raw_timeout!r}"
                    ) from e

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            log_file = vuln_scan_dir / f"{tool_name}_{timestamp}.log"

            # Dalfox XSS uses a streaming task: vulnerabilities are parsed and
            # saved while the tool is still running.
            if tool_name == "dalfox_xss":
                logger.info("开始执行漏洞扫描工具 %s(流式保存漏洞结果,已提交任务)", tool_name)
                user_log(scan_id, "vuln_scan", f"Running {tool_name}: {command}")
                future = run_and_stream_save_dalfox_vulns_task.submit(
                    cmd=command,
                    tool_name=tool_name,
                    scan_id=scan_id,
                    target_id=target_id,
                    cwd=str(vuln_scan_dir),
                    shell=True,
                    batch_size=1,
                    timeout=timeout,
                    log_file=str(log_file),
                )

                tool_futures[tool_name] = {
                    "future": future,
                    "command": command,
                    "timeout": timeout,
                    "log_file": str(log_file),
                    "mode": "streaming",
                }
            elif tool_name == "nuclei":
                # Nuclei also uses a streaming task.
                logger.info("开始执行漏洞扫描工具 %s(流式保存漏洞结果,已提交任务)", tool_name)
                user_log(scan_id, "vuln_scan", f"Running {tool_name}: {command}")
                future = run_and_stream_save_nuclei_vulns_task.submit(
                    cmd=command,
                    tool_name=tool_name,
                    scan_id=scan_id,
                    target_id=target_id,
                    cwd=str(vuln_scan_dir),
                    shell=True,
                    batch_size=1,
                    timeout=timeout,
                    log_file=str(log_file),
                )

                tool_futures[tool_name] = {
                    "future": future,
                    "command": command,
                    "timeout": timeout,
                    "log_file": str(log_file),
                    "mode": "streaming",
                }
            else:
                # Any other tool falls back to the non-streaming runner.
                logger.info("开始执行漏洞扫描工具 %s(已提交任务)", tool_name)
                user_log(scan_id, "vuln_scan", f"Running {tool_name}: {command}")
                future = run_vuln_tool_task.submit(
                    tool_name=tool_name,
                    command=command,
                    timeout=timeout,
                    log_file=str(log_file),
                )

                tool_futures[tool_name] = {
                    "future": future,
                    "command": command,
                    "timeout": timeout,
                    "log_file": str(log_file),
                    "mode": "normal",
                }

        # Collect every submitted tool's result; a failure of one tool is
        # logged but does not abort the others.
        for tool_name, meta in tool_futures.items():
            future = meta["future"]
            try:
                result = future.result()

                if meta["mode"] == "streaming":
                    created_vulns = result.get("created_vulns", 0)
                    tool_results[tool_name] = {
                        "command": meta["command"],
                        "timeout": meta["timeout"],
                        "processed_records": result.get("processed_records"),
                        "created_vulns": created_vulns,
                        "command_log_file": meta["log_file"],
                    }
                    logger.info("✓ 工具 %s 执行完成 - 漏洞: %d", tool_name, created_vulns)
                    user_log(scan_id, "vuln_scan", f"{tool_name} completed: found {created_vulns} vulnerabilities")
                else:
                    tool_results[tool_name] = {
                        "command": meta["command"],
                        "timeout": meta["timeout"],
                        "duration": result.get("duration"),
                        "returncode": result.get("returncode"),
                        "command_log_file": result.get("command_log_file"),
                    }
                    logger.info("✓ 工具 %s 执行完成 - returncode=%s", tool_name, result.get("returncode"))
                    user_log(scan_id, "vuln_scan", f"{tool_name} completed")
            except Exception as e:
                reason = str(e)
                logger.error("工具 %s 执行失败: %s", tool_name, e, exc_info=True)
                user_log(scan_id, "vuln_scan", f"{tool_name} failed: {reason}", "error")

        return {
            "success": True,
            "scan_id": scan_id,
            "target": target_name,
            "scan_workspace_dir": scan_workspace_dir,
            "endpoints_file": str(endpoints_file),
            "endpoint_count": total_endpoints,
            "executed_tools": list(enabled_tools.keys()),
            "tool_results": tool_results,
        }

    except Exception as e:
        logger.exception("Endpoint 漏洞扫描失败: %s", e)
        raise
|
||||
@@ -1,119 +0,0 @@
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
import logging
|
||||
from typing import Dict, Tuple
|
||||
|
||||
from prefect import flow
|
||||
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.configs.command_templates import get_command_template
|
||||
from apps.scan.utils import user_log
|
||||
from .endpoints_vuln_scan_flow import endpoints_vuln_scan_flow
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _classify_vuln_tools(enabled_tools: Dict[str, dict]) -> Tuple[Dict[str, dict], Dict[str, dict]]:
    """Split enabled vuln-scan tools by the input_type of their command template.

    Currently supported:
    - endpoints_file: tools fed by an endpoint-list file (e.g. Dalfox XSS)
    Reserved:
    - any other input_type is routed to the second mapping and is not
      processed for now.
    """
    endpoints_tools: Dict[str, dict] = {}
    other_tools: Dict[str, dict] = {}

    for name, config in enabled_tools.items():
        template = get_command_template("vuln_scan", name) or {}
        # Missing input_type defaults to endpoints_file.
        if template.get("input_type", "endpoints_file") == "endpoints_file":
            bucket = endpoints_tools
        else:
            bucket = other_tools
        bucket[name] = config

    return endpoints_tools, other_tools
|
||||
|
||||
|
||||
@flow(
    name="vuln_scan",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def vuln_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: Dict[str, dict],
) -> dict:
    """Main vulnerability-scan flow: orchestrates the vuln-scan sub-flows.

    Supported tools:
    - dalfox_xss: XSS scanning (streaming save)
    - nuclei: generic vulnerability scanning (streaming save, template
      commit-hash sync supported)

    Args:
        scan_id: Scan record ID; must not be None.
        target_name: Target display name; must be non-empty.
        target_id: Target record ID; must not be None.
        scan_workspace_dir: Workspace directory for this scan; must be non-empty.
        enabled_tools: Mapping of tool name -> tool config; must be non-empty.

    Returns:
        The result dict of the endpoints sub-flow (currently the only sub-flow).

    Raises:
        ValueError: on missing required arguments, or when no tool with
            endpoints_file input is enabled.
    """
    try:
        # Argument validation: fail fast.
        if scan_id is None:
            raise ValueError("scan_id 不能为空")
        if not target_name:
            raise ValueError("target_name 不能为空")
        if target_id is None:
            raise ValueError("target_id 不能为空")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir 不能为空")
        if not enabled_tools:
            raise ValueError("enabled_tools 不能为空")

        logger.info("开始漏洞扫描 - Scan ID: %s, Target: %s", scan_id, target_name)
        user_log(scan_id, "vuln_scan", "Starting vulnerability scan")

        # Step 1: classify tools by the input type of their command template.
        endpoints_tools, other_tools = _classify_vuln_tools(enabled_tools)

        logger.info(
            "漏洞扫描工具分类 - endpoints_file: %s, 其他: %s",
            list(endpoints_tools.keys()) or "无",
            list(other_tools.keys()) or "无",
        )

        if other_tools:
            # Unsupported input types are ignored (reserved for future use).
            logger.warning(
                "存在暂不支持输入类型的漏洞扫描工具,将被忽略: %s",
                list(other_tools.keys()),
            )

        if not endpoints_tools:
            raise ValueError("漏洞扫描需要至少启用一个以 endpoints_file 为输入的工具(如 dalfox_xss、nuclei)。")

        # Step 2: run the endpoint-based vuln-scan sub-flow (serially).
        endpoint_result = endpoints_vuln_scan_flow(
            scan_id=scan_id,
            target_name=target_name,
            target_id=target_id,
            scan_workspace_dir=scan_workspace_dir,
            enabled_tools=endpoints_tools,
        )

        # Summarize the vulnerabilities created by all tools for logging.
        total_vulns = sum(
            r.get("created_vulns", 0)
            for r in endpoint_result.get("tool_results", {}).values()
        )
        logger.info("✓ 漏洞扫描完成 - 新增漏洞: %d", total_vulns)
        user_log(scan_id, "vuln_scan", f"vuln_scan completed: found {total_vulns} vulnerabilities")

        # Only one sub-flow exists today, so its result is returned directly.
        return endpoint_result

    except Exception as e:
        logger.exception("漏洞扫描主 Flow 失败: %s", e)
        raise
|
||||
@@ -1,189 +0,0 @@
|
||||
"""
|
||||
扫描流程处理器
|
||||
|
||||
负责处理扫描流程(端口扫描、子域名发现等)的状态变化和通知
|
||||
|
||||
职责:
|
||||
- 更新各阶段的进度状态(running/completed/failed)
|
||||
- 发送扫描阶段的通知
|
||||
- 记录 Flow 性能指标
|
||||
"""
|
||||
|
||||
import logging
|
||||
from prefect import Flow
|
||||
from prefect.client.schemas import FlowRun, State
|
||||
|
||||
from apps.scan.utils.performance import FlowPerformanceTracker
|
||||
from apps.scan.utils import user_log
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# 存储每个 flow_run 的性能追踪器
|
||||
_flow_trackers: dict[str, FlowPerformanceTracker] = {}
|
||||
|
||||
|
||||
def _get_stage_from_flow_name(flow_name: str) -> str | None:
|
||||
"""
|
||||
从 Flow name 获取对应的 stage
|
||||
|
||||
Flow name 直接作为 stage(与 engine_config 的 key 一致)
|
||||
排除主 Flow(initiate_scan)
|
||||
"""
|
||||
# 排除主 Flow,它不是阶段 Flow
|
||||
if flow_name == 'initiate_scan':
|
||||
return None
|
||||
return flow_name
|
||||
|
||||
|
||||
def on_scan_flow_running(flow: Flow, flow_run: FlowRun, state: State) -> None:
    """Callback fired when a scan flow enters the running state.

    Responsibilities:
    - start a FlowPerformanceTracker for this flow run
    - mark the corresponding scan stage as running

    Args:
        flow: Prefect Flow object.
        flow_run: the flow-run instance (parameters carry scan_id etc.).
        state: current flow state (unused beyond the hook signature).
    """
    logger.info("🚀 扫描流程开始运行 - Flow: %s, Run ID: %s", flow.name, flow_run.id)

    # Extract flow parameters.
    flow_params = flow_run.parameters or {}
    scan_id = flow_params.get('scan_id')
    target_name = flow_params.get('target_name', 'unknown')
    target_id = flow_params.get('target_id')

    # Start performance tracking; stored keyed by flow-run id so the
    # completion/failure hooks can pop it later.
    if scan_id:
        tracker = FlowPerformanceTracker(flow.name, scan_id)
        tracker.start(target_id=target_id, target_name=target_name)
        _flow_trackers[str(flow_run.id)] = tracker

    # Update stage progress (skipped for the main 'initiate_scan' flow).
    stage = _get_stage_from_flow_name(flow.name)
    if scan_id and stage:
        try:
            # Local import avoids a module-level Django dependency cycle
            # at import time — TODO confirm against project conventions.
            from apps.scan.services import ScanService
            service = ScanService()
            service.start_stage(scan_id, stage)
            logger.info(f"✓ 阶段进度已更新为 running - Scan ID: {scan_id}, Stage: {stage}")
        except Exception as e:
            logger.error(f"更新阶段进度失败 - Scan ID: {scan_id}, Stage: {stage}: {e}")
|
||||
|
||||
|
||||
def on_scan_flow_completed(flow: Flow, flow_run: FlowRun, state: State) -> None:
    """Callback fired when a scan flow completes successfully.

    Responsibilities:
    - finish the performance tracker for this flow run
    - mark the corresponding scan stage as completed (with optional detail
      extracted from the flow's result dict)
    - refresh cached stats so the frontend sees incremental progress

    Args:
        flow: Prefect Flow object.
        flow_run: the flow-run instance.
        state: final flow state; its result may carry a 'detail' entry.
    """
    logger.info("✅ 扫描流程完成 - Flow: %s, Run ID: %s", flow.name, flow_run.id)

    # Extract flow parameters.
    flow_params = flow_run.parameters or {}
    scan_id = flow_params.get('scan_id')

    # Fetch the flow result, tolerating any retrieval error.
    # NOTE(review): `state.result` is an attribute access used as a truthiness
    # guard before calling it — if it is a bound method it is always truthy;
    # confirm intent against the Prefect State API.
    result = None
    try:
        result = state.result() if state.result else None
    except Exception:
        pass

    # Record performance metrics for this run, if a tracker was started.
    tracker = _flow_trackers.pop(str(flow_run.id), None)
    if tracker:
        tracker.finish(success=True)

    # Update stage progress (skipped for the main 'initiate_scan' flow).
    stage = _get_stage_from_flow_name(flow.name)
    if scan_id and stage:
        try:
            from apps.scan.services import ScanService
            service = ScanService()
            # Extract 'detail' from the flow result when it is a dict.
            detail = None
            if isinstance(result, dict):
                detail = result.get('detail')
            service.complete_stage(scan_id, stage, detail)
            logger.info(f"✓ 阶段进度已更新为 completed - Scan ID: {scan_id}, Stage: {stage}")
            # Refresh cached stats after each stage so the frontend can show
            # incremental results in near-real time.
            try:
                service.update_cached_stats(scan_id)
                logger.info("✓ 阶段完成后已刷新缓存统计 - Scan ID: %s", scan_id)
            except Exception as e:
                logger.error("阶段完成后刷新缓存统计失败 - Scan ID: %s, 错误: %s", scan_id, e)
        except Exception as e:
            logger.error(f"更新阶段进度失败 - Scan ID: {scan_id}, Stage: {stage}: {e}")
|
||||
|
||||
|
||||
def on_scan_flow_failed(flow: Flow, flow_run: FlowRun, state: State) -> None:
    """Callback fired when a scan flow fails.

    Responsibilities:
    - write a ScanLog entry so the frontend can display the failure
    - finish the performance tracker with the error message
    - mark the corresponding scan stage as failed
    - send a failure notification

    Args:
        flow: Prefect Flow object.
        flow_run: the flow-run instance.
        state: final flow state; its message carries the error description.
    """
    logger.info("❌ 扫描流程失败 - Flow: %s, Run ID: %s", flow.name, flow_run.id)

    # Extract flow parameters.
    flow_params = flow_run.parameters or {}
    scan_id = flow_params.get('scan_id')
    target_name = flow_params.get('target_name', 'unknown')

    # Extract the error message from the failed state.
    error_message = str(state.message) if state.message else "未知错误"

    # Compute the stage once (fix: the original recomputed it a second time
    # before updating progress). None for the main 'initiate_scan' flow.
    stage = _get_stage_from_flow_name(flow.name)

    # Write a ScanLog entry so the frontend can surface the failure.
    if scan_id and stage:
        user_log(scan_id, stage, f"Failed: {error_message}", "error")

    # Record performance metrics for the failed run, if tracking was started.
    tracker = _flow_trackers.pop(str(flow_run.id), None)
    if tracker:
        tracker.finish(success=False, error_message=error_message)

    # Update stage progress to failed.
    if scan_id and stage:
        try:
            from apps.scan.services import ScanService
            service = ScanService()
            service.fail_stage(scan_id, stage, error_message)
            logger.info(f"✓ 阶段进度已更新为 failed - Scan ID: {scan_id}, Stage: {stage}")
        except Exception as e:
            logger.error(f"更新阶段进度失败 - Scan ID: {scan_id}, Stage: {stage}: {e}")

    # Send a failure notification; notification errors are logged, not raised.
    try:
        from apps.scan.notifications import create_notification, NotificationLevel
        message = f"任务:{flow.name}\n状态:执行失败\n错误:{error_message}"
        create_notification(
            title=target_name,
            message=message,
            level=NotificationLevel.HIGH
        )
        logger.error(f"✓ 扫描失败通知已发送 - Target: {target_name}, Flow: {flow.name}, Error: {error_message}")
    except Exception as e:
        logger.error(f"发送扫描失败通知失败 - Flow: {flow.name}: {e}")
|
||||
@@ -1,189 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
扫描任务启动脚本
|
||||
|
||||
用于动态扫描容器启动时执行。
|
||||
必须在 Django 导入之前获取配置并设置环境变量。
|
||||
"""
|
||||
import argparse
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
|
||||
|
||||
def diagnose_prefect_environment():
    """Diagnose the Prefect runtime environment, printing details for debugging.

    Runs seven checks, each printed to stdout in order: Prefect environment
    variables, the PREFECT_HOME directory, uvicorn availability, the Prefect
    version, SQLite read/write support, local port binding, and system memory
    (when psutil is installed). Purely informational — never raises; every
    check failure is printed and swallowed.
    """
    print("\n" + "="*60)
    print("Prefect 环境诊断")
    print("="*60)

    # 1. Inspect the Prefect-related environment variables.
    print("\n[诊断] Prefect 环境变量:")
    prefect_vars = [
        'PREFECT_HOME',
        'PREFECT_API_URL',
        'PREFECT_SERVER_EPHEMERAL_ENABLED',
        'PREFECT_SERVER_EPHEMERAL_STARTUP_TIMEOUT_SECONDS',
        'PREFECT_SERVER_DATABASE_CONNECTION_URL',
        'PREFECT_LOGGING_LEVEL',
        'PREFECT_DEBUG_MODE',
    ]
    for var in prefect_vars:
        value = os.environ.get(var, 'NOT SET')
        print(f" {var}={value}")

    # 2. Inspect the PREFECT_HOME directory (create it if missing).
    prefect_home = os.environ.get('PREFECT_HOME', os.path.expanduser('~/.prefect'))
    print(f"\n[诊断] PREFECT_HOME 目录: {prefect_home}")
    if os.path.exists(prefect_home):
        print(f" ✓ 目录存在")
        print(f" 可写: {os.access(prefect_home, os.W_OK)}")
        try:
            files = os.listdir(prefect_home)
            print(f" 文件列表: {files[:10]}{'...' if len(files) > 10 else ''}")
        except Exception as e:
            print(f" ✗ 无法列出文件: {e}")
    else:
        print(f" 目录不存在,尝试创建...")
        try:
            os.makedirs(prefect_home, exist_ok=True)
            print(f" ✓ 创建成功")
        except Exception as e:
            print(f" ✗ 创建失败: {e}")

    # 3. Check whether uvicorn is on PATH (needed by the ephemeral server).
    print(f"\n[诊断] uvicorn 可用性:")
    import shutil
    uvicorn_path = shutil.which('uvicorn')
    if uvicorn_path:
        print(f" ✓ uvicorn 路径: {uvicorn_path}")
    else:
        print(f" ✗ uvicorn 不在 PATH 中")
        print(f" PATH: {os.environ.get('PATH', 'NOT SET')}")

    # 4. Check the installed Prefect version.
    print(f"\n[诊断] Prefect 版本:")
    try:
        import prefect
        print(f" ✓ prefect=={prefect.__version__}")
    except Exception as e:
        print(f" ✗ 无法导入 prefect: {e}")

    # 5. Check SQLite support with a create/write/delete round trip.
    print(f"\n[诊断] SQLite 支持:")
    try:
        import sqlite3
        print(f" ✓ sqlite3 版本: {sqlite3.sqlite_version}")
        # Round-trip test: create a throwaway database file.
        test_db = os.path.join(prefect_home, 'test.db')
        conn = sqlite3.connect(test_db)
        conn.execute('CREATE TABLE IF NOT EXISTS test (id INTEGER)')
        conn.close()
        os.remove(test_db)
        print(f" ✓ SQLite 读写测试通过")
    except Exception as e:
        print(f" ✗ SQLite 测试失败: {e}")

    # 6. Check that a local TCP port can be bound (port 0 = OS-assigned).
    print(f"\n[诊断] 端口绑定测试:")
    try:
        import socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('127.0.0.1', 0))
        port = sock.getsockname()[1]
        sock.close()
        print(f" ✓ 可以绑定 127.0.0.1 端口 (测试端口: {port})")
    except Exception as e:
        print(f" ✗ 端口绑定失败: {e}")

    # 7. Report memory usage (optional; requires psutil).
    print(f"\n[诊断] 系统资源:")
    try:
        import psutil
        mem = psutil.virtual_memory()
        print(f" 内存总量: {mem.total / 1024 / 1024:.0f} MB")
        print(f" 可用内存: {mem.available / 1024 / 1024:.0f} MB")
        print(f" 内存使用率: {mem.percent}%")
    except ImportError:
        print(f" psutil 未安装,跳过内存检查")
    except Exception as e:
        print(f" ✗ 资源检查失败: {e}")

    print("\n" + "="*60)
    print("诊断完成")
    print("="*60 + "\n")
|
||||
|
||||
|
||||
def main():
    """Entry point executed when the dynamic scan container starts.

    Ordering is deliberate and must be preserved: configuration is fetched
    and Django is initialised (step 1) BEFORE any Django-dependent module
    is imported, which is why the flow import happens late (step 3).
    Exits the process with status 1 on any failure.
    """
    print("="*60)
    print("run_initiate_scan.py 启动")
    print(f" Python: {sys.version}")
    print(f" CWD: {os.getcwd()}")
    print(f" SERVER_URL: {os.environ.get('SERVER_URL', 'NOT SET')}")
    print("="*60)

    # 1. Fetch configuration from the config centre and initialise Django
    #    (must happen before any Django import).
    print("[1/4] 从配置中心获取配置...")
    try:
        from apps.common.container_bootstrap import fetch_config_and_setup_django
        fetch_config_and_setup_django()
        print("[1/4] ✓ 配置获取成功")
    except Exception as e:
        print(f"[1/4] ✗ 配置获取失败: {e}")
        traceback.print_exc()
        sys.exit(1)

    # 2. Parse command-line arguments.
    print("[2/4] 解析命令行参数...")
    parser = argparse.ArgumentParser(description="执行扫描初始化 Flow")
    parser.add_argument("--scan_id", type=int, required=True, help="扫描任务 ID")
    parser.add_argument("--target_name", type=str, required=True, help="目标名称")
    parser.add_argument("--target_id", type=int, required=True, help="目标 ID")
    parser.add_argument("--scan_workspace_dir", type=str, required=True, help="扫描工作目录")
    parser.add_argument("--engine_name", type=str, required=True, help="引擎名称")
    parser.add_argument("--scheduled_scan_name", type=str, default=None, help="定时扫描任务名称(可选)")

    args = parser.parse_args()
    print(f"[2/4] ✓ 参数解析成功:")
    print(f" scan_id: {args.scan_id}")
    print(f" target_name: {args.target_name}")
    print(f" target_id: {args.target_id}")
    print(f" scan_workspace_dir: {args.scan_workspace_dir}")
    print(f" engine_name: {args.engine_name}")
    print(f" scheduled_scan_name: {args.scheduled_scan_name}")

    # 2.5. Run Prefect environment diagnostics (only when DEBUG=true).
    if os.environ.get('DEBUG', '').lower() == 'true':
        diagnose_prefect_environment()

    # 3. Django is configured now, so Django-dependent modules are safe
    #    to import.
    print("[3/4] 导入 initiate_scan_flow...")
    try:
        from apps.scan.flows.initiate_scan_flow import initiate_scan_flow
        print("[3/4] ✓ 导入成功")
    except Exception as e:
        print(f"[3/4] ✗ 导入失败: {e}")
        traceback.print_exc()
        sys.exit(1)

    # 4. Execute the flow with the parsed arguments.
    print("[4/4] 执行 initiate_scan_flow...")
    try:
        result = initiate_scan_flow(
            scan_id=args.scan_id,
            target_name=args.target_name,
            target_id=args.target_id,
            scan_workspace_dir=args.scan_workspace_dir,
            engine_name=args.engine_name,
            scheduled_scan_name=args.scheduled_scan_name,
        )
        print("[4/4] ✓ Flow 执行完成")
        print(f"结果: {result}")
    except Exception as e:
        print(f"[4/4] ✗ Flow 执行失败: {e}")
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
@@ -1,295 +0,0 @@
|
||||
"""
|
||||
快速扫描服务
|
||||
|
||||
负责解析用户输入(URL、域名、IP、CIDR)并创建对应的资产数据
|
||||
"""
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional, Literal, List, Dict, Any
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from django.db import transaction
|
||||
|
||||
from apps.common.validators import validate_url, detect_input_type, validate_domain, validate_ip, validate_cidr, is_valid_ip
|
||||
from apps.targets.services.target_service import TargetService
|
||||
from apps.targets.models import Target
|
||||
from apps.asset.dtos import WebSiteDTO
|
||||
from apps.asset.dtos.asset import EndpointDTO
|
||||
from apps.asset.repositories.asset.website_repository import DjangoWebSiteRepository
|
||||
from apps.asset.repositories.asset.endpoint_repository import DjangoEndpointRepository
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class ParsedInputDTO:
    """
    Parsed-input DTO.

    Carries one parsed line of user input (URL / domain / IP / CIDR)
    together with its validation status. Used only by the quick-scan flow.
    """
    original_input: str  # the raw input line, as entered
    input_type: Literal['url', 'domain', 'ip', 'cidr']  # detected input kind
    target_name: str  # host/domain/ip/cidr
    target_type: Literal['domain', 'ip', 'cidr']  # kind of Target to create
    website_url: Optional[str] = None  # root URL (scheme://host[:port]); only set for URL input
    endpoint_url: Optional[str] = None  # full URL (with path); only set when the URL has a path
    is_valid: bool = True  # False when parsing/validation failed
    error: Optional[str] = None  # validation error message when is_valid is False
    line_number: Optional[int] = None  # 1-based line number in the original input
|
||||
|
||||
|
||||
class QuickScanService:
    """Quick-scan service — parses user input and creates the matching assets."""

    def __init__(self):
        self.target_service = TargetService()  # Target creation/lookup
        self.website_repo = DjangoWebSiteRepository()  # WebSite bulk writes
        self.endpoint_repo = DjangoEndpointRepository()  # Endpoint bulk writes

    def parse_inputs(self, inputs: List[str]) -> List[ParsedInputDTO]:
        """
        Parse multi-line input.

        Args:
            inputs: list of input strings (one per line)

        Returns:
            List of parse results (blank lines are skipped; invalid lines
            are kept with is_valid=False and an error message)
        """
        results = []
        for line_number, input_str in enumerate(inputs, start=1):
            input_str = input_str.strip()

            # Skip blank lines.
            if not input_str:
                continue

            try:
                # Detect the input kind (url / domain / ip / cidr).
                input_type = detect_input_type(input_str)

                if input_type == 'url':
                    dto = self._parse_url_input(input_str, line_number)
                else:
                    dto = self._parse_target_input(input_str, input_type, line_number)

                results.append(dto)
            except ValueError as e:
                # Parsing failed: record the error row instead of aborting
                # the whole batch.
                results.append(ParsedInputDTO(
                    original_input=input_str,
                    input_type='domain',  # fallback type for invalid rows
                    target_name=input_str,
                    target_type='domain',
                    is_valid=False,
                    error=str(e),
                    line_number=line_number
                ))

        return results

    def _parse_url_input(self, url_str: str, line_number: int) -> ParsedInputDTO:
        """
        Parse a URL input line.

        Args:
            url_str: URL string
            line_number: 1-based line number

        Returns:
            ParsedInputDTO

        Raises:
            ValueError: propagated from validate_url on a malformed URL
        """
        # Validate the URL format first (raises on bad input).
        validate_url(url_str)

        # Parse with the standard library.
        parsed = urlparse(url_str)

        host = parsed.hostname  # host without the port
        has_path = parsed.path and parsed.path != '/'

        # Build the root URL: scheme://host[:port]
        root_url = f"{parsed.scheme}://{parsed.netloc}"

        # Classify the host (domain or ip).
        target_type = 'ip' if is_valid_ip(host) else 'domain'

        return ParsedInputDTO(
            original_input=url_str,
            input_type='url',
            target_name=host,
            target_type=target_type,
            website_url=root_url,
            endpoint_url=url_str if has_path else None,
            line_number=line_number
        )

    def _parse_target_input(
        self,
        input_str: str,
        input_type: str,
        line_number: int
    ) -> ParsedInputDTO:
        """
        Parse a non-URL input line (domain/ip/cidr).

        Args:
            input_str: input string
            input_type: detected input type
            line_number: 1-based line number

        Returns:
            ParsedInputDTO

        Raises:
            ValueError: on validation failure or an unknown input type
        """
        # Validate per detected type; each validator raises on bad input.
        if input_type == 'domain':
            validate_domain(input_str)
            target_type = 'domain'
        elif input_type == 'ip':
            validate_ip(input_str)
            target_type = 'ip'
        elif input_type == 'cidr':
            validate_cidr(input_str)
            target_type = 'cidr'
        else:
            raise ValueError(f"未知的输入类型: {input_type}")

        return ParsedInputDTO(
            original_input=input_str,
            input_type=input_type,
            target_name=input_str,
            target_type=target_type,
            website_url=None,
            endpoint_url=None,
            line_number=line_number
        )

    @transaction.atomic
    def process_quick_scan(
        self,
        inputs: List[str],
        engine_id: int
    ) -> Dict[str, Any]:
        """
        Handle a quick-scan request inside a single DB transaction.

        Args:
            inputs: list of input strings
            engine_id: scan engine ID
                NOTE(review): engine_id is accepted but never used in this
                method — confirm whether the caller relies on it elsewhere.

        Returns:
            Result dict with 'targets', 'target_stats', 'asset_stats',
            'errors' keys.
        """
        # 1. Parse the input lines.
        parsed_inputs = self.parse_inputs(inputs)

        # Split valid and invalid rows.
        valid_inputs = [p for p in parsed_inputs if p.is_valid]
        invalid_inputs = [p for p in parsed_inputs if not p.is_valid]

        # Nothing valid: report the errors and create nothing.
        if not valid_inputs:
            return {
                'targets': [],
                'target_stats': {'created': 0, 'reused': 0, 'failed': len(invalid_inputs)},
                'asset_stats': {'websites_created': 0, 'endpoints_created': 0},
                'errors': [
                    {'line_number': p.line_number, 'input': p.original_input, 'error': p.error}
                    for p in invalid_inputs
                ]
            }

        # 2. Create the assets for every valid row.
        asset_result = self.create_assets_from_parsed_inputs(valid_inputs)

        # 3. Return the combined result, keeping the per-line errors.
        return {
            'targets': asset_result['targets'],
            'target_stats': asset_result['target_stats'],
            'asset_stats': asset_result['asset_stats'],
            'errors': [
                {'line_number': p.line_number, 'input': p.original_input, 'error': p.error}
                for p in invalid_inputs
            ]
        }

    def create_assets_from_parsed_inputs(
        self,
        parsed_inputs: List[ParsedInputDTO]
    ) -> Dict[str, Any]:
        """
        Create assets from parse results.

        Args:
            parsed_inputs: parse results (valid rows only)

        Returns:
            Result dict with 'targets', 'target_stats', 'asset_stats' keys.
        """
        # 1. Collect target data in memory, deduplicated by name.
        targets_data = {}
        for dto in parsed_inputs:
            if dto.target_name not in targets_data:
                targets_data[dto.target_name] = {'name': dto.target_name, 'type': dto.target_type}

        targets_list = list(targets_data.values())

        # 2. Bulk-create the Targets (reuses the existing service method).
        target_result = self.target_service.batch_create_targets(targets_list)

        # 3. Re-query the Targets to build a name -> id map.
        target_names = [d['name'] for d in targets_list]
        targets = Target.objects.filter(name__in=target_names)
        target_id_map = {t.name: t.id for t in targets}

        # 4. Collect WebSite DTOs in memory, deduplicated by URL.
        website_dtos = []
        seen_websites = set()
        for dto in parsed_inputs:
            if dto.website_url and dto.website_url not in seen_websites:
                seen_websites.add(dto.website_url)
                target_id = target_id_map.get(dto.target_name)
                if target_id:
                    website_dtos.append(WebSiteDTO(
                        target_id=target_id,
                        url=dto.website_url,
                        host=dto.target_name
                    ))

        # 5. Bulk-create WebSites (existing rows are skipped).
        websites_created = 0
        if website_dtos:
            websites_created = self.website_repo.bulk_create_ignore_conflicts(website_dtos)

        # 6. Collect Endpoint DTOs in memory, deduplicated by URL.
        endpoint_dtos = []
        seen_endpoints = set()
        for dto in parsed_inputs:
            if dto.endpoint_url and dto.endpoint_url not in seen_endpoints:
                seen_endpoints.add(dto.endpoint_url)
                target_id = target_id_map.get(dto.target_name)
                if target_id:
                    endpoint_dtos.append(EndpointDTO(
                        target_id=target_id,
                        url=dto.endpoint_url,
                        host=dto.target_name
                    ))

        # 7. Bulk-create Endpoints (existing rows are skipped).
        endpoints_created = 0
        if endpoint_dtos:
            endpoints_created = self.endpoint_repo.bulk_create_ignore_conflicts(endpoint_dtos)

        return {
            'targets': list(targets),
            'target_stats': {
                'created': target_result['created_count'],
                'reused': 0,  # bulk_create cannot distinguish created from reused
                'failed': target_result['failed_count']
            },
            'asset_stats': {
                'websites_created': websites_created,
                'endpoints_created': endpoints_created
            }
        }
|
||||
@@ -1,258 +0,0 @@
|
||||
"""
|
||||
扫描任务服务
|
||||
|
||||
负责 Scan 模型的所有业务逻辑
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
from typing import Dict, List, TYPE_CHECKING
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from django.conf import settings
|
||||
from django.db import transaction
|
||||
from django.db.utils import DatabaseError, IntegrityError, OperationalError
|
||||
from django.core.exceptions import ValidationError, ObjectDoesNotExist
|
||||
|
||||
from apps.scan.models import Scan
|
||||
from apps.scan.repositories import DjangoScanRepository
|
||||
from apps.targets.repositories import DjangoTargetRepository, DjangoOrganizationRepository
|
||||
from apps.engine.repositories import DjangoEngineRepository
|
||||
from apps.targets.models import Target
|
||||
from apps.engine.models import ScanEngine
|
||||
from apps.common.definitions import ScanStatus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ScanService:
    """
    Scan-task service (coordinator).

    Responsibilities:
    - Coordinate the specialised sub-services
    - Provide a single public interface
    - Preserve backward compatibility

    Note:
    - Concrete business logic lives in the sub-services
    - This class mostly delegates and coordinates
    """

    # Terminal statuses: once one of these is set it must not be overwritten.
    FINAL_STATUSES = {
        ScanStatus.COMPLETED,
        ScanStatus.FAILED,
        ScanStatus.CANCELLED
    }

    def __init__(self):
        """
        Initialise the service and its sub-services.
        """
        # Sub-services are imported here rather than at module level —
        # presumably to avoid circular imports; TODO confirm.
        from apps.scan.services.scan_creation_service import ScanCreationService
        from apps.scan.services.scan_state_service import ScanStateService
        from apps.scan.services.scan_control_service import ScanControlService
        from apps.scan.services.scan_stats_service import ScanStatsService

        self.creation_service = ScanCreationService()
        self.state_service = ScanStateService()
        self.control_service = ScanControlService()
        self.stats_service = ScanStatsService()

        # Keep a ScanRepository for direct lookups (used by get_scan).
        self.scan_repo = DjangoScanRepository()

    def get_scan(self, scan_id: int, prefetch_relations: bool) -> Scan | None:
        """
        Fetch a scan task (with related objects).

        Preloads engine and target to avoid N+1 query problems.

        Args:
            scan_id: scan task ID
            prefetch_relations: whether to preload related objects

        Returns:
            Scan object (with engine and target) or None
        """
        return self.scan_repo.get_by_id(scan_id, prefetch_relations)

    def get_all_scans(self, prefetch_relations: bool = True):
        # Thin pass-through to the repository.
        return self.scan_repo.get_all(prefetch_relations=prefetch_relations)

    def prepare_initiate_scan(
        self,
        organization_id: int | None = None,
        target_id: int | None = None,
        engine_id: int | None = None
    ) -> tuple[List[Target], ScanEngine]:
        """
        Prepare for creating scan tasks; returns the targets and engine needed.
        """
        return self.creation_service.prepare_initiate_scan(
            organization_id, target_id, engine_id
        )

    def prepare_initiate_scan_multi_engine(
        self,
        organization_id: int | None = None,
        target_id: int | None = None,
        engine_ids: List[int] | None = None
    ) -> tuple[List[Target], str, List[str], List[int]]:
        """
        Prepare for creating multi-engine scan tasks.

        Returns:
            (target list, merged configuration, engine name list, engine ID list)
        """
        return self.creation_service.prepare_initiate_scan_multi_engine(
            organization_id, target_id, engine_ids
        )

    def create_scans(
        self,
        targets: List[Target],
        engine_ids: List[int],
        engine_names: List[str],
        yaml_configuration: str,
        scheduled_scan_name: str | None = None
    ) -> List[Scan]:
        """Create scan tasks in bulk (delegates to ScanCreationService)."""
        return self.creation_service.create_scans(
            targets, engine_ids, engine_names, yaml_configuration, scheduled_scan_name
        )

    # ==================== State management (delegates to ScanStateService) ====================

    def update_status(
        self,
        scan_id: int,
        status: ScanStatus,
        error_message: str | None = None,
        stopped_at: datetime | None = None
    ) -> bool:
        """Update a scan's status (delegates to ScanStateService)."""
        return self.state_service.update_status(
            scan_id, status, error_message, stopped_at
        )

    def update_status_if_match(
        self,
        scan_id: int,
        current_status: ScanStatus,
        new_status: ScanStatus,
        stopped_at: datetime | None = None
    ) -> bool:
        """Conditionally update a scan's status (delegates to ScanStateService)."""
        return self.state_service.update_status_if_match(
            scan_id, current_status, new_status, stopped_at
        )

    def update_cached_stats(self, scan_id: int) -> dict | None:
        """Refresh cached statistics (delegates to ScanStateService); returns the stats dict."""
        return self.state_service.update_cached_stats(scan_id)

    # ==================== Progress tracking (delegates to ScanStateService) ====================

    def init_stage_progress(self, scan_id: int, stages: list[str]) -> bool:
        """Initialise per-stage progress (delegates to ScanStateService)."""
        return self.state_service.init_stage_progress(scan_id, stages)

    def start_stage(self, scan_id: int, stage: str) -> bool:
        """Mark a stage as started (delegates to ScanStateService)."""
        return self.state_service.start_stage(scan_id, stage)

    def complete_stage(self, scan_id: int, stage: str, detail: str | None = None) -> bool:
        """Mark a stage as completed (delegates to ScanStateService)."""
        return self.state_service.complete_stage(scan_id, stage, detail)

    def fail_stage(self, scan_id: int, stage: str, error: str | None = None) -> bool:
        """Mark a stage as failed (delegates to ScanStateService)."""
        return self.state_service.fail_stage(scan_id, stage, error)

    def cancel_running_stages(self, scan_id: int, final_status: str = "cancelled") -> bool:
        """Cancel all running stages (delegates to ScanStateService)."""
        return self.state_service.cancel_running_stages(scan_id, final_status)

    # TODO: not wired up yet
    def add_command_to_scan(self, scan_id: int, stage_name: str, tool_name: str, command: str) -> bool:
        """
        Append an executed command to the given scan stage.

        Args:
            scan_id: scan task ID
            stage_name: stage name (e.g. 'subdomain_discovery', 'port_scan')
            tool_name: tool name
            command: executed command line

        Returns:
            bool: True if the command was recorded
        """
        try:
            scan = self.get_scan(scan_id, prefetch_relations=False)
            if not scan:
                logger.error(f"扫描任务不存在: {scan_id}")
                return False

            stage_progress = scan.stage_progress or {}

            # Make sure the stage entry exists.
            if stage_name not in stage_progress:
                stage_progress[stage_name] = {'status': 'running', 'commands': []}

            # Make sure the commands list exists.
            if 'commands' not in stage_progress[stage_name]:
                stage_progress[stage_name]['commands'] = []

            # Append the command entry incrementally.
            command_entry = f"{tool_name}: {command}"
            stage_progress[stage_name]['commands'].append(command_entry)

            scan.stage_progress = stage_progress
            scan.save(update_fields=['stage_progress'])

            command_count = len(stage_progress[stage_name]['commands'])
            logger.info(f"✓ 记录命令: {stage_name}.{tool_name} (总计: {command_count})")
            return True

        except Exception as e:
            # Broad catch is deliberate: recording a command is best-effort
            # and must never break the scan itself — failures are logged
            # and reported as False.
            logger.error(f"记录命令失败: {e}")
            return False

    # ==================== Deletion & control (delegates to ScanControlService) ====================

    def delete_scans_two_phase(self, scan_ids: List[int]) -> dict:
        """Two-phase delete of scan tasks (delegates to ScanControlService)."""
        return self.control_service.delete_scans_two_phase(scan_ids)

    def stop_scan(self, scan_id: int) -> tuple[bool, int]:
        """Stop a scan task (delegates to ScanControlService)."""
        return self.control_service.stop_scan(scan_id)

    def hard_delete_scans(self, scan_ids: List[int]) -> tuple[int, Dict[str, int]]:
        """
        Hard-delete scan tasks (actually removes data).

        Executed inside the worker container; removes already soft-deleted
        scans and their related data.

        Args:
            scan_ids: list of scan task IDs

        Returns:
            (number deleted, details dict)
        """
        return self.scan_repo.hard_delete_by_ids(scan_ids)

    # ==================== Statistics (delegates to ScanStatsService) ====================

    def get_statistics(self) -> dict:
        """Get scan statistics (delegates to ScanStatsService)."""
        return self.stats_service.get_statistics()


# Public interface of this module.
__all__ = ['ScanService']
|
||||
@@ -1,388 +0,0 @@
|
||||
"""
|
||||
目标导出服务
|
||||
|
||||
提供统一的目标提取和文件导出功能,支持:
|
||||
- URL 导出(流式写入 + 默认值回退)
|
||||
- 域名/IP 导出(用于端口扫描)
|
||||
- 黑名单过滤集成
|
||||
"""
|
||||
|
||||
import ipaddress
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional, List
|
||||
|
||||
from django.db.models import QuerySet
|
||||
|
||||
from apps.common.utils import BlacklistFilter
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_export_service(target_id: int) -> 'TargetExportService':
    """Factory: build a TargetExportService wired with the target's blacklist.

    Loads the blacklist rules for the given target and hands the resulting
    filter to a fresh export-service instance.

    Args:
        target_id: target ID whose blacklist rules should be loaded

    Returns:
        TargetExportService: export service with blacklist filtering enabled
    """
    from apps.common.services import BlacklistService

    target_rules = BlacklistService().get_rules(target_id)
    return TargetExportService(blacklist_filter=BlacklistFilter(target_rules))
|
||||
|
||||
|
||||
class TargetExportService:
|
||||
"""
|
||||
目标导出服务 - 提供统一的目标提取和文件导出功能
|
||||
|
||||
使用方式:
|
||||
from apps.common.services import BlacklistService
|
||||
from apps.common.utils import BlacklistFilter
|
||||
|
||||
# 获取规则并创建过滤器
|
||||
blacklist_service = BlacklistService()
|
||||
rules = blacklist_service.get_rules(target_id)
|
||||
blacklist_filter = BlacklistFilter(rules)
|
||||
|
||||
# 使用导出服务
|
||||
export_service = TargetExportService(blacklist_filter=blacklist_filter)
|
||||
result = export_service.export_urls(target_id, output_path, queryset)
|
||||
"""
|
||||
|
||||
def __init__(self, blacklist_filter: Optional[BlacklistFilter] = None):
|
||||
"""
|
||||
初始化导出服务
|
||||
|
||||
Args:
|
||||
blacklist_filter: 黑名单过滤器,None 表示禁用过滤
|
||||
"""
|
||||
self.blacklist_filter = blacklist_filter
|
||||
|
||||
def export_urls(
|
||||
self,
|
||||
target_id: int,
|
||||
output_path: str,
|
||||
queryset: QuerySet,
|
||||
url_field: str = 'url',
|
||||
batch_size: int = 1000
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
统一 URL 导出函数
|
||||
|
||||
自动判断数据库有无数据:
|
||||
- 有数据:流式写入数据库数据到文件
|
||||
- 无数据:调用默认值生成器生成 URL
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
output_path: 输出文件路径
|
||||
queryset: 数据源 queryset(由 Task 层构建,应为 values_list flat=True)
|
||||
url_field: URL 字段名(用于黑名单过滤)
|
||||
batch_size: 批次大小
|
||||
|
||||
Returns:
|
||||
dict: {
|
||||
'success': bool,
|
||||
'output_file': str,
|
||||
'total_count': int
|
||||
}
|
||||
|
||||
Raises:
|
||||
IOError: 文件写入失败
|
||||
"""
|
||||
output_file = Path(output_path)
|
||||
output_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
logger.info("开始导出 URL - target_id=%s, output=%s", target_id, output_path)
|
||||
|
||||
total_count = 0
|
||||
filtered_count = 0
|
||||
try:
|
||||
with open(output_file, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
for url in queryset.iterator(chunk_size=batch_size):
|
||||
if url:
|
||||
# 黑名单过滤
|
||||
if self.blacklist_filter and not self.blacklist_filter.is_allowed(url):
|
||||
filtered_count += 1
|
||||
continue
|
||||
f.write(f"{url}\n")
|
||||
total_count += 1
|
||||
|
||||
if total_count % 10000 == 0:
|
||||
logger.info("已导出 %d 个 URL...", total_count)
|
||||
except IOError as e:
|
||||
logger.error("文件写入失败: %s - %s", output_path, e)
|
||||
raise
|
||||
|
||||
if filtered_count > 0:
|
||||
logger.info("黑名单过滤: 过滤 %d 个 URL", filtered_count)
|
||||
|
||||
# 默认值回退模式
|
||||
if total_count == 0:
|
||||
total_count = self._generate_default_urls(target_id, output_file)
|
||||
|
||||
logger.info("✓ URL 导出完成 - 数量: %d, 文件: %s", total_count, output_path)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'output_file': str(output_file),
|
||||
'total_count': total_count
|
||||
}
|
||||
|
||||
def _generate_default_urls(
|
||||
self,
|
||||
target_id: int,
|
||||
output_path: Path
|
||||
) -> int:
|
||||
"""
|
||||
默认值生成器(内部函数)
|
||||
|
||||
根据 Target 类型生成默认 URL:
|
||||
- DOMAIN: http(s)://domain
|
||||
- IP: http(s)://ip
|
||||
- CIDR: 展开为所有 IP 的 http(s)://ip
|
||||
- URL: 直接使用目标 URL
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
output_path: 输出文件路径
|
||||
|
||||
Returns:
|
||||
int: 写入的 URL 总数
|
||||
"""
|
||||
from apps.targets.services import TargetService
|
||||
from apps.targets.models import Target
|
||||
|
||||
target_service = TargetService()
|
||||
target = target_service.get_target(target_id)
|
||||
|
||||
if not target:
|
||||
logger.warning("Target ID %d 不存在,无法生成默认 URL", target_id)
|
||||
return 0
|
||||
|
||||
target_name = target.name
|
||||
target_type = target.type
|
||||
|
||||
logger.info("懒加载模式:Target 类型=%s, 名称=%s", target_type, target_name)
|
||||
|
||||
total_urls = 0
|
||||
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
if target_type == Target.TargetType.DOMAIN:
|
||||
urls = [f"http://{target_name}", f"https://{target_name}"]
|
||||
for url in urls:
|
||||
if self._should_write_url(url):
|
||||
f.write(f"{url}\n")
|
||||
total_urls += 1
|
||||
|
||||
elif target_type == Target.TargetType.IP:
|
||||
urls = [f"http://{target_name}", f"https://{target_name}"]
|
||||
for url in urls:
|
||||
if self._should_write_url(url):
|
||||
f.write(f"{url}\n")
|
||||
total_urls += 1
|
||||
|
||||
elif target_type == Target.TargetType.CIDR:
|
||||
try:
|
||||
network = ipaddress.ip_network(target_name, strict=False)
|
||||
|
||||
for ip in network.hosts():
|
||||
urls = [f"http://{ip}", f"https://{ip}"]
|
||||
for url in urls:
|
||||
if self._should_write_url(url):
|
||||
f.write(f"{url}\n")
|
||||
total_urls += 1
|
||||
|
||||
if total_urls % 10000 == 0:
|
||||
logger.info("已生成 %d 个 URL...", total_urls)
|
||||
|
||||
# /32 或 /128 特殊处理
|
||||
if total_urls == 0:
|
||||
ip = str(network.network_address)
|
||||
urls = [f"http://{ip}", f"https://{ip}"]
|
||||
for url in urls:
|
||||
if self._should_write_url(url):
|
||||
f.write(f"{url}\n")
|
||||
total_urls += 1
|
||||
|
||||
except ValueError as e:
|
||||
logger.error("CIDR 解析失败: %s - %s", target_name, e)
|
||||
raise ValueError(f"无效的 CIDR: {target_name}") from e
|
||||
|
||||
elif target_type == Target.TargetType.URL:
|
||||
if self._should_write_url(target_name):
|
||||
f.write(f"{target_name}\n")
|
||||
total_urls = 1
|
||||
else:
|
||||
logger.warning("不支持的 Target 类型: %s", target_type)
|
||||
|
||||
logger.info("✓ 懒加载生成默认 URL - 数量: %d", total_urls)
|
||||
return total_urls
|
||||
|
||||
def _should_write_url(self, url: str) -> bool:
|
||||
"""检查 URL 是否应该写入(通过黑名单过滤)"""
|
||||
if self.blacklist_filter:
|
||||
return self.blacklist_filter.is_allowed(url)
|
||||
return True
|
||||
|
||||
def export_hosts(
|
||||
self,
|
||||
target_id: int,
|
||||
output_path: str,
|
||||
batch_size: int = 1000
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
主机列表导出函数(用于端口扫描)
|
||||
|
||||
根据 Target 类型选择导出逻辑:
|
||||
- DOMAIN: 从 Subdomain 表流式导出子域名
|
||||
- IP: 直接写入 IP 地址
|
||||
- CIDR: 展开为所有主机 IP
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
output_path: 输出文件路径
|
||||
batch_size: 批次大小
|
||||
|
||||
Returns:
|
||||
dict: {
|
||||
'success': bool,
|
||||
'output_file': str,
|
||||
'total_count': int,
|
||||
'target_type': str
|
||||
}
|
||||
"""
|
||||
from apps.targets.services import TargetService
|
||||
from apps.targets.models import Target
|
||||
from apps.asset.services.asset.subdomain_service import SubdomainService
|
||||
|
||||
output_file = Path(output_path)
|
||||
output_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# 获取 Target 信息
|
||||
target_service = TargetService()
|
||||
target = target_service.get_target(target_id)
|
||||
|
||||
if not target:
|
||||
raise ValueError(f"Target ID {target_id} 不存在")
|
||||
|
||||
target_type = target.type
|
||||
target_name = target.name
|
||||
|
||||
logger.info(
|
||||
"开始导出主机列表 - Target ID: %d, Name: %s, Type: %s, 输出文件: %s",
|
||||
target_id, target_name, target_type, output_path
|
||||
)
|
||||
|
||||
total_count = 0
|
||||
|
||||
if target_type == Target.TargetType.DOMAIN:
|
||||
total_count = self._export_domains(target_id, target_name, output_file, batch_size)
|
||||
type_desc = "域名"
|
||||
|
||||
elif target_type == Target.TargetType.IP:
|
||||
total_count = self._export_ip(target_name, output_file)
|
||||
type_desc = "IP"
|
||||
|
||||
elif target_type == Target.TargetType.CIDR:
|
||||
total_count = self._export_cidr(target_name, output_file)
|
||||
type_desc = "CIDR IP"
|
||||
|
||||
else:
|
||||
raise ValueError(f"不支持的目标类型: {target_type}")
|
||||
|
||||
logger.info(
|
||||
"✓ 主机列表导出完成 - 类型: %s, 总数: %d, 文件: %s",
|
||||
type_desc, total_count, output_path
|
||||
)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'output_file': str(output_file),
|
||||
'total_count': total_count,
|
||||
'target_type': target_type
|
||||
}
|
||||
|
||||
def _export_domains(
|
||||
self,
|
||||
target_id: int,
|
||||
target_name: str,
|
||||
output_path: Path,
|
||||
batch_size: int
|
||||
) -> int:
|
||||
"""导出域名类型目标的根域名 + 子域名"""
|
||||
from apps.asset.services.asset.subdomain_service import SubdomainService
|
||||
|
||||
subdomain_service = SubdomainService()
|
||||
domain_iterator = subdomain_service.iter_subdomain_names_by_target(
|
||||
target_id=target_id,
|
||||
chunk_size=batch_size
|
||||
)
|
||||
|
||||
total_count = 0
|
||||
written_domains = set() # 去重(子域名表可能已包含根域名)
|
||||
|
||||
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
|
||||
# 1. 先写入根域名
|
||||
if self._should_write_target(target_name):
|
||||
f.write(f"{target_name}\n")
|
||||
written_domains.add(target_name)
|
||||
total_count += 1
|
||||
|
||||
# 2. 再写入子域名(跳过已写入的根域名)
|
||||
for domain_name in domain_iterator:
|
||||
if domain_name in written_domains:
|
||||
continue
|
||||
if self._should_write_target(domain_name):
|
||||
f.write(f"{domain_name}\n")
|
||||
written_domains.add(domain_name)
|
||||
total_count += 1
|
||||
|
||||
if total_count % 10000 == 0:
|
||||
logger.info("已导出 %d 个域名...", total_count)
|
||||
|
||||
return total_count
|
||||
|
||||
def _export_ip(self, target_name: str, output_path: Path) -> int:
|
||||
"""导出 IP 类型目标"""
|
||||
if self._should_write_target(target_name):
|
||||
with open(output_path, 'w', encoding='utf-8') as f:
|
||||
f.write(f"{target_name}\n")
|
||||
return 1
|
||||
return 0
|
||||
|
||||
def _export_cidr(self, target_name: str, output_path: Path) -> int:
    """Export a CIDR-type target, expanded to one IP per line.

    Iterates ``network.hosts()`` and writes every address that passes the
    blacklist filter. When that yields nothing (e.g. a /32 or /128
    network), the network address itself is written instead.

    Returns:
        int: number of IPs written.
    """
    network = ipaddress.ip_network(target_name, strict=False)
    written = 0

    with open(output_path, 'w', encoding='utf-8', buffering=8192) as out:
        for address in network.hosts():
            candidate = str(address)
            if not self._should_write_target(candidate):
                continue

            out.write(f"{candidate}\n")
            written += 1

            if written % 10000 == 0:
                logger.info("已导出 %d 个 IP...", written)

    # Special case for /32 or /128: fall back to the network address.
    if written == 0:
        candidate = str(network.network_address)
        if self._should_write_target(candidate):
            with open(output_path, 'w', encoding='utf-8') as out:
                out.write(f"{candidate}\n")
            written = 1

    return written
|
||||
|
||||
def _should_write_target(self, target: str) -> bool:
|
||||
"""检查目标是否应该写入(通过黑名单过滤)"""
|
||||
if self.blacklist_filter:
|
||||
return self.blacklist_filter.is_allowed(target)
|
||||
return True
|
||||
@@ -1,71 +0,0 @@
|
||||
"""
|
||||
导出站点 URL 到 TXT 文件的 Task
|
||||
|
||||
使用 TargetExportService 统一处理导出逻辑和默认值回退
|
||||
数据源: WebSite.url
|
||||
"""
|
||||
import logging
|
||||
from prefect import task
|
||||
|
||||
from apps.asset.models import WebSite
|
||||
from apps.scan.services import TargetExportService
|
||||
from apps.scan.services.target_export_service import create_export_service
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(name="export_sites")
def export_sites_task(
    target_id: int,
    output_file: str,
    batch_size: int = 1000,
) -> dict:
    """Export every website URL under a target to a TXT file.

    Data source: WebSite.url.

    Lazy-default mode: when the database holds no rows, the export
    service generates default URLs from the Target type:
    - DOMAIN: http(s)://domain
    - IP:     http(s)://ip
    - CIDR:   one URL per expanded IP

    Args:
        target_id: target identifier.
        output_file: absolute path of the output file.
        batch_size: rows fetched per database round-trip (default 1000).

    Returns:
        dict: {'success': bool, 'output_file': str, 'total_count': int}

    Raises:
        ValueError: invalid arguments.
        IOError: the file could not be written.
    """
    # The task layer picks the data source; the service does the writing.
    url_values = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)

    exporter = create_export_service(target_id)
    outcome = exporter.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=url_values,
        batch_size=batch_size,
    )

    # Keep the historical response shape for existing callers.
    return {key: outcome[key] for key in ('success', 'output_file', 'total_count')}
|
||||
|
||||
|
||||
|
||||
@@ -1,64 +0,0 @@
|
||||
"""
|
||||
导出 URL 任务
|
||||
|
||||
用于指纹识别前导出目标下的 URL 到文件
|
||||
使用 TargetExportService 统一处理导出逻辑和默认值回退
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from prefect import task
|
||||
|
||||
from apps.asset.models import WebSite
|
||||
from apps.scan.services.target_export_service import create_export_service
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(name="export_urls_for_fingerprint")
def export_urls_for_fingerprint_task(
    target_id: int,
    output_file: str,
    source: str = 'website',
    batch_size: int = 1000
) -> dict:
    """Export a target's URLs to a file for fingerprinting.

    Data source: WebSite.url.

    Lazy-default mode: when the database holds no rows, the export
    service generates default URLs from the Target type (DOMAIN /
    IP / CIDR expansion, or the target URL itself for URL targets).

    Args:
        target_id: target identifier.
        output_file: output file path.
        source: data-source label (kept for backward compatibility;
            echoed back unchanged in the result).
        batch_size: rows fetched per database round-trip.

    Returns:
        dict: {'output_file': str, 'total_count': int, 'source': str}
    """
    # The task layer picks the data source; the service does the writing.
    url_values = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)

    exporter = create_export_service(target_id)
    outcome = exporter.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=url_values,
        batch_size=batch_size,
    )

    # Keep the historical response shape for existing callers.
    payload = {key: outcome[key] for key in ('output_file', 'total_count')}
    payload['source'] = source
    return payload
|
||||
@@ -1,65 +0,0 @@
|
||||
"""
|
||||
导出主机列表到 TXT 文件的 Task
|
||||
|
||||
使用 TargetExportService.export_hosts() 统一处理导出逻辑
|
||||
|
||||
根据 Target 类型决定导出内容:
|
||||
- DOMAIN: 从 Subdomain 表导出子域名
|
||||
- IP: 直接写入 target.name
|
||||
- CIDR: 展开 CIDR 范围内的所有 IP
|
||||
"""
|
||||
import logging
|
||||
from prefect import task
|
||||
|
||||
from apps.scan.services.target_export_service import create_export_service
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(name="export_hosts")
def export_hosts_task(
    target_id: int,
    output_file: str,
    batch_size: int = 1000
) -> dict:
    """Export the host list for a target to a TXT file.

    The export service chooses what to write from the Target type:
    - DOMAIN: subdomains streamed from the Subdomain table
      (supports 100k+ domains).
    - IP:     the single target.name.
    - CIDR:   every usable IP in the range.

    Args:
        target_id: target identifier.
        output_file: absolute path of the output file.
        batch_size: rows fetched per round-trip (DOMAIN targets only).

    Returns:
        dict: {'success': bool, 'output_file': str,
               'total_count': int, 'target_type': str}

    Raises:
        ValueError: the Target does not exist.
        IOError: the file could not be written.
    """
    exporter = create_export_service(target_id)
    outcome = exporter.export_hosts(
        target_id=target_id,
        output_path=output_file,
        batch_size=batch_size
    )

    # Keep the historical response shape for existing callers.
    return {
        key: outcome[key]
        for key in ('success', 'output_file', 'total_count', 'target_type')
    }
|
||||
@@ -1,132 +0,0 @@
|
||||
"""
|
||||
导出站点URL到文件的Task
|
||||
|
||||
直接使用 HostPortMapping 表查询 host+port 组合,拼接成URL格式写入文件
|
||||
使用 TargetExportService 处理默认值回退逻辑
|
||||
|
||||
特殊逻辑:
|
||||
- 80 端口:只生成 HTTP URL(省略端口号)
|
||||
- 443 端口:只生成 HTTPS URL(省略端口号)
|
||||
- 其他端口:生成 HTTP 和 HTTPS 两个URL(带端口号)
|
||||
"""
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from prefect import task
|
||||
|
||||
from apps.asset.services import HostPortMappingService
|
||||
from apps.scan.services.target_export_service import create_export_service
|
||||
from apps.common.services import BlacklistService
|
||||
from apps.common.utils import BlacklistFilter
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _generate_urls_from_port(host: str, port: int) -> list[str]:
|
||||
"""
|
||||
根据端口生成 URL 列表
|
||||
|
||||
- 80 端口:只生成 HTTP URL(省略端口号)
|
||||
- 443 端口:只生成 HTTPS URL(省略端口号)
|
||||
- 其他端口:生成 HTTP 和 HTTPS 两个URL(带端口号)
|
||||
"""
|
||||
if port == 80:
|
||||
return [f"http://{host}"]
|
||||
elif port == 443:
|
||||
return [f"https://{host}"]
|
||||
else:
|
||||
return [f"http://{host}:{port}", f"https://{host}:{port}"]
|
||||
|
||||
|
||||
@task(name="export_site_urls")
def export_site_urls_task(
    target_id: int,
    output_file: str,
    batch_size: int = 1000
) -> dict:
    """Export all site URLs of a target to a file (HostPortMapping-based).

    Data source: HostPortMapping (host + port), turned into URLs by
    ``_generate_urls_from_port``:
    - port 80:  HTTP URL only, port omitted
    - port 443: HTTPS URL only, port omitted
    - other:    both HTTP and HTTPS URLs with the explicit port

    Lazy-default mode: when no URL was produced, the export service
    generates default URLs from the Target type (DOMAIN / IP / CIDR).

    Args:
        target_id: target identifier.
        output_file: absolute path of the output file.
        batch_size: number of mappings processed per batch.

    Returns:
        dict: {'success': bool, 'output_file': str,
               'total_urls': int, 'association_count': int}

    Raises:
        ValueError: invalid arguments.
        IOError: the file could not be written.
    """
    logger.info("开始统计站点URL - Target ID: %d, 输出文件: %s", target_id, output_file)

    # Make sure the output directory exists.
    output_path = Path(output_file)
    output_path.parent.mkdir(parents=True, exist_ok=True)

    # Fetch the blacklist rules and build the filter once up front.
    blacklist_filter = BlacklistFilter(BlacklistService().get_rules(target_id))

    # Stream host/port pairs straight from the HostPortMapping table.
    service = HostPortMappingService()
    associations = service.iter_host_port_by_target(
        target_id=target_id,
        batch_size=batch_size,
    )

    total_urls = 0
    association_count = 0

    # Stream URLs to the file, applying the special port handling above.
    with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
        for assoc in associations:
            association_count += 1
            host = assoc['host']
            port = assoc['port']

            # Validate the host first; only then generate URLs for it.
            if not blacklist_filter.is_allowed(host):
                continue

            # Generate URL(s) according to the port number.
            for url in _generate_urls_from_port(host, port):
                f.write(f"{url}\n")
                total_urls += 1

            if association_count % 1000 == 0:
                logger.info("已处理 %d 条关联,生成 %d 个URL...", association_count, total_urls)

    logger.info(
        "✓ 站点URL导出完成 - 关联数: %d, 总URL数: %d, 文件: %s",
        association_count, total_urls, str(output_path)
    )

    # Default-value fallback: build the export service via the factory.
    # NOTE(review): this reaches into a private method of the service
    # (_generate_default_urls) — consider exposing a public API for it.
    if total_urls == 0:
        export_service = create_export_service(target_id)
        total_urls = export_service._generate_default_urls(target_id, output_path)

    return {
        'success': True,
        'output_file': str(output_path),
        'total_urls': total_urls,
        'association_count': association_count
    }
|
||||
@@ -1,195 +0,0 @@
|
||||
"""
|
||||
合并并去重域名任务
|
||||
|
||||
合并 merge + parse + validate 三个步骤,优化性能:
|
||||
- 单命令实现(LC_ALL=C sort -u)
|
||||
- C语言级性能,单进程高效
|
||||
- 无临时文件,零额外开销
|
||||
- 支持千万级数据处理
|
||||
|
||||
性能优势:
|
||||
- LC_ALL=C 字节序比较(比locale快20-30%)
|
||||
- 单进程直接处理多文件(无管道开销)
|
||||
- 内存占用恒定(~50MB for 50万域名)
|
||||
- 50万域名处理时间:~0.5秒(相比 Python 提升 ~67%)
|
||||
|
||||
Note:
|
||||
- 工具(amass/subfinder)输出已标准化(小写,无空行)
|
||||
- sort -u 自动处理去重和排序
|
||||
- 无需额外过滤,性能最优
|
||||
"""
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from prefect import task
|
||||
from typing import List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# 注:使用纯系统命令实现,无需 Python 缓冲区配置
|
||||
# 工具(amass/subfinder)输出已是小写且标准化
|
||||
|
||||
def _count_lines_fast(path: str) -> int:
    """Count the lines of a file with `wc -l`; returns 0 on any failure.

    Used only to estimate the sort timeout, so failures are non-fatal.
    """
    try:
        proc = subprocess.run(
            ["wc", "-l", path],
            check=True,
            capture_output=True,
            text=True,
        )
        return int(proc.stdout.strip().split()[0])
    except (subprocess.CalledProcessError, ValueError, IndexError):
        return 0


@task(
    name='merge_and_deduplicate',
    retries=1,
    log_prints=True
)
def merge_and_validate_task(
    result_files: List[str],
    result_dir: str
) -> str:
    """Merge scan result files and deduplicate them (streaming, high-throughput).

    Flow:
    1. Run `sort -u` (with LC_ALL=C) over all input files in one process.
    2. Sorting and deduplication happen in a single pass.
    3. Return the path of the deduplicated file.

    Tool output (amass/subfinder) is already normalized (lowercase, no
    blank lines), so no extra filtering is needed.

    Args:
        result_files: list of result file paths.
        result_dir: directory for the merged output file.

    Returns:
        str: path of the deduplicated domain file.

    Raises:
        RuntimeError: processing failed (missing inputs, command failure,
            timeout, or an empty result).

    Performance:
        - LC_ALL=C byte-order comparison (faster than locale collation).
        - `sort -u` consumes multiple files directly (no pipeline overhead)
          and uses external sorting, so memory stays bounded.
    """
    logger.info("开始合并并去重 %d 个结果文件(系统命令优化)", len(result_files))

    result_path = Path(result_dir)

    # Keep only input files that actually exist.
    valid_files = []
    for file_path_str in result_files:
        file_path = Path(file_path_str)
        if file_path.exists():
            valid_files.append(str(file_path))
        else:
            logger.warning("结果文件不存在: %s", file_path)

    if not valid_files:
        raise RuntimeError("所有结果文件都不存在")

    # Unique output file name.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    short_uuid = uuid.uuid4().hex[:4]
    merged_file = result_path / f"merged_{timestamp}_{short_uuid}.txt"

    # Default in case the try block fails before computing a timeout.
    timeout = 3600

    try:
        import os

        # ==================== sort + dedupe in one system command ====================
        # Build the command as an argument list (NO shell=True): file names
        # containing spaces or shell metacharacters can neither break the
        # command line nor inject into it.
        # LC_ALL=C: byte-order comparison (20-30% faster than locale-aware).
        # -o:       sort writes the output file itself (safer than redirection).
        cmd = ["sort", "-u", *valid_files, "-o", str(merged_file)]
        env = {**os.environ, "LC_ALL": "C"}

        logger.debug("执行命令: %s", " ".join(cmd))

        # Derive the timeout from the total number of input lines.
        total_lines = sum(_count_lines_fast(f) for f in valid_files)

        if total_lines > 0:
            # Linear estimate (~0.1s per line), floored at 10 minutes.
            base_per_line = 0.1
            est = int(total_lines * base_per_line)
            timeout = max(600, est)

        logger.info(
            "Subdomain 合并去重 timeout 自动计算: 输入总行数=%d, timeout=%d秒",
            total_lines,
            timeout,
        )

        subprocess.run(
            cmd,
            env=env,
            check=True,
            capture_output=True,
            text=True,
            timeout=timeout
        )

        logger.debug("✓ 合并去重完成")

        # ==================== collect statistics ====================
        if not merged_file.exists():
            raise RuntimeError("合并文件未被创建")

        # Count result lines with wc -l (fast on large files); fall back to
        # a Python line scan when wc fails for any reason.
        try:
            line_count_proc = subprocess.run(
                ["wc", "-l", str(merged_file)],
                check=True,
                capture_output=True,
                text=True
            )
            unique_count = int(line_count_proc.stdout.strip().split()[0])
        except (subprocess.CalledProcessError, ValueError, IndexError) as e:
            logger.warning(
                "wc -l 统计失败(文件: %s),降级为 Python 逐行统计 - 错误: %s",
                merged_file, e
            )
            unique_count = 0
            with open(merged_file, 'r', encoding='utf-8') as file_obj:
                for _ in file_obj:
                    unique_count += 1

        if unique_count == 0:
            raise RuntimeError("未找到任何有效域名")

        file_size = merged_file.stat().st_size

        logger.info(
            "✓ 合并去重完成 - 去重后: %d 个域名, 文件大小: %.2f KB",
            unique_count,
            file_size / 1024
        )

        return str(merged_file)

    except subprocess.TimeoutExpired:
        # Report the actual computed timeout instead of a fixed ">60 min".
        error_msg = f"合并去重超时(>{timeout}秒),请检查数据量或系统资源"
        logger.warning(error_msg)  # Timeouts are expected under heavy load.
        raise RuntimeError(error_msg)

    except subprocess.CalledProcessError as e:
        error_msg = f"系统命令执行失败: {e.stderr if e.stderr else str(e)}"
        logger.warning(error_msg)
        raise RuntimeError(error_msg) from e

    except IOError as e:
        error_msg = f"文件读写失败: {e}"
        logger.warning(error_msg)
        raise RuntimeError(error_msg) from e

    except Exception as e:
        error_msg = f"合并去重失败: {e}"
        logger.error(error_msg, exc_info=True)
        raise
|
||||
@@ -1,73 +0,0 @@
|
||||
"""
|
||||
导出站点 URL 列表任务
|
||||
|
||||
使用 TargetExportService 统一处理导出逻辑和默认值回退
|
||||
数据源: WebSite.url(用于 katana 等爬虫工具)
|
||||
"""
|
||||
|
||||
import logging
|
||||
from prefect import task
|
||||
from typing import Optional
|
||||
|
||||
from apps.asset.models import WebSite
|
||||
from apps.scan.services.target_export_service import create_export_service
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(
    name='export_sites_for_url_fetch',
    retries=1,
    log_prints=True
)
def export_sites_task(
    output_file: str,
    target_id: int,
    scan_id: int,
    batch_size: int = 1000
) -> dict:
    """Export a target's site URLs to a file for crawlers such as katana.

    Data source: WebSite.url.

    Lazy-default mode: when the database holds no rows, the export
    service generates default URLs from the Target type (DOMAIN /
    IP / CIDR expansion).

    Args:
        output_file: output file path.
        target_id: target identifier.
        scan_id: scan identifier (kept for backward compatibility; unused).
        batch_size: rows fetched per database round-trip.

    Returns:
        dict: {'output_file': str, 'asset_count': int}

    Raises:
        ValueError: invalid arguments.
        RuntimeError: the export failed.
    """
    # The task layer picks the data source; the service does the writing.
    url_values = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)

    exporter = create_export_service(target_id)
    outcome = exporter.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=url_values,
        batch_size=batch_size,
    )

    # Keep the historical response shape for existing callers.
    return {
        'output_file': outcome['output_file'],
        'asset_count': outcome['total_count'],
    }
|
||||
@@ -1,64 +0,0 @@
|
||||
"""导出 Endpoint URL 到文件的 Task
|
||||
|
||||
使用 TargetExportService 统一处理导出逻辑和默认值回退
|
||||
数据源: Endpoint.url
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, Optional
|
||||
|
||||
from prefect import task
|
||||
|
||||
from apps.asset.models import Endpoint
|
||||
from apps.scan.services.target_export_service import create_export_service
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(name="export_endpoints")
def export_endpoints_task(
    target_id: int,
    output_file: str,
    batch_size: int = 1000,
) -> Dict[str, object]:
    """Export every Endpoint URL under a target to a text file.

    Data source: Endpoint.url.

    Lazy-default mode: when the database holds no rows, the export
    service generates default URLs from the Target type (DOMAIN /
    IP / CIDR expansion).

    Args:
        target_id: target identifier.
        output_file: absolute path of the output file.
        batch_size: rows fetched per database iteration.

    Returns:
        dict: {"success": bool, "output_file": str, "total_count": int}
    """
    # The task layer picks the data source; the service does the writing.
    url_values = Endpoint.objects.filter(target_id=target_id).values_list('url', flat=True)

    exporter = create_export_service(target_id)
    outcome = exporter.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=url_values,
        batch_size=batch_size,
    )

    # Keep the historical response shape for existing callers.
    return {key: outcome[key] for key in ("success", "output_file", "total_count")}
|
||||
@@ -1,40 +0,0 @@
|
||||
"""
|
||||
扫描模块工具包
|
||||
|
||||
提供扫描相关的工具函数。
|
||||
"""
|
||||
|
||||
from .directory_cleanup import remove_directory
|
||||
from .command_builder import build_scan_command
|
||||
from .command_executor import execute_and_wait, execute_stream
|
||||
from .wordlist_helpers import ensure_wordlist_local
|
||||
from .nuclei_helpers import ensure_nuclei_templates_local
|
||||
from .performance import FlowPerformanceTracker, CommandPerformanceTracker
|
||||
from .workspace_utils import setup_scan_workspace, setup_scan_directory
|
||||
from .user_logger import user_log
|
||||
from . import config_parser
|
||||
|
||||
__all__ = [
|
||||
# 目录清理
|
||||
'remove_directory',
|
||||
# 工作空间
|
||||
'setup_scan_workspace', # 创建 Scan 根工作空间
|
||||
'setup_scan_directory', # 创建扫描子目录
|
||||
# 命令构建
|
||||
'build_scan_command', # 扫描工具命令构建(基于 f-string)
|
||||
# 命令执行
|
||||
'execute_and_wait', # 等待式执行(文件输出)
|
||||
'execute_stream', # 流式执行(实时处理)
|
||||
# 字典文件
|
||||
'ensure_wordlist_local', # 确保本地字典文件(含 hash 校验)
|
||||
# Nuclei 模板
|
||||
'ensure_nuclei_templates_local', # 确保本地模板(含 commit hash 校验)
|
||||
# 性能监控
|
||||
'FlowPerformanceTracker', # Flow 性能追踪器(含系统资源采样)
|
||||
'CommandPerformanceTracker', # 命令性能追踪器
|
||||
# 扫描日志
|
||||
'user_log', # 用户可见扫描日志记录
|
||||
# 配置解析
|
||||
'config_parser',
|
||||
]
|
||||
|
||||
@@ -1,497 +0,0 @@
|
||||
from rest_framework import viewsets, status
|
||||
from rest_framework.decorators import action
|
||||
from rest_framework.response import Response
|
||||
from rest_framework.exceptions import NotFound, APIException
|
||||
from rest_framework.filters import SearchFilter
|
||||
from django_filters.rest_framework import DjangoFilterBackend
|
||||
from django.core.exceptions import ObjectDoesNotExist, ValidationError
|
||||
from django.db.utils import DatabaseError, IntegrityError, OperationalError
|
||||
import logging
|
||||
|
||||
from apps.common.response_helpers import success_response, error_response
|
||||
from apps.common.error_codes import ErrorCodes
|
||||
from apps.scan.utils.config_merger import ConfigConflictError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from ..models import Scan, ScheduledScan
|
||||
from ..serializers import (
|
||||
ScanSerializer, ScanHistorySerializer, QuickScanSerializer,
|
||||
InitiateScanSerializer, ScheduledScanSerializer, CreateScheduledScanSerializer,
|
||||
UpdateScheduledScanSerializer, ToggleScheduledScanSerializer
|
||||
)
|
||||
from ..services.scan_service import ScanService
|
||||
from ..services.scheduled_scan_service import ScheduledScanService
|
||||
from ..repositories import ScheduledScanDTO
|
||||
from apps.targets.services.target_service import TargetService
|
||||
from apps.targets.services.organization_service import OrganizationService
|
||||
from apps.engine.services.engine_service import EngineService
|
||||
from apps.common.definitions import ScanStatus
|
||||
from apps.common.pagination import BasePagination
|
||||
|
||||
|
||||
class ScanViewSet(viewsets.ModelViewSet):
|
||||
"""扫描任务视图集"""
|
||||
serializer_class = ScanSerializer
|
||||
pagination_class = BasePagination
|
||||
filter_backends = [DjangoFilterBackend, SearchFilter]
|
||||
filterset_fields = ['target'] # 支持 ?target=123 过滤
|
||||
search_fields = ['target__name'] # 按目标名称搜索
|
||||
|
||||
def get_queryset(self):
    """Return the scan queryset backing this viewset.

    Delegates to ScanService.get_all_scans(prefetch_relations=True)
    and returns the result unchanged.

    NOTE(review): earlier comments here claimed prefetch_related was
    removed for performance, yet prefetch_relations=True is passed —
    confirm what the service actually preloads. List pages are expected
    to rely on cached count fields (cached_*_count) rather than live
    COUNT queries, and results to be ordered newest-first, but both
    behaviors live inside the service, not here.
    """
    scan_service = ScanService()
    queryset = scan_service.get_all_scans(prefetch_relations=True)

    return queryset
|
||||
|
||||
def get_serializer_class(self):
    """Pick the serializer class for the current action.

    `list` and `retrieve` use ScanHistorySerializer (includes summary
    and progress); every other action uses the standard ScanSerializer.
    """
    history_actions = ('list', 'retrieve')
    if self.action in history_actions:
        return ScanHistorySerializer
    return ScanSerializer
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
    """
    Delete a single scan task (two-phase deletion).

    1. Soft delete: the scan becomes invisible to users immediately.
    2. Hard delete: performed asynchronously in the background.
    """
    try:
        scan = self.get_object()
        scan_service = ScanService()
        result = scan_service.delete_scans_two_phase([scan.id])

        return success_response(
            data={
                'scanId': scan.id,
                'deletedCount': result['soft_deleted_count'],
                'deletedScans': result['scan_names']
            }
        )

    except Scan.DoesNotExist:
        return error_response(
            code=ErrorCodes.NOT_FOUND,
            status_code=status.HTTP_404_NOT_FOUND
        )
    except ValueError as e:
        # Raised by the service layer when the id matches no record.
        return error_response(
            code=ErrorCodes.NOT_FOUND,
            message=str(e),
            status_code=status.HTTP_404_NOT_FOUND
        )
    except Exception as e:
        logger.exception("删除扫描任务时发生错误")
        return error_response(
            code=ErrorCodes.SERVER_ERROR,
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
        )
|
||||
|
||||
@action(detail=False, methods=['post'])
def quick(self, request):
    """
    Quick-scan endpoint.

    Behaviour:
    1. Accept a target list and a YAML configuration.
    2. Parse each input automatically (URL, domain, IP, CIDR).
    3. Bulk-create Target / Website / Endpoint assets.
    4. Immediately launch a batch scan over the created targets.

    Request body:
        {
            "targets": [{"name": "example.com"}, {"name": "https://example.com/api"}],
            "configuration": "subdomain_discovery:\\n  enabled: true\\n  ...",
            "engine_ids": [1, 2],           # optional, recorded only
            "engine_names": ["EngineA"]     # optional, recorded only
        }

    Supported input formats:
    - domain: example.com
    - IP: 192.168.1.1
    - CIDR: 10.0.0.0/8
    - URL: https://example.com/api/v1
    """
    from ..services.quick_scan_service import QuickScanService

    serializer = QuickScanSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)

    targets_data = serializer.validated_data['targets']
    configuration = serializer.validated_data['configuration']
    engine_ids = serializer.validated_data.get('engine_ids', [])
    engine_names = serializer.validated_data.get('engine_names', [])

    try:
        # Extract the raw input strings.
        inputs = [t['name'] for t in targets_data]

        # 1. Parse the inputs and create assets via QuickScanService.
        quick_scan_service = QuickScanService()
        result = quick_scan_service.process_quick_scan(inputs, engine_ids[0] if engine_ids else None)

        targets = result['targets']

        if not targets:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='No valid targets for scanning',
                details=result.get('errors', []),
                status_code=status.HTTP_400_BAD_REQUEST
            )

        # 2. Create scans directly from the configuration sent by the frontend.
        scan_service = ScanService()
        created_scans = scan_service.create_scans(
            targets=targets,
            engine_ids=engine_ids,
            engine_names=engine_names,
            yaml_configuration=configuration
        )

        # Bail out when no scan task could be created (e.g. every target
        # already has an active scan).
        if not created_scans:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='No scan tasks were created. All targets may already have active scans.',
                details={
                    'targetStats': result['target_stats'],
                    'assetStats': result['asset_stats'],
                    'errors': result.get('errors', [])
                },
                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY
            )

        # Serialize the created scans for the response.
        scan_serializer = ScanSerializer(created_scans, many=True)

        return success_response(
            data={
                'count': len(created_scans),
                'targetStats': result['target_stats'],
                'assetStats': result['asset_stats'],
                'errors': result.get('errors', []),
                'scans': scan_serializer.data
            },
            status_code=status.HTTP_201_CREATED
        )

    except ValidationError as e:
        return error_response(
            code=ErrorCodes.VALIDATION_ERROR,
            message=str(e),
            status_code=status.HTTP_400_BAD_REQUEST
        )
    except Exception as e:
        logger.exception("快速扫描启动失败")
        return error_response(
            code=ErrorCodes.SERVER_ERROR,
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
        )
|
||||
|
||||
@action(detail=False, methods=['post'])
def initiate(self, request):
    """
    Launch scan task(s).

    Request parameters:
    - organization_id: organization id (int, optional)
    - target_id: target id (int, optional)
    - configuration: YAML configuration string (str, required)
    - engine_ids: scan engine id list (list[int], required)
    - engine_names: engine name list (list[str], required)

    Note: organization_id and target_id are mutually exclusive —
    exactly one should be supplied.

    Returns:
        The created scan task(s), serialized.
    """
    # Validate the request payload with the serializer.
    serializer = InitiateScanSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)

    # Pull out the validated fields.
    organization_id = serializer.validated_data.get('organization_id')
    target_id = serializer.validated_data.get('target_id')
    configuration = serializer.validated_data['configuration']
    engine_ids = serializer.validated_data['engine_ids']
    engine_names = serializer.validated_data['engine_names']

    try:
        # Resolve the list of targets to scan.
        scan_service = ScanService()

        if organization_id:
            from apps.targets.repositories import DjangoOrganizationRepository
            org_repo = DjangoOrganizationRepository()
            organization = org_repo.get_by_id(organization_id)
            if not organization:
                raise ObjectDoesNotExist(f'Organization ID {organization_id} 不存在')
            targets = org_repo.get_targets(organization_id)
            if not targets:
                raise ValidationError(f'组织 ID {organization_id} 下没有目标')
        else:
            from apps.targets.repositories import DjangoTargetRepository
            target_repo = DjangoTargetRepository()
            target = target_repo.get_by_id(target_id)
            if not target:
                raise ObjectDoesNotExist(f'Target ID {target_id} 不存在')
            targets = [target]

        # Create scans directly from the configuration sent by the frontend.
        created_scans = scan_service.create_scans(
            targets=targets,
            engine_ids=engine_ids,
            engine_names=engine_names,
            yaml_configuration=configuration
        )

        # Bail out when no scan task could be created.
        if not created_scans:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='No scan tasks were created. All targets may already have active scans.',
                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY
            )

        # Serialize the created scans for the response.
        scan_serializer = ScanSerializer(created_scans, many=True)

        return success_response(
            data={
                'count': len(created_scans),
                'scans': scan_serializer.data
            },
            status_code=status.HTTP_201_CREATED
        )

    except ObjectDoesNotExist as e:
        # Resource not found (raised by the service/repository layer).
        return error_response(
            code=ErrorCodes.NOT_FOUND,
            message=str(e),
            status_code=status.HTTP_404_NOT_FOUND
        )

    except ValidationError as e:
        # Parameter validation error (raised by the service layer).
        return error_response(
            code=ErrorCodes.VALIDATION_ERROR,
            message=str(e),
            status_code=status.HTTP_400_BAD_REQUEST
        )

    except (DatabaseError, IntegrityError, OperationalError):
        # Database-level failure.
        return error_response(
            code=ErrorCodes.SERVER_ERROR,
            message='Database error',
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE
        )
|
||||
|
||||
# 所有快照相关的 action 和 export 已迁移到 asset/views.py 中的快照 ViewSet
|
||||
# GET /api/scans/{id}/subdomains/ -> SubdomainSnapshotViewSet
|
||||
# GET /api/scans/{id}/subdomains/export/ -> SubdomainSnapshotViewSet.export
|
||||
# GET /api/scans/{id}/websites/ -> WebsiteSnapshotViewSet
|
||||
# GET /api/scans/{id}/websites/export/ -> WebsiteSnapshotViewSet.export
|
||||
# GET /api/scans/{id}/directories/ -> DirectorySnapshotViewSet
|
||||
# GET /api/scans/{id}/directories/export/ -> DirectorySnapshotViewSet.export
|
||||
# GET /api/scans/{id}/endpoints/ -> EndpointSnapshotViewSet
|
||||
# GET /api/scans/{id}/endpoints/export/ -> EndpointSnapshotViewSet.export
|
||||
# GET /api/scans/{id}/ip-addresses/ -> HostPortMappingSnapshotViewSet
|
||||
# GET /api/scans/{id}/ip-addresses/export/ -> HostPortMappingSnapshotViewSet.export
|
||||
# GET /api/scans/{id}/vulnerabilities/ -> VulnerabilitySnapshotViewSet
|
||||
|
||||
@action(detail=False, methods=['post', 'delete'], url_path='bulk-delete')
def bulk_delete(self, request):
    """
    Bulk-delete scan records (two-phase: soft delete now, hard delete async).

    Request parameters:
    - ids: list of scan ids (list[int], required)

    Example:
        POST /api/scans/bulk-delete/
        {"ids": [1, 2, 3]}

    Returns:
    - deletedCount: number of records actually deleted
    - deletedScans: names of the deleted scans

    Notes:
    - Cascading delete also removes related subdomains, endpoints, etc.
    - Only existing records are deleted; unknown ids are ignored.
    """
    ids = request.data.get('ids', [])

    # Validate the payload shape before touching the service layer.
    validation_error = None
    if not ids:
        validation_error = 'Missing required parameter: ids'
    elif not isinstance(ids, list):
        validation_error = 'ids must be an array'
    elif any(not isinstance(item, int) for item in ids):
        validation_error = 'All elements in ids array must be integers'

    if validation_error is not None:
        return error_response(
            code=ErrorCodes.VALIDATION_ERROR,
            message=validation_error,
            status_code=status.HTTP_400_BAD_REQUEST
        )

    try:
        # Two-phase bulk delete via the service layer.
        result = ScanService().delete_scans_two_phase(ids)

        return success_response(
            data={
                'deletedCount': result['soft_deleted_count'],
                'deletedScans': result['scan_names']
            }
        )

    except ValueError as e:
        # None of the ids matched an existing record.
        return error_response(
            code=ErrorCodes.NOT_FOUND,
            message=str(e),
            status_code=status.HTTP_404_NOT_FOUND
        )

    except Exception:
        logger.exception("批量删除扫描任务时发生错误")
        return error_response(
            code=ErrorCodes.SERVER_ERROR,
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
        )
|
||||
|
||||
@action(detail=False, methods=['get'])
|
||||
def statistics(self, request):
|
||||
"""
|
||||
获取扫描统计数据
|
||||
|
||||
返回扫描任务的汇总统计信息,用于仪表板和扫描历史页面。
|
||||
使用缓存字段聚合查询,性能优异。
|
||||
|
||||
返回:
|
||||
- total: 总扫描次数
|
||||
- running: 运行中的扫描数量
|
||||
- completed: 已完成的扫描数量
|
||||
- failed: 失败的扫描数量
|
||||
- totalVulns: 总共发现的漏洞数量
|
||||
- totalSubdomains: 总共发现的子域名数量
|
||||
- totalEndpoints: 总共发现的端点数量
|
||||
- totalAssets: 总资产数
|
||||
"""
|
||||
try:
|
||||
# 使用 Service 层获取统计数据
|
||||
scan_service = ScanService()
|
||||
stats = scan_service.get_statistics()
|
||||
|
||||
return success_response(
|
||||
data={
|
||||
'total': stats['total'],
|
||||
'running': stats['running'],
|
||||
'completed': stats['completed'],
|
||||
'failed': stats['failed'],
|
||||
'totalVulns': stats['total_vulns'],
|
||||
'totalSubdomains': stats['total_subdomains'],
|
||||
'totalEndpoints': stats['total_endpoints'],
|
||||
'totalWebsites': stats['total_websites'],
|
||||
'totalAssets': stats['total_assets'],
|
||||
}
|
||||
)
|
||||
|
||||
except (DatabaseError, OperationalError):
|
||||
return error_response(
|
||||
code=ErrorCodes.SERVER_ERROR,
|
||||
message='Database error',
|
||||
status_code=status.HTTP_503_SERVICE_UNAVAILABLE
|
||||
)
|
||||
|
||||
@action(detail=True, methods=['post'])
|
||||
def stop(self, request, pk=None): # pylint: disable=unused-argument
|
||||
"""
|
||||
停止扫描任务
|
||||
|
||||
URL: POST /api/scans/{id}/stop/
|
||||
|
||||
功能:
|
||||
- 终止正在运行或初始化的扫描任务
|
||||
- 更新扫描状态为 CANCELLED
|
||||
|
||||
状态限制:
|
||||
- 只能停止 RUNNING 或 INITIATED 状态的扫描
|
||||
- 已完成、失败或取消的扫描无法停止
|
||||
|
||||
返回:
|
||||
- message: 成功消息
|
||||
- revokedTaskCount: 取消的 Flow Run 数量
|
||||
"""
|
||||
try:
|
||||
# 使用 Service 层处理停止逻辑
|
||||
scan_service = ScanService()
|
||||
success, revoked_count = scan_service.stop_scan(scan_id=pk)
|
||||
|
||||
if not success:
|
||||
# 检查是否是状态不允许的问题
|
||||
scan = scan_service.get_scan(scan_id=pk, prefetch_relations=False)
|
||||
if scan and scan.status not in [ScanStatus.RUNNING, ScanStatus.INITIATED]:
|
||||
return error_response(
|
||||
code=ErrorCodes.BAD_REQUEST,
|
||||
message=f'Cannot stop scan: current status is {ScanStatus(scan.status).label}',
|
||||
status_code=status.HTTP_400_BAD_REQUEST
|
||||
)
|
||||
# 其他失败原因
|
||||
return error_response(
|
||||
code=ErrorCodes.SERVER_ERROR,
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
|
||||
)
|
||||
|
||||
return success_response(
|
||||
data={'revokedTaskCount': revoked_count}
|
||||
)
|
||||
|
||||
except ObjectDoesNotExist:
|
||||
return error_response(
|
||||
code=ErrorCodes.NOT_FOUND,
|
||||
message=f'Scan ID {pk} not found',
|
||||
status_code=status.HTTP_404_NOT_FOUND
|
||||
)
|
||||
|
||||
except (DatabaseError, IntegrityError, OperationalError):
|
||||
return error_response(
|
||||
code=ErrorCodes.SERVER_ERROR,
|
||||
message='Database error',
|
||||
status_code=status.HTTP_503_SERVICE_UNAVAILABLE
|
||||
)
|
||||
@@ -1,27 +0,0 @@
|
||||
[tool.pytest.ini_options]
|
||||
DJANGO_SETTINGS_MODULE = "config.settings"
|
||||
python_files = ["test_*.py", "*_test.py"]
|
||||
python_classes = ["Test*"]
|
||||
python_functions = ["test_*"]
|
||||
testpaths = ["apps"]
|
||||
addopts = "-v --reuse-db"
|
||||
|
||||
[tool.pylint]
|
||||
django-settings-module = "config.settings"
|
||||
load-plugins = "pylint_django"
|
||||
|
||||
[tool.pylint.messages_control]
|
||||
disable = [
|
||||
"missing-docstring",
|
||||
"invalid-name",
|
||||
"too-few-public-methods",
|
||||
"no-member",
|
||||
"import-error",
|
||||
"no-name-in-module",
|
||||
]
|
||||
|
||||
[tool.pylint.format]
|
||||
max-line-length = 120
|
||||
|
||||
[tool.pylint.basic]
|
||||
good-names = ["i", "j", "k", "ex", "Run", "_", "id", "pk", "ip", "url", "db", "qs"]
|
||||
@@ -11,7 +11,7 @@ import { DashboardDataTable } from "@/components/dashboard/dashboard-data-table"
|
||||
export default function Page() {
|
||||
return (
|
||||
// Content area containing cards, charts and data tables
|
||||
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
|
||||
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6 animate-dashboard-fade-in">
|
||||
{/* Top statistics cards */}
|
||||
<DashboardStatCards />
|
||||
|
||||
|
||||
@@ -40,8 +40,11 @@ export async function generateMetadata({ params }: { params: Promise<{ locale: s
|
||||
title: t('title'),
|
||||
description: t('description'),
|
||||
keywords: t('keywords').split(',').map(k => k.trim()),
|
||||
generator: "Xingrin ASM Platform",
|
||||
generator: "Orbit ASM Platform",
|
||||
authors: [{ name: "yyhuni" }],
|
||||
icons: {
|
||||
icon: [{ url: "/icon.svg", type: "image/svg+xml" }],
|
||||
},
|
||||
openGraph: {
|
||||
title: t('ogTitle'),
|
||||
description: t('ogDescription'),
|
||||
|
||||
@@ -3,125 +3,226 @@
|
||||
import React from "react"
|
||||
import { useRouter } from "next/navigation"
|
||||
import { useTranslations } from "next-intl"
|
||||
import Lottie from "lottie-react"
|
||||
import securityAnimation from "@/public/animations/Security000-Purple.json"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Card, CardContent } from "@/components/ui/card"
|
||||
import {
|
||||
Field,
|
||||
FieldGroup,
|
||||
FieldLabel,
|
||||
} from "@/components/ui/field"
|
||||
import { Spinner } from "@/components/ui/spinner"
|
||||
import { useQueryClient } from "@tanstack/react-query"
|
||||
import dynamic from "next/dynamic"
|
||||
import { LoginBootScreen } from "@/components/auth/login-boot-screen"
|
||||
import { TerminalLogin } from "@/components/ui/terminal-login"
|
||||
import { useLogin, useAuth } from "@/hooks/use-auth"
|
||||
import { vulnerabilityKeys } from "@/hooks/use-vulnerabilities"
|
||||
import { useRoutePrefetch } from "@/hooks/use-route-prefetch"
|
||||
import { getAssetStatistics, getStatisticsHistory } from "@/services/dashboard.service"
|
||||
import { getScans } from "@/services/scan.service"
|
||||
import { VulnerabilityService } from "@/services/vulnerability.service"
|
||||
|
||||
// Dynamic import to avoid SSR issues with WebGL
|
||||
const PixelBlast = dynamic(() => import("@/components/PixelBlast"), { ssr: false })
|
||||
|
||||
const BOOT_SPLASH_MS = 600
|
||||
const BOOT_FADE_MS = 200
|
||||
|
||||
type BootOverlayPhase = "entering" | "visible" | "leaving" | "hidden"
|
||||
|
||||
export default function LoginPage() {
|
||||
// Preload all page components on login page
|
||||
useRoutePrefetch()
|
||||
const router = useRouter()
|
||||
const queryClient = useQueryClient()
|
||||
const { data: auth, isLoading: authLoading } = useAuth()
|
||||
const { mutate: login, isPending } = useLogin()
|
||||
const t = useTranslations("auth")
|
||||
|
||||
const [username, setUsername] = React.useState("")
|
||||
const [password, setPassword] = React.useState("")
|
||||
const { mutateAsync: login, isPending } = useLogin()
|
||||
const t = useTranslations("auth.terminal")
|
||||
|
||||
const loginStartedRef = React.useRef(false)
|
||||
const [loginReady, setLoginReady] = React.useState(false)
|
||||
|
||||
const [pixelFirstFrame, setPixelFirstFrame] = React.useState(false)
|
||||
const handlePixelFirstFrame = React.useCallback(() => {
|
||||
setPixelFirstFrame(true)
|
||||
}, [])
|
||||
|
||||
// 提取预加载逻辑为可复用函数
|
||||
const prefetchDashboardData = React.useCallback(async () => {
|
||||
const scansParams = { page: 1, pageSize: 10 }
|
||||
const vulnsParams = { page: 1, pageSize: 10 }
|
||||
|
||||
return Promise.allSettled([
|
||||
queryClient.prefetchQuery({
|
||||
queryKey: ["asset", "statistics"],
|
||||
queryFn: getAssetStatistics,
|
||||
}),
|
||||
queryClient.prefetchQuery({
|
||||
queryKey: ["asset", "statistics", "history", 7],
|
||||
queryFn: () => getStatisticsHistory(7),
|
||||
}),
|
||||
queryClient.prefetchQuery({
|
||||
queryKey: ["scans", scansParams],
|
||||
queryFn: () => getScans(scansParams),
|
||||
}),
|
||||
queryClient.prefetchQuery({
|
||||
queryKey: vulnerabilityKeys.list(vulnsParams),
|
||||
queryFn: () => VulnerabilityService.getAllVulnerabilities(vulnsParams),
|
||||
}),
|
||||
])
|
||||
}, [queryClient])
|
||||
|
||||
// Always show a short splash on entering the login page.
|
||||
const [bootMinDone, setBootMinDone] = React.useState(false)
|
||||
const [bootPhase, setBootPhase] = React.useState<BootOverlayPhase>("entering")
|
||||
|
||||
// If already logged in, redirect to dashboard
|
||||
React.useEffect(() => {
|
||||
if (auth?.authenticated) {
|
||||
router.push("/dashboard/")
|
||||
setBootMinDone(false)
|
||||
setBootPhase("entering")
|
||||
|
||||
const bootTimer = setTimeout(() => setBootMinDone(true), BOOT_SPLASH_MS)
|
||||
const raf = requestAnimationFrame(() => setBootPhase("visible"))
|
||||
|
||||
return () => {
|
||||
clearTimeout(bootTimer)
|
||||
cancelAnimationFrame(raf)
|
||||
}
|
||||
}, [auth, router])
|
||||
}, [])
|
||||
|
||||
const handleSubmit = (e: React.FormEvent) => {
|
||||
e.preventDefault()
|
||||
login({ username, password })
|
||||
|
||||
// Start hiding the splash after the minimum time AND auth check completes.
|
||||
// Note: don't schedule the fade-out timer in the same effect where we set `bootPhase`,
|
||||
// otherwise the effect cleanup will cancel the timer when `bootPhase` changes.
|
||||
React.useEffect(() => {
|
||||
if (bootPhase !== "visible") return
|
||||
if (!bootMinDone) return
|
||||
if (authLoading) return
|
||||
if (!pixelFirstFrame) return
|
||||
|
||||
setBootPhase("leaving")
|
||||
}, [authLoading, bootMinDone, bootPhase, pixelFirstFrame])
|
||||
|
||||
React.useEffect(() => {
|
||||
if (bootPhase !== "leaving") return
|
||||
|
||||
const timer = setTimeout(() => setBootPhase("hidden"), BOOT_FADE_MS)
|
||||
return () => clearTimeout(timer)
|
||||
}, [bootPhase])
|
||||
|
||||
// Memoize translations object to avoid recreating on every render
|
||||
const translations = React.useMemo(() => ({
|
||||
title: t("title"),
|
||||
subtitle: t("subtitle"),
|
||||
usernamePrompt: t("usernamePrompt"),
|
||||
passwordPrompt: t("passwordPrompt"),
|
||||
authenticating: t("authenticating"),
|
||||
processing: t("processing"),
|
||||
accessGranted: t("accessGranted"),
|
||||
welcomeMessage: t("welcomeMessage"),
|
||||
authFailed: t("authFailed"),
|
||||
invalidCredentials: t("invalidCredentials"),
|
||||
shortcuts: t("shortcuts"),
|
||||
submit: t("submit"),
|
||||
cancel: t("cancel"),
|
||||
clear: t("clear"),
|
||||
startEnd: t("startEnd"),
|
||||
}), [t])
|
||||
|
||||
// If already logged in, warm up the dashboard, then redirect.
|
||||
React.useEffect(() => {
|
||||
if (authLoading) return
|
||||
if (!auth?.authenticated) return
|
||||
if (loginStartedRef.current) return
|
||||
|
||||
let cancelled = false
|
||||
|
||||
void (async () => {
|
||||
await prefetchDashboardData()
|
||||
|
||||
if (cancelled) return
|
||||
router.replace("/dashboard/")
|
||||
})()
|
||||
|
||||
return () => {
|
||||
cancelled = true
|
||||
}
|
||||
}, [auth?.authenticated, authLoading, prefetchDashboardData, router])
|
||||
|
||||
React.useEffect(() => {
|
||||
if (!loginReady) return
|
||||
router.replace("/dashboard/")
|
||||
}, [loginReady, router])
|
||||
|
||||
const handleLogin = async (username: string, password: string) => {
|
||||
loginStartedRef.current = true
|
||||
setLoginReady(false)
|
||||
|
||||
// 并行执行独立操作:登录验证 + 预加载 dashboard bundle
|
||||
const [loginRes] = await Promise.all([
|
||||
login({ username, password }),
|
||||
router.prefetch("/dashboard/"),
|
||||
])
|
||||
|
||||
// 预加载 dashboard 数据
|
||||
await prefetchDashboardData()
|
||||
|
||||
// Prime auth cache so AuthLayout doesn't flash a full-screen loading state.
|
||||
queryClient.setQueryData(["auth", "me"], {
|
||||
authenticated: true,
|
||||
user: loginRes.user,
|
||||
})
|
||||
|
||||
setLoginReady(true)
|
||||
}
|
||||
|
||||
// Show spinner while loading
|
||||
if (authLoading) {
|
||||
return (
|
||||
<div className="flex min-h-svh w-full flex-col items-center justify-center gap-4 bg-background">
|
||||
<Spinner className="size-8 text-primary" />
|
||||
<p className="text-muted-foreground text-sm" suppressHydrationWarning>loading...</p>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
// Don't show login page if already logged in
|
||||
if (auth?.authenticated) {
|
||||
return null
|
||||
}
|
||||
const loginVisible = bootPhase === "leaving" || bootPhase === "hidden"
|
||||
|
||||
return (
|
||||
<div className="login-bg flex min-h-svh flex-col p-6 md:p-10">
|
||||
{/* Main content area */}
|
||||
<div className="flex-1 flex items-center justify-center">
|
||||
<div className="w-full max-w-sm md:max-w-4xl">
|
||||
<Card className="overflow-hidden p-0">
|
||||
<CardContent className="grid p-0 md:grid-cols-2">
|
||||
<form className="p-6 md:p-8" onSubmit={handleSubmit}>
|
||||
<FieldGroup>
|
||||
{/* Fingerprint identifier - for FOFA/Shodan and other search engines to identify */}
|
||||
<meta name="generator" content="Xingrin ASM Platform" />
|
||||
<div className="flex flex-col items-center gap-2 text-center">
|
||||
<h1 className="text-2xl font-bold">{t("title")}</h1>
|
||||
<p className="text-sm text-muted-foreground mt-1">
|
||||
{t("subtitle")}
|
||||
</p>
|
||||
</div>
|
||||
<Field>
|
||||
<FieldLabel htmlFor="username">{t("username")}</FieldLabel>
|
||||
<Input
|
||||
id="username"
|
||||
type="text"
|
||||
placeholder={t("usernamePlaceholder")}
|
||||
value={username}
|
||||
onChange={(e) => setUsername(e.target.value)}
|
||||
required
|
||||
autoFocus
|
||||
/>
|
||||
</Field>
|
||||
<Field>
|
||||
<FieldLabel htmlFor="password">{t("password")}</FieldLabel>
|
||||
<Input
|
||||
id="password"
|
||||
type="password"
|
||||
placeholder={t("passwordPlaceholder")}
|
||||
value={password}
|
||||
onChange={(e) => setPassword(e.target.value)}
|
||||
required
|
||||
/>
|
||||
</Field>
|
||||
<Field>
|
||||
<Button type="submit" className="w-full" disabled={isPending}>
|
||||
{isPending ? t("loggingIn") : t("login")}
|
||||
</Button>
|
||||
</Field>
|
||||
</FieldGroup>
|
||||
</form>
|
||||
<div className="bg-primary/5 relative hidden md:flex md:items-center md:justify-center">
|
||||
<div className="text-center p-4">
|
||||
<Lottie
|
||||
animationData={securityAnimation}
|
||||
loop={true}
|
||||
className="w-96 h-96 mx-auto"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</div>
|
||||
<div className="relative flex min-h-svh flex-col bg-black">
|
||||
<div className={`fixed inset-0 z-0 transition-opacity duration-300 ${loginVisible ? "opacity-100" : "opacity-0"}`}>
|
||||
<PixelBlast
|
||||
onFirstFrame={handlePixelFirstFrame}
|
||||
className=""
|
||||
style={{}}
|
||||
pixelSize={6.5}
|
||||
patternScale={4.5}
|
||||
color="#FF10F0"
|
||||
speed={0.35}
|
||||
enableRipples={false}
|
||||
/>
|
||||
</div>
|
||||
|
||||
|
||||
{/* Fingerprint identifier - for FOFA/Shodan and other search engines to identify */}
|
||||
<meta name="generator" content="Orbit ASM Platform" />
|
||||
|
||||
{/* Main content area */}
|
||||
<div
|
||||
className={`relative z-10 flex-1 flex items-center justify-center p-6 transition-[opacity,transform] duration-300 ${
|
||||
loginVisible ? "opacity-100 translate-y-0" : "opacity-0 translate-y-2"
|
||||
}`}
|
||||
>
|
||||
<TerminalLogin
|
||||
onLogin={handleLogin}
|
||||
authDone={loginReady}
|
||||
isPending={isPending}
|
||||
translations={translations}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Version number - fixed at the bottom of the page */}
|
||||
<div className="flex-shrink-0 text-center py-4">
|
||||
<div
|
||||
className={`relative z-10 flex-shrink-0 text-center py-4 transition-opacity duration-300 ${
|
||||
loginVisible ? "opacity-100" : "opacity-0"
|
||||
}`}
|
||||
>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{process.env.NEXT_PUBLIC_VERSION || 'dev'}
|
||||
{process.env.NEXT_PUBLIC_VERSION || "dev"}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Full-page splash overlay */}
|
||||
{bootPhase !== "hidden" && (
|
||||
<div
|
||||
className={`fixed inset-0 z-50 transition-opacity ease-out ${
|
||||
bootPhase === "visible" ? "opacity-100" : "opacity-0 pointer-events-none"
|
||||
}`}
|
||||
style={{ transitionDuration: `${BOOT_FADE_MS}ms` }}
|
||||
>
|
||||
<LoginBootScreen />
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ const FEATURE_LIST = [
|
||||
{ key: "site_scan" },
|
||||
{ key: "fingerprint_detect" },
|
||||
{ key: "directory_scan" },
|
||||
{ key: "screenshot" },
|
||||
{ key: "url_fetch" },
|
||||
{ key: "vuln_scan" },
|
||||
] as const
|
||||
@@ -48,6 +49,7 @@ function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
|
||||
site_scan: false,
|
||||
fingerprint_detect: false,
|
||||
directory_scan: false,
|
||||
screenshot: false,
|
||||
url_fetch: false,
|
||||
vuln_scan: false,
|
||||
}
|
||||
@@ -64,6 +66,7 @@ function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
|
||||
site_scan: !!config.site_scan,
|
||||
fingerprint_detect: !!config.fingerprint_detect,
|
||||
directory_scan: !!config.directory_scan,
|
||||
screenshot: !!config.screenshot,
|
||||
url_fetch: !!config.url_fetch,
|
||||
vuln_scan: !!config.vuln_scan,
|
||||
}
|
||||
|
||||
@@ -3,9 +3,10 @@
|
||||
import React from "react"
|
||||
import { usePathname, useParams } from "next/navigation"
|
||||
import Link from "next/link"
|
||||
import { Target } from "lucide-react"
|
||||
import { Target, LayoutDashboard, Package, FolderSearch, Image, ShieldAlert } from "lucide-react"
|
||||
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
import { Skeleton } from "@/components/ui/skeleton"
|
||||
import { useScan } from "@/hooks/use-scans"
|
||||
import { useTranslations } from "next-intl"
|
||||
|
||||
@@ -19,94 +20,124 @@ export default function ScanHistoryLayout({
|
||||
const { data: scanData, isLoading } = useScan(parseInt(id))
|
||||
const t = useTranslations("scan.history")
|
||||
|
||||
const getActiveTab = () => {
|
||||
if (pathname.includes("/subdomain")) return "subdomain"
|
||||
if (pathname.includes("/endpoints")) return "endpoints"
|
||||
if (pathname.includes("/websites")) return "websites"
|
||||
// Get primary navigation active tab
|
||||
const getPrimaryTab = () => {
|
||||
if (pathname.includes("/overview")) return "overview"
|
||||
if (pathname.includes("/directories")) return "directories"
|
||||
if (pathname.includes("/screenshots")) return "screenshots"
|
||||
if (pathname.includes("/vulnerabilities")) return "vulnerabilities"
|
||||
if (pathname.includes("/ip-addresses")) return "ip-addresses"
|
||||
return ""
|
||||
// All asset pages fall under "assets"
|
||||
if (
|
||||
pathname.includes("/websites") ||
|
||||
pathname.includes("/subdomain") ||
|
||||
pathname.includes("/ip-addresses") ||
|
||||
pathname.includes("/endpoints")
|
||||
) {
|
||||
return "assets"
|
||||
}
|
||||
return "overview"
|
||||
}
|
||||
|
||||
// Get secondary navigation active tab (for assets)
|
||||
const getSecondaryTab = () => {
|
||||
if (pathname.includes("/websites")) return "websites"
|
||||
if (pathname.includes("/subdomain")) return "subdomain"
|
||||
if (pathname.includes("/ip-addresses")) return "ip-addresses"
|
||||
if (pathname.includes("/endpoints")) return "endpoints"
|
||||
return "websites"
|
||||
}
|
||||
|
||||
// Check if we should show secondary navigation
|
||||
const showSecondaryNav = getPrimaryTab() === "assets"
|
||||
|
||||
const basePath = `/scan/history/${id}`
|
||||
const tabPaths = {
|
||||
subdomain: `${basePath}/subdomain/`,
|
||||
endpoints: `${basePath}/endpoints/`,
|
||||
websites: `${basePath}/websites/`,
|
||||
const primaryPaths = {
|
||||
overview: `${basePath}/overview/`,
|
||||
assets: `${basePath}/websites/`, // Default to websites when clicking assets
|
||||
directories: `${basePath}/directories/`,
|
||||
screenshots: `${basePath}/screenshots/`,
|
||||
vulnerabilities: `${basePath}/vulnerabilities/`,
|
||||
}
|
||||
|
||||
const secondaryPaths = {
|
||||
websites: `${basePath}/websites/`,
|
||||
subdomain: `${basePath}/subdomain/`,
|
||||
"ip-addresses": `${basePath}/ip-addresses/`,
|
||||
endpoints: `${basePath}/endpoints/`,
|
||||
}
|
||||
|
||||
// Get counts for each tab from scan data
|
||||
const stats = scanData?.cachedStats
|
||||
const counts = {
|
||||
subdomain: scanData?.summary?.subdomains || 0,
|
||||
endpoints: scanData?.summary?.endpoints || 0,
|
||||
websites: scanData?.summary?.websites || 0,
|
||||
directories: scanData?.summary?.directories || 0,
|
||||
vulnerabilities: scanData?.summary?.vulnerabilities?.total || 0,
|
||||
"ip-addresses": scanData?.summary?.ips || 0,
|
||||
subdomain: stats?.subdomainsCount || 0,
|
||||
endpoints: stats?.endpointsCount || 0,
|
||||
websites: stats?.websitesCount || 0,
|
||||
directories: stats?.directoriesCount || 0,
|
||||
screenshots: stats?.screenshotsCount || 0,
|
||||
vulnerabilities: stats?.vulnsTotal || 0,
|
||||
"ip-addresses": stats?.ipsCount || 0,
|
||||
}
|
||||
|
||||
// Calculate total assets count
|
||||
const totalAssets = counts.websites + counts.subdomain + counts["ip-addresses"] + counts.endpoints
|
||||
|
||||
// Loading state
|
||||
if (isLoading) {
|
||||
return (
|
||||
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
|
||||
{/* Header skeleton */}
|
||||
<div className="flex items-center gap-2 px-4 lg:px-6">
|
||||
<Skeleton className="h-4 w-16" />
|
||||
<span className="text-muted-foreground">/</span>
|
||||
<Skeleton className="h-4 w-32" />
|
||||
</div>
|
||||
{/* Tabs skeleton */}
|
||||
<div className="flex gap-1 px-4 lg:px-6">
|
||||
<Skeleton className="h-9 w-20" />
|
||||
<Skeleton className="h-9 w-20" />
|
||||
<Skeleton className="h-9 w-24" />
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
|
||||
<div className="flex items-center justify-between px-4 lg:px-6">
|
||||
<div>
|
||||
<h2 className="text-2xl font-bold tracking-tight flex items-center gap-2">
|
||||
<Target />
|
||||
Scan Results
|
||||
</h2>
|
||||
<p className="text-muted-foreground">{t("taskId", { id })}</p>
|
||||
</div>
|
||||
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6 h-full">
|
||||
{/* Header: Page label + Scan info */}
|
||||
<div className="flex items-center gap-2 text-sm px-4 lg:px-6">
|
||||
<span className="text-muted-foreground">{t("breadcrumb.scanHistory")}</span>
|
||||
<span className="text-muted-foreground">/</span>
|
||||
<span className="font-medium flex items-center gap-1.5">
|
||||
<Target className="h-4 w-4" />
|
||||
{(scanData?.target as any)?.name || t("taskId", { id })}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center justify-between px-4 lg:px-6">
|
||||
<Tabs value={getActiveTab()} className="w-full">
|
||||
{/* Primary navigation */}
|
||||
<div className="px-4 lg:px-6">
|
||||
<Tabs value={getPrimaryTab()}>
|
||||
<TabsList>
|
||||
<TabsTrigger value="websites" asChild>
|
||||
<Link href={tabPaths.websites} className="flex items-center gap-0.5">
|
||||
Websites
|
||||
{counts.websites > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.websites}
|
||||
</Badge>
|
||||
)}
|
||||
<TabsTrigger value="overview" asChild>
|
||||
<Link href={primaryPaths.overview} className="flex items-center gap-1.5">
|
||||
<LayoutDashboard className="h-4 w-4" />
|
||||
{t("tabs.overview")}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="subdomain" asChild>
|
||||
<Link href={tabPaths.subdomain} className="flex items-center gap-0.5">
|
||||
Subdomains
|
||||
{counts.subdomain > 0 && (
|
||||
<TabsTrigger value="assets" asChild>
|
||||
<Link href={primaryPaths.assets} className="flex items-center gap-1.5">
|
||||
<Package className="h-4 w-4" />
|
||||
{t("tabs.assets")}
|
||||
{totalAssets > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.subdomain}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="ip-addresses" asChild>
|
||||
<Link href={tabPaths["ip-addresses"]} className="flex items-center gap-0.5">
|
||||
IP Addresses
|
||||
{counts["ip-addresses"] > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts["ip-addresses"]}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="endpoints" asChild>
|
||||
<Link href={tabPaths.endpoints} className="flex items-center gap-0.5">
|
||||
URLs
|
||||
{counts.endpoints > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.endpoints}
|
||||
{totalAssets}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="directories" asChild>
|
||||
<Link href={tabPaths.directories} className="flex items-center gap-0.5">
|
||||
Directories
|
||||
<Link href={primaryPaths.directories} className="flex items-center gap-1.5">
|
||||
<FolderSearch className="h-4 w-4" />
|
||||
{t("tabs.directories")}
|
||||
{counts.directories > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.directories}
|
||||
@@ -114,9 +145,21 @@ export default function ScanHistoryLayout({
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="screenshots" asChild>
|
||||
<Link href={primaryPaths.screenshots} className="flex items-center gap-1.5">
|
||||
<Image className="h-4 w-4" />
|
||||
{t("tabs.screenshots")}
|
||||
{counts.screenshots > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.screenshots}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="vulnerabilities" asChild>
|
||||
<Link href={tabPaths.vulnerabilities} className="flex items-center gap-0.5">
|
||||
Vulnerabilities
|
||||
<Link href={primaryPaths.vulnerabilities} className="flex items-center gap-1.5">
|
||||
<ShieldAlert className="h-4 w-4" />
|
||||
{t("tabs.vulnerabilities")}
|
||||
{counts.vulnerabilities > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.vulnerabilities}
|
||||
@@ -128,6 +171,57 @@ export default function ScanHistoryLayout({
|
||||
</Tabs>
|
||||
</div>
|
||||
|
||||
{/* Secondary navigation (only for assets) */}
|
||||
{showSecondaryNav && (
|
||||
<div className="flex items-center px-4 lg:px-6">
|
||||
<Tabs value={getSecondaryTab()} className="w-full">
|
||||
<TabsList variant="underline">
|
||||
<TabsTrigger value="websites" variant="underline" asChild>
|
||||
<Link href={secondaryPaths.websites} className="flex items-center gap-0.5">
|
||||
{t("tabs.websites")}
|
||||
{counts.websites > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.websites}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="subdomain" variant="underline" asChild>
|
||||
<Link href={secondaryPaths.subdomain} className="flex items-center gap-0.5">
|
||||
{t("tabs.subdomains")}
|
||||
{counts.subdomain > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.subdomain}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="ip-addresses" variant="underline" asChild>
|
||||
<Link href={secondaryPaths["ip-addresses"]} className="flex items-center gap-0.5">
|
||||
{t("tabs.ips")}
|
||||
{counts["ip-addresses"] > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts["ip-addresses"]}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="endpoints" variant="underline" asChild>
|
||||
<Link href={secondaryPaths.endpoints} className="flex items-center gap-0.5">
|
||||
{t("tabs.urls")}
|
||||
{counts.endpoints > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.endpoints}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
</Tabs>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Sub-page content */}
|
||||
{children}
|
||||
</div>
|
||||
)
|
||||
|
||||
19
frontend/app/[locale]/scan/history/[id]/overview/page.tsx
Normal file
19
frontend/app/[locale]/scan/history/[id]/overview/page.tsx
Normal file
@@ -0,0 +1,19 @@
|
||||
"use client"
|
||||
|
||||
import { useParams } from "next/navigation"
|
||||
import { ScanOverview } from "@/components/scan/history/scan-overview"
|
||||
|
||||
/**
|
||||
* Scan overview page
|
||||
* Displays scan statistics and summary information
|
||||
*/
|
||||
export default function ScanOverviewPage() {
|
||||
const { id } = useParams<{ id: string }>()
|
||||
const scanId = Number(id)
|
||||
|
||||
return (
|
||||
<div className="flex-1 flex flex-col min-h-0 px-4 lg:px-6">
|
||||
<ScanOverview scanId={scanId} />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -8,7 +8,7 @@ export default function ScanHistoryDetailPage() {
|
||||
const router = useRouter()
|
||||
|
||||
useEffect(() => {
|
||||
router.replace(`/scan/history/${id}/websites/`)
|
||||
router.replace(`/scan/history/${id}/overview/`)
|
||||
}, [id, router])
|
||||
|
||||
return null
|
||||
|
||||
15
frontend/app/[locale]/scan/history/[id]/screenshots/page.tsx
Normal file
15
frontend/app/[locale]/scan/history/[id]/screenshots/page.tsx
Normal file
@@ -0,0 +1,15 @@
|
||||
"use client"
|
||||
|
||||
import { useParams } from "next/navigation"
|
||||
import { ScreenshotsGallery } from "@/components/screenshots/screenshots-gallery"
|
||||
|
||||
export default function ScanScreenshotsPage() {
|
||||
const { id } = useParams<{ id: string }>()
|
||||
const scanId = Number(id)
|
||||
|
||||
return (
|
||||
<div className="px-4 lg:px-6">
|
||||
<ScreenshotsGallery scanId={scanId} />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -29,6 +29,10 @@ export default function NotificationSettingsPage() {
|
||||
enabled: z.boolean(),
|
||||
webhookUrl: z.string().url(t("discord.urlInvalid")).or(z.literal('')),
|
||||
}),
|
||||
wecom: z.object({
|
||||
enabled: z.boolean(),
|
||||
webhookUrl: z.string().url(t("wecom.urlInvalid")).or(z.literal('')),
|
||||
}),
|
||||
categories: z.object({
|
||||
scan: z.boolean(),
|
||||
vulnerability: z.boolean(),
|
||||
@@ -46,6 +50,15 @@ export default function NotificationSettingsPage() {
|
||||
})
|
||||
}
|
||||
}
|
||||
if (val.wecom.enabled) {
|
||||
if (!val.wecom.webhookUrl || val.wecom.webhookUrl.trim() === '') {
|
||||
ctx.addIssue({
|
||||
code: z.ZodIssueCode.custom,
|
||||
message: t("wecom.requiredError"),
|
||||
path: ['wecom', 'webhookUrl'],
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
const NOTIFICATION_CATEGORIES = [
|
||||
@@ -79,6 +92,7 @@ export default function NotificationSettingsPage() {
|
||||
resolver: zodResolver(schema),
|
||||
values: data ?? {
|
||||
discord: { enabled: false, webhookUrl: '' },
|
||||
wecom: { enabled: false, webhookUrl: '' },
|
||||
categories: {
|
||||
scan: true,
|
||||
vulnerability: true,
|
||||
@@ -93,6 +107,7 @@ export default function NotificationSettingsPage() {
|
||||
}
|
||||
|
||||
const discordEnabled = form.watch('discord.enabled')
|
||||
const wecomEnabled = form.watch('wecom.enabled')
|
||||
|
||||
return (
|
||||
<div className="p-4 md:p-6 space-y-6">
|
||||
@@ -187,25 +202,59 @@ export default function NotificationSettingsPage() {
|
||||
</CardHeader>
|
||||
</Card>
|
||||
|
||||
{/* Feishu/DingTalk/WeCom - Coming soon */}
|
||||
<Card className="opacity-60">
|
||||
{/* 企业微信 */}
|
||||
<Card>
|
||||
<CardHeader className="pb-4">
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-3">
|
||||
<div className="flex h-10 w-10 items-center justify-center rounded-lg bg-muted">
|
||||
<IconBrandSlack className="h-5 w-5 text-muted-foreground" />
|
||||
<div className="flex h-10 w-10 items-center justify-center rounded-lg bg-[#07C160]/10">
|
||||
<IconBrandSlack className="h-5 w-5 text-[#07C160]" />
|
||||
</div>
|
||||
<div>
|
||||
<div className="flex items-center gap-2">
|
||||
<CardTitle className="text-base">{t("enterprise.title")}</CardTitle>
|
||||
<Badge variant="secondary" className="text-xs">{t("emailChannel.comingSoon")}</Badge>
|
||||
</div>
|
||||
<CardDescription>{t("enterprise.description")}</CardDescription>
|
||||
<CardTitle className="text-base">{t("wecom.title")}</CardTitle>
|
||||
<CardDescription>{t("wecom.description")}</CardDescription>
|
||||
</div>
|
||||
</div>
|
||||
<Switch disabled />
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="wecom.enabled"
|
||||
render={({ field }) => (
|
||||
<FormControl>
|
||||
<Switch
|
||||
checked={field.value}
|
||||
onCheckedChange={field.onChange}
|
||||
disabled={isLoading || updateMutation.isPending}
|
||||
/>
|
||||
</FormControl>
|
||||
)}
|
||||
/>
|
||||
</div>
|
||||
</CardHeader>
|
||||
{wecomEnabled && (
|
||||
<CardContent className="pt-0">
|
||||
<Separator className="mb-4" />
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="wecom.webhookUrl"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>{t("wecom.webhookLabel")}</FormLabel>
|
||||
<FormControl>
|
||||
<Input
|
||||
placeholder={t("wecom.webhookPlaceholder")}
|
||||
{...field}
|
||||
disabled={isLoading || updateMutation.isPending}
|
||||
/>
|
||||
</FormControl>
|
||||
<FormDescription>
|
||||
{t("wecom.webhookHelp")}
|
||||
</FormDescription>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
</CardContent>
|
||||
)}
|
||||
</Card>
|
||||
</TabsContent>
|
||||
|
||||
|
||||
@@ -1,17 +1,10 @@
|
||||
"use client"
|
||||
|
||||
import { useTranslations } from "next-intl"
|
||||
import { SystemLogsView } from "@/components/settings/system-logs"
|
||||
|
||||
export default function SystemLogsPage() {
|
||||
const t = useTranslations("settings.systemLogs")
|
||||
|
||||
return (
|
||||
<div className="flex flex-1 flex-col gap-4 p-4">
|
||||
<div>
|
||||
<h1 className="text-2xl font-bold tracking-tight">{t("title")}</h1>
|
||||
<p className="text-muted-foreground">{t("description")}</p>
|
||||
</div>
|
||||
<div className="flex flex-1 flex-col p-4 h-full">
|
||||
<SystemLogsView />
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -3,10 +3,16 @@
|
||||
import React from "react"
|
||||
import { usePathname, useParams } from "next/navigation"
|
||||
import Link from "next/link"
|
||||
import { Target } from "lucide-react"
|
||||
import { Target, LayoutDashboard, Package, FolderSearch, Image, ShieldAlert, Settings, HelpCircle } from "lucide-react"
|
||||
import { Skeleton } from "@/components/ui/skeleton"
|
||||
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipProvider,
|
||||
TooltipTrigger,
|
||||
} from "@/components/ui/tooltip"
|
||||
import { useTarget } from "@/hooks/use-targets"
|
||||
import { useTranslations } from "next-intl"
|
||||
|
||||
@@ -34,6 +40,8 @@ export default function TargetLayout({
|
||||
// Get primary navigation active tab
|
||||
const getPrimaryTab = () => {
|
||||
if (pathname.includes("/overview")) return "overview"
|
||||
if (pathname.includes("/directories")) return "directories"
|
||||
if (pathname.includes("/screenshots")) return "screenshots"
|
||||
if (pathname.includes("/vulnerabilities")) return "vulnerabilities"
|
||||
if (pathname.includes("/settings")) return "settings"
|
||||
// All asset pages fall under "assets"
|
||||
@@ -41,8 +49,7 @@ export default function TargetLayout({
|
||||
pathname.includes("/websites") ||
|
||||
pathname.includes("/subdomain") ||
|
||||
pathname.includes("/ip-addresses") ||
|
||||
pathname.includes("/endpoints") ||
|
||||
pathname.includes("/directories")
|
||||
pathname.includes("/endpoints")
|
||||
) {
|
||||
return "assets"
|
||||
}
|
||||
@@ -55,7 +62,6 @@ export default function TargetLayout({
|
||||
if (pathname.includes("/subdomain")) return "subdomain"
|
||||
if (pathname.includes("/ip-addresses")) return "ip-addresses"
|
||||
if (pathname.includes("/endpoints")) return "endpoints"
|
||||
if (pathname.includes("/directories")) return "directories"
|
||||
return "websites"
|
||||
}
|
||||
|
||||
@@ -67,6 +73,8 @@ export default function TargetLayout({
|
||||
const primaryPaths = {
|
||||
overview: `${basePath}/overview/`,
|
||||
assets: `${basePath}/websites/`, // Default to websites when clicking assets
|
||||
directories: `${basePath}/directories/`,
|
||||
screenshots: `${basePath}/screenshots/`,
|
||||
vulnerabilities: `${basePath}/vulnerabilities/`,
|
||||
settings: `${basePath}/settings/`,
|
||||
}
|
||||
@@ -76,7 +84,6 @@ export default function TargetLayout({
|
||||
subdomain: `${basePath}/subdomain/`,
|
||||
"ip-addresses": `${basePath}/ip-addresses/`,
|
||||
endpoints: `${basePath}/endpoints/`,
|
||||
directories: `${basePath}/directories/`,
|
||||
}
|
||||
|
||||
// Get counts for each tab from target data
|
||||
@@ -87,10 +94,11 @@ export default function TargetLayout({
|
||||
directories: (target as any)?.summary?.directories || 0,
|
||||
vulnerabilities: (target as any)?.summary?.vulnerabilities?.total || 0,
|
||||
"ip-addresses": (target as any)?.summary?.ips || 0,
|
||||
screenshots: (target as any)?.summary?.screenshots || 0,
|
||||
}
|
||||
|
||||
// Calculate total assets count
|
||||
const totalAssets = counts.websites + counts.subdomain + counts["ip-addresses"] + counts.endpoints + counts.directories
|
||||
const totalAssets = counts.websites + counts.subdomain + counts["ip-addresses"] + counts.endpoints
|
||||
|
||||
// Loading state
|
||||
if (isLoading) {
|
||||
@@ -158,41 +166,82 @@ export default function TargetLayout({
|
||||
</div>
|
||||
|
||||
{/* Primary navigation */}
|
||||
<div className="px-4 lg:px-6">
|
||||
<Tabs value={getPrimaryTab()}>
|
||||
<TabsList>
|
||||
<TabsTrigger value="overview" asChild>
|
||||
<Link href={primaryPaths.overview} className="flex items-center gap-0.5">
|
||||
{t("tabs.overview")}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="assets" asChild>
|
||||
<Link href={primaryPaths.assets} className="flex items-center gap-0.5">
|
||||
{t("tabs.assets")}
|
||||
{totalAssets > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{totalAssets}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="vulnerabilities" asChild>
|
||||
<Link href={primaryPaths.vulnerabilities} className="flex items-center gap-0.5">
|
||||
{t("tabs.vulnerabilities")}
|
||||
{counts.vulnerabilities > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.vulnerabilities}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="settings" asChild>
|
||||
<Link href={primaryPaths.settings} className="flex items-center gap-0.5">
|
||||
{t("tabs.settings")}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
</Tabs>
|
||||
<div className="flex items-center justify-between px-4 lg:px-6">
|
||||
<div className="flex items-center gap-3">
|
||||
<Tabs value={getPrimaryTab()}>
|
||||
<TabsList>
|
||||
<TabsTrigger value="overview" asChild>
|
||||
<Link href={primaryPaths.overview} className="flex items-center gap-1.5">
|
||||
<LayoutDashboard className="h-4 w-4" />
|
||||
{t("tabs.overview")}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="assets" asChild>
|
||||
<Link href={primaryPaths.assets} className="flex items-center gap-1.5">
|
||||
<Package className="h-4 w-4" />
|
||||
{t("tabs.assets")}
|
||||
{totalAssets > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{totalAssets}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="directories" asChild>
|
||||
<Link href={primaryPaths.directories} className="flex items-center gap-1.5">
|
||||
<FolderSearch className="h-4 w-4" />
|
||||
{t("tabs.directories")}
|
||||
{counts.directories > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.directories}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="screenshots" asChild>
|
||||
<Link href={primaryPaths.screenshots} className="flex items-center gap-1.5">
|
||||
<Image className="h-4 w-4" />
|
||||
{t("tabs.screenshots")}
|
||||
{counts.screenshots > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.screenshots}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="vulnerabilities" asChild>
|
||||
<Link href={primaryPaths.vulnerabilities} className="flex items-center gap-1.5">
|
||||
<ShieldAlert className="h-4 w-4" />
|
||||
{t("tabs.vulnerabilities")}
|
||||
{counts.vulnerabilities > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.vulnerabilities}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="settings" asChild>
|
||||
<Link href={primaryPaths.settings} className="flex items-center gap-1.5">
|
||||
<Settings className="h-4 w-4" />
|
||||
{t("tabs.settings")}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
</Tabs>
|
||||
|
||||
{getPrimaryTab() === "directories" && (
|
||||
<TooltipProvider>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<HelpCircle className="h-4 w-4 text-muted-foreground cursor-help" />
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side="right" className="max-w-sm">
|
||||
{t("directoriesHelp")}
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</TooltipProvider>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Secondary navigation (only for assets) */}
|
||||
@@ -202,7 +251,7 @@ export default function TargetLayout({
|
||||
<TabsList variant="underline">
|
||||
<TabsTrigger value="websites" variant="underline" asChild>
|
||||
<Link href={secondaryPaths.websites} className="flex items-center gap-0.5">
|
||||
Websites
|
||||
{t("tabs.websites")}
|
||||
{counts.websites > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.websites}
|
||||
@@ -212,7 +261,7 @@ export default function TargetLayout({
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="subdomain" variant="underline" asChild>
|
||||
<Link href={secondaryPaths.subdomain} className="flex items-center gap-0.5">
|
||||
Subdomains
|
||||
{t("tabs.subdomains")}
|
||||
{counts.subdomain > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.subdomain}
|
||||
@@ -222,7 +271,7 @@ export default function TargetLayout({
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="ip-addresses" variant="underline" asChild>
|
||||
<Link href={secondaryPaths["ip-addresses"]} className="flex items-center gap-0.5">
|
||||
IPs
|
||||
{t("tabs.ips")}
|
||||
{counts["ip-addresses"] > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts["ip-addresses"]}
|
||||
@@ -232,7 +281,7 @@ export default function TargetLayout({
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="endpoints" variant="underline" asChild>
|
||||
<Link href={secondaryPaths.endpoints} className="flex items-center gap-0.5">
|
||||
URLs
|
||||
{t("tabs.urls")}
|
||||
{counts.endpoints > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.endpoints}
|
||||
@@ -240,16 +289,6 @@ export default function TargetLayout({
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="directories" variant="underline" asChild>
|
||||
<Link href={secondaryPaths.directories} className="flex items-center gap-0.5">
|
||||
Directories
|
||||
{counts.directories > 0 && (
|
||||
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
|
||||
{counts.directories}
|
||||
</Badge>
|
||||
)}
|
||||
</Link>
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
</Tabs>
|
||||
</div>
|
||||
|
||||
15
frontend/app/[locale]/target/[id]/screenshots/page.tsx
Normal file
15
frontend/app/[locale]/target/[id]/screenshots/page.tsx
Normal file
@@ -0,0 +1,15 @@
|
||||
"use client"
|
||||
|
||||
import { useParams } from "next/navigation"
|
||||
import { ScreenshotsGallery } from "@/components/screenshots/screenshots-gallery"
|
||||
|
||||
export default function ScreenshotsPage() {
|
||||
const { id } = useParams<{ id: string }>()
|
||||
const targetId = Number(id)
|
||||
|
||||
return (
|
||||
<div className="px-4 lg:px-6">
|
||||
<ScreenshotsGallery targetId={targetId} />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -1,9 +1,19 @@
|
||||
"use client"
|
||||
|
||||
import { useEffect, useMemo, useState } from "react"
|
||||
import Editor from "@monaco-editor/react"
|
||||
import dynamic from "next/dynamic"
|
||||
import Link from "next/link"
|
||||
import { useParams } from "next/navigation"
|
||||
|
||||
// Dynamic import Monaco Editor to reduce bundle size (~2MB)
|
||||
const Editor = dynamic(() => import("@monaco-editor/react"), {
|
||||
ssr: false,
|
||||
loading: () => (
|
||||
<div className="flex items-center justify-center h-full">
|
||||
<div className="text-sm text-muted-foreground">Loading editor...</div>
|
||||
</div>
|
||||
),
|
||||
})
|
||||
import {
|
||||
ChevronDown,
|
||||
ChevronRight,
|
||||
@@ -160,7 +170,7 @@ export default function NucleiRepoDetailPage() {
|
||||
} else {
|
||||
setEditorValue("")
|
||||
}
|
||||
}, [templateContent?.path])
|
||||
}, [templateContent])
|
||||
|
||||
const toggleFolder = (path: string) => {
|
||||
setExpandedPaths((prev) =>
|
||||
@@ -248,7 +258,7 @@ export default function NucleiRepoDetailPage() {
|
||||
}
|
||||
}}
|
||||
className={cn(
|
||||
"flex w-full items-center gap-1.5 rounded-md px-2 py-1.5 text-left text-sm transition-colors",
|
||||
"tree-node-item flex w-full items-center gap-1.5 rounded-md px-2 py-1.5 text-left text-sm transition-colors",
|
||||
isFolder && "font-medium",
|
||||
isActive
|
||||
? "bg-primary/10 text-primary"
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 25 KiB |
@@ -245,6 +245,12 @@
|
||||
/* Chrome, Safari and Opera */
|
||||
}
|
||||
|
||||
/* 性能优化:长列表渲染优化 - content-visibility */
|
||||
.tree-node-item {
|
||||
content-visibility: auto;
|
||||
contain-intrinsic-size: 0 36px;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* 登录页背景 - 使用主题色适配亮暗模式 */
|
||||
@@ -272,6 +278,20 @@
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
/* 终端光标闪烁动画 */
|
||||
@keyframes blink {
|
||||
0%, 50% {
|
||||
opacity: 1;
|
||||
}
|
||||
51%, 100% {
|
||||
opacity: 0;
|
||||
}
|
||||
}
|
||||
|
||||
.animate-blink {
|
||||
animation: blink 1s step-end infinite;
|
||||
}
|
||||
|
||||
/* 通知铃铛摇晃动画 */
|
||||
@keyframes wiggle {
|
||||
0%, 100% {
|
||||
@@ -367,4 +387,206 @@
|
||||
|
||||
.animate-border-flow {
|
||||
animation: border-flow 2s linear infinite;
|
||||
}
|
||||
}
|
||||
|
||||
/* Dashboard 淡入动画 - 纯 CSS 实现,避免 hydration mismatch */
|
||||
@keyframes dashboard-fade-in {
|
||||
from {
|
||||
opacity: 0;
|
||||
filter: blur(4px);
|
||||
}
|
||||
to {
|
||||
opacity: 1;
|
||||
filter: blur(0);
|
||||
}
|
||||
}
|
||||
|
||||
.animate-dashboard-fade-in {
|
||||
animation: dashboard-fade-in 500ms ease-out forwards;
|
||||
}
|
||||
|
||||
/* 登录页 - Glitch Reveal(全屏开场) - 增强版赛博朋克风格 */
|
||||
@keyframes orbit-splash-jitter {
|
||||
0%,
|
||||
100% {
|
||||
transform: translate3d(0, 0, 0);
|
||||
filter: none;
|
||||
}
|
||||
10% {
|
||||
transform: translate3d(-2px, 0, 0);
|
||||
}
|
||||
20% {
|
||||
transform: translate3d(2px, -1px, 0);
|
||||
filter: hue-rotate(10deg);
|
||||
}
|
||||
30% {
|
||||
transform: translate3d(-1px, 1px, 0);
|
||||
}
|
||||
45% {
|
||||
transform: translate3d(1px, 0, 0);
|
||||
filter: hue-rotate(-10deg);
|
||||
}
|
||||
60% {
|
||||
transform: translate3d(0, -1px, 0);
|
||||
}
|
||||
75% {
|
||||
transform: translate3d(1px, 1px, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@keyframes orbit-splash-noise {
|
||||
0% {
|
||||
transform: translate3d(-2%, -2%, 0);
|
||||
opacity: 0.22;
|
||||
}
|
||||
25% {
|
||||
transform: translate3d(2%, -1%, 0);
|
||||
opacity: 0.28;
|
||||
}
|
||||
50% {
|
||||
transform: translate3d(-1%, 2%, 0);
|
||||
opacity: 0.24;
|
||||
}
|
||||
75% {
|
||||
transform: translate3d(1%, 1%, 0);
|
||||
opacity: 0.30;
|
||||
}
|
||||
100% {
|
||||
transform: translate3d(-2%, -2%, 0);
|
||||
opacity: 0.22;
|
||||
}
|
||||
}
|
||||
|
||||
@keyframes orbit-splash-sweep {
|
||||
0% {
|
||||
transform: translate3d(0, -120%, 0);
|
||||
opacity: 0;
|
||||
}
|
||||
18% {
|
||||
opacity: 0.35;
|
||||
}
|
||||
100% {
|
||||
transform: translate3d(0, 120%, 0);
|
||||
opacity: 0;
|
||||
}
|
||||
}
|
||||
|
||||
@keyframes orbit-glitch-clip {
|
||||
0% {
|
||||
clip-path: inset(0 0 0 0);
|
||||
transform: translate3d(0, 0, 0);
|
||||
}
|
||||
16% {
|
||||
clip-path: inset(12% 0 72% 0);
|
||||
transform: translate3d(-2px, 0, 0);
|
||||
}
|
||||
32% {
|
||||
clip-path: inset(54% 0 18% 0);
|
||||
transform: translate3d(2px, 0, 0);
|
||||
}
|
||||
48% {
|
||||
clip-path: inset(78% 0 6% 0);
|
||||
transform: translate3d(-1px, 0, 0);
|
||||
}
|
||||
64% {
|
||||
clip-path: inset(30% 0 48% 0);
|
||||
transform: translate3d(1px, 0, 0);
|
||||
}
|
||||
80% {
|
||||
clip-path: inset(6% 0 86% 0);
|
||||
transform: translate3d(0, 0, 0);
|
||||
}
|
||||
100% {
|
||||
clip-path: inset(0 0 0 0);
|
||||
transform: translate3d(0, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
.orbit-splash-glitch {
|
||||
isolation: isolate;
|
||||
animation: orbit-splash-jitter 0.5s steps(2, end) infinite;
|
||||
}
|
||||
|
||||
.orbit-splash-glitch::before {
|
||||
content: "";
|
||||
position: absolute;
|
||||
inset: -20%;
|
||||
pointer-events: none;
|
||||
z-index: 20;
|
||||
mix-blend-mode: screen;
|
||||
background-image:
|
||||
repeating-linear-gradient(
|
||||
0deg,
|
||||
rgba(255, 255, 255, 0.08) 0px,
|
||||
rgba(255, 255, 255, 0.08) 1px,
|
||||
transparent 1px,
|
||||
transparent 4px
|
||||
),
|
||||
repeating-linear-gradient(
|
||||
90deg,
|
||||
rgba(255, 16, 240, 0.15) 0px,
|
||||
rgba(255, 16, 240, 0.15) 1px,
|
||||
transparent 1px,
|
||||
transparent 84px
|
||||
),
|
||||
repeating-linear-gradient(
|
||||
45deg,
|
||||
rgba(176, 38, 255, 0.08) 0px,
|
||||
rgba(176, 38, 255, 0.08) 1px,
|
||||
transparent 1px,
|
||||
transparent 9px
|
||||
);
|
||||
animation: orbit-splash-noise 0.5s steps(2, end) infinite;
|
||||
}
|
||||
|
||||
.orbit-splash-glitch::after {
|
||||
content: "";
|
||||
position: absolute;
|
||||
inset: 0;
|
||||
pointer-events: none;
|
||||
z-index: 20;
|
||||
background: linear-gradient(
|
||||
180deg,
|
||||
transparent 0%,
|
||||
rgba(255, 16, 240, 0.18) 50%,
|
||||
transparent 100%
|
||||
);
|
||||
opacity: 0;
|
||||
animation: orbit-splash-sweep 0.5s ease-out both;
|
||||
}
|
||||
|
||||
.orbit-glitch-text {
|
||||
position: relative;
|
||||
display: inline-block;
|
||||
text-shadow: 0 0 20px rgba(255, 16, 240, 0.4), 0 0 40px rgba(255, 16, 240, 0.2);
|
||||
}
|
||||
|
||||
.orbit-glitch-text::before,
|
||||
.orbit-glitch-text::after {
|
||||
content: attr(data-text);
|
||||
position: absolute;
|
||||
inset: 0;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
.orbit-glitch-text::before {
|
||||
color: rgba(255, 16, 240, 0.85);
|
||||
transform: translate3d(-2px, 0, 0);
|
||||
animation: orbit-glitch-clip 0.5s steps(2, end) infinite;
|
||||
}
|
||||
|
||||
.orbit-glitch-text::after {
|
||||
color: rgba(176, 38, 255, 0.75);
|
||||
transform: translate3d(2px, 0, 0);
|
||||
animation: orbit-glitch-clip 0.5s steps(2, end) infinite reverse;
|
||||
}
|
||||
|
||||
@media (prefers-reduced-motion: reduce) {
|
||||
.orbit-splash-glitch,
|
||||
.orbit-splash-glitch::before,
|
||||
.orbit-splash-glitch::after,
|
||||
.orbit-glitch-text::before,
|
||||
.orbit-glitch-text::after {
|
||||
animation: none !important;
|
||||
}
|
||||
}
|
||||
|
||||
16
frontend/app/icon.svg
Normal file
16
frontend/app/icon.svg
Normal file
@@ -0,0 +1,16 @@
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
width="256"
|
||||
height="256"
|
||||
viewBox="0 0 24 24"
|
||||
fill="none"
|
||||
stroke="#06b6d4"
|
||||
stroke-width="2"
|
||||
stroke-linecap="round"
|
||||
stroke-linejoin="round"
|
||||
>
|
||||
<title>Orbit</title>
|
||||
<path d="M21 12h-8a1 1 0 1 0 -1 1v8a9 9 0 0 0 9 -9" />
|
||||
<path d="M16 9a5 5 0 1 0 -7 7" />
|
||||
<path d="M20.486 9a9 9 0 1 0 -11.482 11.495" />
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 371 B |
@@ -18,5 +18,9 @@
|
||||
"lib": "@/lib",
|
||||
"hooks": "@/hooks"
|
||||
},
|
||||
"registries": {}
|
||||
"registries": {
|
||||
"@animate-ui": "https://animate-ui.com/r/{name}.json",
|
||||
"@magicui": "https://magicui.design/r/{name}.json",
|
||||
"@react-bits": "https://reactbits.dev/r/{name}.json"
|
||||
}
|
||||
}
|
||||
|
||||
6
frontend/components/FaultyTerminal.css
Normal file
6
frontend/components/FaultyTerminal.css
Normal file
@@ -0,0 +1,6 @@
|
||||
.faulty-terminal-container {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
}
|
||||
424
frontend/components/FaultyTerminal.tsx
Normal file
424
frontend/components/FaultyTerminal.tsx
Normal file
@@ -0,0 +1,424 @@
|
||||
import { Renderer, Program, Mesh, Color, Triangle } from 'ogl';
|
||||
import { useEffect, useRef, useMemo, useCallback } from 'react';
|
||||
import './FaultyTerminal.css';
|
||||
|
||||
const vertexShader = `
|
||||
attribute vec2 position;
|
||||
attribute vec2 uv;
|
||||
varying vec2 vUv;
|
||||
void main() {
|
||||
vUv = uv;
|
||||
gl_Position = vec4(position, 0.0, 1.0);
|
||||
}
|
||||
`;
|
||||
|
||||
const fragmentShader = `
|
||||
precision mediump float;
|
||||
|
||||
varying vec2 vUv;
|
||||
|
||||
uniform float iTime;
|
||||
uniform vec3 iResolution;
|
||||
uniform float uScale;
|
||||
|
||||
uniform vec2 uGridMul;
|
||||
uniform float uDigitSize;
|
||||
uniform float uScanlineIntensity;
|
||||
uniform float uGlitchAmount;
|
||||
uniform float uFlickerAmount;
|
||||
uniform float uNoiseAmp;
|
||||
uniform float uChromaticAberration;
|
||||
uniform float uDither;
|
||||
uniform float uCurvature;
|
||||
uniform vec3 uTint;
|
||||
uniform vec2 uMouse;
|
||||
uniform float uMouseStrength;
|
||||
uniform float uUseMouse;
|
||||
uniform float uPageLoadProgress;
|
||||
uniform float uUsePageLoadAnimation;
|
||||
uniform float uBrightness;
|
||||
|
||||
float time;
|
||||
|
||||
float hash21(vec2 p){
|
||||
p = fract(p * 234.56);
|
||||
p += dot(p, p + 34.56);
|
||||
return fract(p.x * p.y);
|
||||
}
|
||||
|
||||
float noise(vec2 p)
|
||||
{
|
||||
return sin(p.x * 10.0) * sin(p.y * (3.0 + sin(time * 0.090909))) + 0.2;
|
||||
}
|
||||
|
||||
mat2 rotate(float angle)
|
||||
{
|
||||
float c = cos(angle);
|
||||
float s = sin(angle);
|
||||
return mat2(c, -s, s, c);
|
||||
}
|
||||
|
||||
float fbm(vec2 p)
|
||||
{
|
||||
p *= 1.1;
|
||||
float f = 0.0;
|
||||
float amp = 0.5 * uNoiseAmp;
|
||||
|
||||
mat2 modify0 = rotate(time * 0.02);
|
||||
f += amp * noise(p);
|
||||
p = modify0 * p * 2.0;
|
||||
amp *= 0.454545;
|
||||
|
||||
mat2 modify1 = rotate(time * 0.02);
|
||||
f += amp * noise(p);
|
||||
p = modify1 * p * 2.0;
|
||||
amp *= 0.454545;
|
||||
|
||||
mat2 modify2 = rotate(time * 0.08);
|
||||
f += amp * noise(p);
|
||||
|
||||
return f;
|
||||
}
|
||||
|
||||
float pattern(vec2 p, out vec2 q, out vec2 r) {
|
||||
vec2 offset1 = vec2(1.0);
|
||||
vec2 offset0 = vec2(0.0);
|
||||
mat2 rot01 = rotate(0.1 * time);
|
||||
mat2 rot1 = rotate(0.1);
|
||||
|
||||
q = vec2(fbm(p + offset1), fbm(rot01 * p + offset1));
|
||||
r = vec2(fbm(rot1 * q + offset0), fbm(q + offset0));
|
||||
return fbm(p + r);
|
||||
}
|
||||
|
||||
float digit(vec2 p){
|
||||
vec2 grid = uGridMul * 15.0;
|
||||
vec2 s = floor(p * grid) / grid;
|
||||
p = p * grid;
|
||||
vec2 q, r;
|
||||
float intensity = pattern(s * 0.1, q, r) * 1.3 - 0.03;
|
||||
|
||||
if(uUseMouse > 0.5){
|
||||
vec2 mouseWorld = uMouse * uScale;
|
||||
float distToMouse = distance(s, mouseWorld);
|
||||
float mouseInfluence = exp(-distToMouse * 8.0) * uMouseStrength * 10.0;
|
||||
intensity += mouseInfluence;
|
||||
|
||||
float ripple = sin(distToMouse * 20.0 - iTime * 5.0) * 0.1 * mouseInfluence;
|
||||
intensity += ripple;
|
||||
}
|
||||
|
||||
if(uUsePageLoadAnimation > 0.5){
|
||||
float cellRandom = fract(sin(dot(s, vec2(12.9898, 78.233))) * 43758.5453);
|
||||
float cellDelay = cellRandom * 0.8;
|
||||
float cellProgress = clamp((uPageLoadProgress - cellDelay) / 0.2, 0.0, 1.0);
|
||||
|
||||
float fadeAlpha = smoothstep(0.0, 1.0, cellProgress);
|
||||
intensity *= fadeAlpha;
|
||||
}
|
||||
|
||||
p = fract(p);
|
||||
p *= uDigitSize;
|
||||
|
||||
float px5 = p.x * 5.0;
|
||||
float py5 = (1.0 - p.y) * 5.0;
|
||||
float x = fract(px5);
|
||||
float y = fract(py5);
|
||||
|
||||
float i = floor(py5) - 2.0;
|
||||
float j = floor(px5) - 2.0;
|
||||
float n = i * i + j * j;
|
||||
float f = n * 0.0625;
|
||||
|
||||
float isOn = step(0.1, intensity - f);
|
||||
float brightness = isOn * (0.2 + y * 0.8) * (0.75 + x * 0.25);
|
||||
|
||||
return step(0.0, p.x) * step(p.x, 1.0) * step(0.0, p.y) * step(p.y, 1.0) * brightness;
|
||||
}
|
||||
|
||||
float onOff(float a, float b, float c)
|
||||
{
|
||||
return step(c, sin(iTime + a * cos(iTime * b))) * uFlickerAmount;
|
||||
}
|
||||
|
||||
float displace(vec2 look)
|
||||
{
|
||||
float y = look.y - mod(iTime * 0.25, 1.0);
|
||||
float window = 1.0 / (1.0 + 50.0 * y * y);
|
||||
return sin(look.y * 20.0 + iTime) * 0.0125 * onOff(4.0, 2.0, 0.8) * (1.0 + cos(iTime * 60.0)) * window;
|
||||
}
|
||||
|
||||
vec3 getColor(vec2 p){
|
||||
|
||||
float bar = step(mod(p.y + time * 20.0, 1.0), 0.2) * 0.4 + 1.0;
|
||||
bar *= uScanlineIntensity;
|
||||
|
||||
float displacement = displace(p);
|
||||
p.x += displacement;
|
||||
|
||||
if (uGlitchAmount != 1.0) {
|
||||
float extra = displacement * (uGlitchAmount - 1.0);
|
||||
p.x += extra;
|
||||
}
|
||||
|
||||
float middle = digit(p);
|
||||
|
||||
const float off = 0.002;
|
||||
float sum = digit(p + vec2(-off, -off)) + digit(p + vec2(0.0, -off)) + digit(p + vec2(off, -off)) +
|
||||
digit(p + vec2(-off, 0.0)) + digit(p + vec2(0.0, 0.0)) + digit(p + vec2(off, 0.0)) +
|
||||
digit(p + vec2(-off, off)) + digit(p + vec2(0.0, off)) + digit(p + vec2(off, off));
|
||||
|
||||
vec3 baseColor = vec3(0.9) * middle + sum * 0.1 * vec3(1.0) * bar;
|
||||
return baseColor;
|
||||
}
|
||||
|
||||
vec2 barrel(vec2 uv){
|
||||
vec2 c = uv * 2.0 - 1.0;
|
||||
float r2 = dot(c, c);
|
||||
c *= 1.0 + uCurvature * r2;
|
||||
return c * 0.5 + 0.5;
|
||||
}
|
||||
|
||||
void main() {
|
||||
time = iTime * 0.333333;
|
||||
vec2 uv = vUv;
|
||||
|
||||
if(uCurvature != 0.0){
|
||||
uv = barrel(uv);
|
||||
}
|
||||
|
||||
vec2 p = uv * uScale;
|
||||
vec3 col = getColor(p);
|
||||
|
||||
if(uChromaticAberration != 0.0){
|
||||
vec2 ca = vec2(uChromaticAberration) / iResolution.xy;
|
||||
col.r = getColor(p + ca).r;
|
||||
col.b = getColor(p - ca).b;
|
||||
}
|
||||
|
||||
col *= uTint;
|
||||
col *= uBrightness;
|
||||
|
||||
if(uDither > 0.0){
|
||||
float rnd = hash21(gl_FragCoord.xy);
|
||||
col += (rnd - 0.5) * (uDither * 0.003922);
|
||||
}
|
||||
|
||||
gl_FragColor = vec4(col, 1.0);
|
||||
}
|
||||
`;
|
||||
|
||||
function hexToRgb(hex: string) {
|
||||
let h = hex.replace('#', '').trim();
|
||||
if (h.length === 3)
|
||||
h = h
|
||||
.split('')
|
||||
.map(c => c + c)
|
||||
.join('');
|
||||
const num = parseInt(h, 16);
|
||||
return [((num >> 16) & 255) / 255, ((num >> 8) & 255) / 255, (num & 255) / 255];
|
||||
}
|
||||
|
||||
interface FaultyTerminalProps {
|
||||
scale?: number;
|
||||
gridMul?: [number, number];
|
||||
digitSize?: number;
|
||||
timeScale?: number;
|
||||
pause?: boolean;
|
||||
scanlineIntensity?: number;
|
||||
glitchAmount?: number;
|
||||
flickerAmount?: number;
|
||||
noiseAmp?: number;
|
||||
chromaticAberration?: number;
|
||||
dither?: number;
|
||||
curvature?: number;
|
||||
tint?: string;
|
||||
mouseReact?: boolean;
|
||||
mouseStrength?: number;
|
||||
dpr?: number;
|
||||
pageLoadAnimation?: boolean;
|
||||
brightness?: number;
|
||||
className?: string;
|
||||
style?: React.CSSProperties;
|
||||
[key: string]: any;
|
||||
}
|
||||
|
||||
export default function FaultyTerminal({
|
||||
scale = 1,
|
||||
gridMul = [2, 1],
|
||||
digitSize = 1.5,
|
||||
timeScale = 0.3,
|
||||
pause = false,
|
||||
scanlineIntensity = 0.3,
|
||||
glitchAmount = 1,
|
||||
flickerAmount = 1,
|
||||
noiseAmp = 0,
|
||||
chromaticAberration = 0,
|
||||
dither = 0,
|
||||
curvature = 0.2,
|
||||
tint = '#ffffff',
|
||||
mouseReact = true,
|
||||
mouseStrength = 0.2,
|
||||
dpr = Math.min(window.devicePixelRatio || 1, 2),
|
||||
pageLoadAnimation = true,
|
||||
brightness = 1,
|
||||
className,
|
||||
style,
|
||||
...rest
|
||||
}: FaultyTerminalProps) {
|
||||
const containerRef = useRef<HTMLDivElement>(null);
|
||||
const programRef = useRef<any>(null);
|
||||
const rendererRef = useRef<any>(null);
|
||||
const mouseRef = useRef({ x: 0.5, y: 0.5 });
|
||||
const smoothMouseRef = useRef({ x: 0.5, y: 0.5 });
|
||||
const frozenTimeRef = useRef(0);
|
||||
const rafRef = useRef(0);
|
||||
const loadAnimationStartRef = useRef(0);
|
||||
const timeOffsetRef = useRef(Math.random() * 100);
|
||||
|
||||
const tintVec = useMemo(() => hexToRgb(tint), [tint]);
|
||||
|
||||
const ditherValue = useMemo(() => (typeof dither === 'boolean' ? (dither ? 1 : 0) : dither), [dither]);
|
||||
|
||||
const handleMouseMove = useCallback((e: MouseEvent) => {
|
||||
const ctn = containerRef.current;
|
||||
if (!ctn) return;
|
||||
const rect = ctn.getBoundingClientRect();
|
||||
const x = (e.clientX - rect.left) / rect.width;
|
||||
const y = 1 - (e.clientY - rect.top) / rect.height;
|
||||
mouseRef.current = { x, y };
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
const ctn = containerRef.current;
|
||||
if (!ctn) return;
|
||||
|
||||
const renderer = new Renderer({ dpr });
|
||||
rendererRef.current = renderer;
|
||||
const gl = renderer.gl;
|
||||
gl.clearColor(0, 0, 0, 1);
|
||||
|
||||
const geometry = new Triangle(gl);
|
||||
|
||||
const program = new Program(gl, {
|
||||
vertex: vertexShader,
|
||||
fragment: fragmentShader,
|
||||
uniforms: {
|
||||
iTime: { value: 0 },
|
||||
iResolution: {
|
||||
value: new Color(gl.canvas.width, gl.canvas.height, gl.canvas.width / gl.canvas.height)
|
||||
},
|
||||
uScale: { value: scale },
|
||||
|
||||
uGridMul: { value: new Float32Array(gridMul) },
|
||||
uDigitSize: { value: digitSize },
|
||||
uScanlineIntensity: { value: scanlineIntensity },
|
||||
uGlitchAmount: { value: glitchAmount },
|
||||
uFlickerAmount: { value: flickerAmount },
|
||||
uNoiseAmp: { value: noiseAmp },
|
||||
uChromaticAberration: { value: chromaticAberration },
|
||||
uDither: { value: ditherValue },
|
||||
uCurvature: { value: curvature },
|
||||
uTint: { value: new Color(tintVec[0], tintVec[1], tintVec[2]) },
|
||||
uMouse: {
|
||||
value: new Float32Array([smoothMouseRef.current.x, smoothMouseRef.current.y])
|
||||
},
|
||||
uMouseStrength: { value: mouseStrength },
|
||||
uUseMouse: { value: mouseReact ? 1 : 0 },
|
||||
uPageLoadProgress: { value: pageLoadAnimation ? 0 : 1 },
|
||||
uUsePageLoadAnimation: { value: pageLoadAnimation ? 1 : 0 },
|
||||
uBrightness: { value: brightness }
|
||||
}
|
||||
});
|
||||
programRef.current = program;
|
||||
|
||||
const mesh = new Mesh(gl, { geometry, program });
|
||||
|
||||
function resize() {
|
||||
if (!ctn || !renderer) return;
|
||||
renderer.setSize(ctn.offsetWidth, ctn.offsetHeight);
|
||||
program.uniforms.iResolution.value = new Color(
|
||||
gl.canvas.width,
|
||||
gl.canvas.height,
|
||||
gl.canvas.width / gl.canvas.height
|
||||
);
|
||||
}
|
||||
|
||||
const resizeObserver = new ResizeObserver(() => resize());
|
||||
resizeObserver.observe(ctn);
|
||||
resize();
|
||||
|
||||
const update = (t: number) => {
|
||||
rafRef.current = requestAnimationFrame(update);
|
||||
|
||||
if (pageLoadAnimation && loadAnimationStartRef.current === 0) {
|
||||
loadAnimationStartRef.current = t;
|
||||
}
|
||||
|
||||
if (!pause) {
|
||||
const elapsed = (t * 0.001 + timeOffsetRef.current) * timeScale;
|
||||
program.uniforms.iTime.value = elapsed;
|
||||
frozenTimeRef.current = elapsed;
|
||||
} else {
|
||||
program.uniforms.iTime.value = frozenTimeRef.current;
|
||||
}
|
||||
|
||||
if (pageLoadAnimation && loadAnimationStartRef.current > 0) {
|
||||
const animationDuration = 2000;
|
||||
const animationElapsed = t - loadAnimationStartRef.current;
|
||||
const progress = Math.min(animationElapsed / animationDuration, 1);
|
||||
program.uniforms.uPageLoadProgress.value = progress;
|
||||
}
|
||||
|
||||
if (mouseReact) {
|
||||
const dampingFactor = 0.08;
|
||||
const smoothMouse = smoothMouseRef.current;
|
||||
const mouse = mouseRef.current;
|
||||
smoothMouse.x += (mouse.x - smoothMouse.x) * dampingFactor;
|
||||
smoothMouse.y += (mouse.y - smoothMouse.y) * dampingFactor;
|
||||
|
||||
const mouseUniform = program.uniforms.uMouse.value;
|
||||
mouseUniform[0] = smoothMouse.x;
|
||||
mouseUniform[1] = smoothMouse.y;
|
||||
}
|
||||
|
||||
renderer.render({ scene: mesh });
|
||||
};
|
||||
rafRef.current = requestAnimationFrame(update);
|
||||
ctn.appendChild(gl.canvas);
|
||||
|
||||
if (mouseReact) window.addEventListener('mousemove', handleMouseMove);
|
||||
|
||||
return () => {
|
||||
cancelAnimationFrame(rafRef.current);
|
||||
resizeObserver.disconnect();
|
||||
if (mouseReact) window.removeEventListener('mousemove', handleMouseMove);
|
||||
if (gl.canvas.parentElement === ctn) ctn.removeChild(gl.canvas);
|
||||
gl.getExtension('WEBGL_lose_context')?.loseContext();
|
||||
loadAnimationStartRef.current = 0;
|
||||
timeOffsetRef.current = Math.random() * 100;
|
||||
};
|
||||
}, [
|
||||
dpr,
|
||||
pause,
|
||||
timeScale,
|
||||
scale,
|
||||
gridMul,
|
||||
digitSize,
|
||||
scanlineIntensity,
|
||||
glitchAmount,
|
||||
flickerAmount,
|
||||
noiseAmp,
|
||||
chromaticAberration,
|
||||
ditherValue,
|
||||
curvature,
|
||||
tintVec,
|
||||
mouseReact,
|
||||
mouseStrength,
|
||||
pageLoadAnimation,
|
||||
brightness,
|
||||
handleMouseMove
|
||||
]);
|
||||
|
||||
return <div ref={containerRef} className={`faulty-terminal-container ${className}`} style={style} {...rest} />;
|
||||
}
|
||||
6
frontend/components/PixelBlast.css
Normal file
6
frontend/components/PixelBlast.css
Normal file
@@ -0,0 +1,6 @@
|
||||
.pixel-blast-container {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
}
|
||||
782
frontend/components/PixelBlast.tsx
Normal file
782
frontend/components/PixelBlast.tsx
Normal file
@@ -0,0 +1,782 @@
|
||||
import { useEffect, useRef, useState, useMemo } from 'react';
|
||||
import * as THREE from 'three';
|
||||
import { EffectComposer, EffectPass, RenderPass, Effect } from 'postprocessing';
|
||||
import './PixelBlast.css';
|
||||
|
||||
const createTouchTexture = () => {
|
||||
const size = 64;
|
||||
const canvas = document.createElement('canvas');
|
||||
canvas.width = size;
|
||||
canvas.height = size;
|
||||
const ctx = canvas.getContext('2d');
|
||||
if (!ctx) throw new Error('2D context not available');
|
||||
ctx.fillStyle = 'black';
|
||||
ctx.fillRect(0, 0, canvas.width, canvas.height);
|
||||
const texture = new THREE.Texture(canvas);
|
||||
texture.minFilter = THREE.LinearFilter;
|
||||
texture.magFilter = THREE.LinearFilter;
|
||||
texture.generateMipmaps = false;
|
||||
const trail: any[] = [];
|
||||
let last: any = null;
|
||||
const maxAge = 64;
|
||||
let radius = 0.1 * size;
|
||||
const speed = 1 / maxAge;
|
||||
const clear = () => {
|
||||
ctx.fillStyle = 'black';
|
||||
ctx.fillRect(0, 0, canvas.width, canvas.height);
|
||||
};
|
||||
const drawPoint = (p: any) => {
|
||||
const pos = { x: p.x * size, y: (1 - p.y) * size };
|
||||
let intensity = 1;
|
||||
const easeOutSine = (t: number) => Math.sin((t * Math.PI) / 2);
|
||||
const easeOutQuad = (t: number) => -t * (t - 2);
|
||||
if (p.age < maxAge * 0.3) intensity = easeOutSine(p.age / (maxAge * 0.3));
|
||||
else intensity = easeOutQuad(1 - (p.age - maxAge * 0.3) / (maxAge * 0.7)) || 0;
|
||||
intensity *= p.force;
|
||||
const color = `${((p.vx + 1) / 2) * 255}, ${((p.vy + 1) / 2) * 255}, ${intensity * 255}`;
|
||||
const offset = size * 5;
|
||||
ctx.shadowOffsetX = offset;
|
||||
ctx.shadowOffsetY = offset;
|
||||
ctx.shadowBlur = radius;
|
||||
ctx.shadowColor = `rgba(${color},${0.22 * intensity})`;
|
||||
ctx.beginPath();
|
||||
ctx.fillStyle = 'rgba(255,0,0,1)';
|
||||
ctx.arc(pos.x - offset, pos.y - offset, radius, 0, Math.PI * 2);
|
||||
ctx.fill();
|
||||
};
|
||||
const addTouch = (norm: any) => {
|
||||
let force = 0;
|
||||
let vx = 0;
|
||||
let vy = 0;
|
||||
if (last) {
|
||||
const dx = norm.x - last.x;
|
||||
const dy = norm.y - last.y;
|
||||
if (dx === 0 && dy === 0) return;
|
||||
const dd = dx * dx + dy * dy;
|
||||
const d = Math.sqrt(dd);
|
||||
vx = dx / (d || 1);
|
||||
vy = dy / (d || 1);
|
||||
force = Math.min(dd * 10000, 1);
|
||||
}
|
||||
last = { x: norm.x, y: norm.y };
|
||||
trail.push({ x: norm.x, y: norm.y, age: 0, force, vx, vy });
|
||||
};
|
||||
const update = () => {
|
||||
clear();
|
||||
for (let i = trail.length - 1; i >= 0; i--) {
|
||||
const point = trail[i];
|
||||
const f = point.force * speed * (1 - point.age / maxAge);
|
||||
point.x += point.vx * f;
|
||||
point.y += point.vy * f;
|
||||
point.age++;
|
||||
if (point.age > maxAge) trail.splice(i, 1);
|
||||
}
|
||||
for (let i = 0; i < trail.length; i++) drawPoint(trail[i]);
|
||||
texture.needsUpdate = true;
|
||||
};
|
||||
return {
|
||||
canvas,
|
||||
texture,
|
||||
addTouch,
|
||||
update,
|
||||
set radiusScale(v) {
|
||||
radius = 0.1 * size * v;
|
||||
},
|
||||
get radiusScale() {
|
||||
return radius / (0.1 * size);
|
||||
},
|
||||
size
|
||||
};
|
||||
};
|
||||
|
||||
const createLiquidEffect = (texture: any, opts: any) => {
|
||||
const fragment = `
|
||||
uniform sampler2D uTexture;
|
||||
uniform float uStrength;
|
||||
uniform float uTime;
|
||||
uniform float uFreq;
|
||||
|
||||
void mainUv(inout vec2 uv) {
|
||||
vec4 tex = texture2D(uTexture, uv);
|
||||
float vx = tex.r * 2.0 - 1.0;
|
||||
float vy = tex.g * 2.0 - 1.0;
|
||||
float intensity = tex.b;
|
||||
|
||||
float wave = 0.5 + 0.5 * sin(uTime * uFreq + intensity * 6.2831853);
|
||||
|
||||
float amt = uStrength * intensity * wave;
|
||||
|
||||
uv += vec2(vx, vy) * amt;
|
||||
}
|
||||
`;
|
||||
return new Effect('LiquidEffect', fragment, {
|
||||
uniforms: new Map([
|
||||
['uTexture', new THREE.Uniform(texture)],
|
||||
['uStrength', new THREE.Uniform(opts?.strength ?? 0.025)],
|
||||
['uTime', new THREE.Uniform(0)],
|
||||
['uFreq', new THREE.Uniform(opts?.freq ?? 4.5)]
|
||||
])
|
||||
});
|
||||
};
|
||||
|
||||
const SHAPE_MAP = {
|
||||
square: 0,
|
||||
circle: 1,
|
||||
triangle: 2,
|
||||
diamond: 3
|
||||
};
|
||||
|
||||
const VERTEX_SRC = `
|
||||
void main() {
|
||||
gl_Position = vec4(position, 1.0);
|
||||
}
|
||||
`;
|
||||
|
||||
const FRAGMENT_SRC = `
|
||||
precision highp float;
|
||||
|
||||
uniform vec3 uColor;
|
||||
uniform vec2 uResolution;
|
||||
uniform float uTime;
|
||||
uniform float uPixelSize;
|
||||
uniform float uScale;
|
||||
uniform float uDensity;
|
||||
uniform float uPixelJitter;
|
||||
uniform int uEnableRipples;
|
||||
uniform float uRippleSpeed;
|
||||
uniform float uRippleThickness;
|
||||
uniform float uRippleIntensity;
|
||||
uniform float uEdgeFade;
|
||||
|
||||
uniform int uShapeType;
|
||||
const int SHAPE_SQUARE = 0;
|
||||
const int SHAPE_CIRCLE = 1;
|
||||
const int SHAPE_TRIANGLE = 2;
|
||||
const int SHAPE_DIAMOND = 3;
|
||||
|
||||
const int MAX_CLICKS = 10;
|
||||
|
||||
uniform vec2 uClickPos [MAX_CLICKS];
|
||||
uniform float uClickTimes[MAX_CLICKS];
|
||||
|
||||
out vec4 fragColor;
|
||||
|
||||
float Bayer2(vec2 a) {
|
||||
a = floor(a);
|
||||
return fract(a.x / 2. + a.y * a.y * .75);
|
||||
}
|
||||
#define Bayer4(a) (Bayer2(.5*(a))*0.25 + Bayer2(a))
|
||||
#define Bayer8(a) (Bayer4(.5*(a))*0.25 + Bayer2(a))
|
||||
|
||||
#define FBM_OCTAVES 2
|
||||
#define FBM_LACUNARITY 1.25
|
||||
#define FBM_GAIN 1.0
|
||||
|
||||
float hash11(float n){ return fract(sin(n)*43758.5453); }
|
||||
|
||||
float vnoise(vec3 p){
|
||||
vec3 ip = floor(p);
|
||||
vec3 fp = fract(p);
|
||||
float n000 = hash11(dot(ip + vec3(0.0,0.0,0.0), vec3(1.0,57.0,113.0)));
|
||||
float n100 = hash11(dot(ip + vec3(1.0,0.0,0.0), vec3(1.0,57.0,113.0)));
|
||||
float n010 = hash11(dot(ip + vec3(0.0,1.0,0.0), vec3(1.0,57.0,113.0)));
|
||||
float n110 = hash11(dot(ip + vec3(1.0,1.0,0.0), vec3(1.0,57.0,113.0)));
|
||||
float n001 = hash11(dot(ip + vec3(0.0,0.0,1.0), vec3(1.0,57.0,113.0)));
|
||||
float n101 = hash11(dot(ip + vec3(1.0,0.0,1.0), vec3(1.0,57.0,113.0)));
|
||||
float n011 = hash11(dot(ip + vec3(0.0,1.0,1.0), vec3(1.0,57.0,113.0)));
|
||||
float n111 = hash11(dot(ip + vec3(1.0,1.0,1.0), vec3(1.0,57.0,113.0)));
|
||||
vec3 w = fp*fp*fp*(fp*(fp*6.0-15.0)+10.0);
|
||||
float x00 = mix(n000, n100, w.x);
|
||||
float x10 = mix(n010, n110, w.x);
|
||||
float x01 = mix(n001, n101, w.x);
|
||||
float x11 = mix(n011, n111, w.x);
|
||||
float y0 = mix(x00, x10, w.y);
|
||||
float y1 = mix(x01, x11, w.y);
|
||||
return mix(y0, y1, w.z) * 2.0 - 1.0;
|
||||
}
|
||||
|
||||
float fbm2(vec2 uv, float t){
|
||||
vec3 p = vec3(uv * uScale, t);
|
||||
float amp = 1.0;
|
||||
float freq = 1.0;
|
||||
float sum = 1.0;
|
||||
for (int i = 0; i < FBM_OCTAVES; ++i){
|
||||
sum += amp * vnoise(p * freq);
|
||||
freq *= FBM_LACUNARITY;
|
||||
amp *= FBM_GAIN;
|
||||
}
|
||||
return sum * 0.5 + 0.5;
|
||||
}
|
||||
|
||||
float maskCircle(vec2 p, float cov){
|
||||
float r = sqrt(cov) * .25;
|
||||
float d = length(p - 0.5) - r;
|
||||
float aa = 0.5 * fwidth(d);
|
||||
return cov * (1.0 - smoothstep(-aa, aa, d * 2.0));
|
||||
}
|
||||
|
||||
float maskTriangle(vec2 p, vec2 id, float cov){
|
||||
bool flip = mod(id.x + id.y, 2.0) > 0.5;
|
||||
if (flip) p.x = 1.0 - p.x;
|
||||
float r = sqrt(cov);
|
||||
float d = p.y - r*(1.0 - p.x);
|
||||
float aa = fwidth(d);
|
||||
return cov * clamp(0.5 - d/aa, 0.0, 1.0);
|
||||
}
|
||||
|
||||
float maskDiamond(vec2 p, float cov){
|
||||
float r = sqrt(cov) * 0.564;
|
||||
return step(abs(p.x - 0.49) + abs(p.y - 0.49), r);
|
||||
}
|
||||
|
||||
void main(){
|
||||
float pixelSize = uPixelSize;
|
||||
vec2 fragCoord = gl_FragCoord.xy - uResolution * .5;
|
||||
float aspectRatio = uResolution.x / uResolution.y;
|
||||
|
||||
vec2 pixelId = floor(fragCoord / pixelSize);
|
||||
vec2 pixelUV = fract(fragCoord / pixelSize);
|
||||
|
||||
float cellPixelSize = 8.0 * pixelSize;
|
||||
vec2 cellId = floor(fragCoord / cellPixelSize);
|
||||
vec2 cellCoord = cellId * cellPixelSize;
|
||||
vec2 uv = cellCoord / uResolution * vec2(aspectRatio, 1.0);
|
||||
|
||||
float base = fbm2(uv, uTime * 0.05);
|
||||
base = base * 0.5 - 0.65;
|
||||
|
||||
float feed = base + (uDensity - 0.5) * 0.3;
|
||||
|
||||
float speed = uRippleSpeed;
|
||||
float thickness = uRippleThickness;
|
||||
const float dampT = 1.0;
|
||||
const float dampR = 10.0;
|
||||
|
||||
if (uEnableRipples == 1) {
|
||||
for (int i = 0; i < MAX_CLICKS; ++i){
|
||||
vec2 pos = uClickPos[i];
|
||||
if (pos.x < 0.0) continue;
|
||||
float cellPixelSize = 8.0 * pixelSize;
|
||||
vec2 cuv = (((pos - uResolution * .5 - cellPixelSize * .5) / (uResolution))) * vec2(aspectRatio, 1.0);
|
||||
float t = max(uTime - uClickTimes[i], 0.0);
|
||||
float r = distance(uv, cuv);
|
||||
float waveR = speed * t;
|
||||
float ring = exp(-pow((r - waveR) / thickness, 2.0));
|
||||
float atten = exp(-dampT * t) * exp(-dampR * r);
|
||||
feed = max(feed, ring * atten * uRippleIntensity);
|
||||
}
|
||||
}
|
||||
|
||||
float bayer = Bayer8(fragCoord / uPixelSize) - 0.5;
|
||||
float bw = step(0.5, feed + bayer);
|
||||
|
||||
float h = fract(sin(dot(floor(fragCoord / uPixelSize), vec2(127.1, 311.7))) * 43758.5453);
|
||||
float jitterScale = 1.0 + (h - 0.5) * uPixelJitter;
|
||||
float coverage = bw * jitterScale;
|
||||
float M;
|
||||
if (uShapeType == SHAPE_CIRCLE) M = maskCircle (pixelUV, coverage);
|
||||
else if (uShapeType == SHAPE_TRIANGLE) M = maskTriangle(pixelUV, pixelId, coverage);
|
||||
else if (uShapeType == SHAPE_DIAMOND) M = maskDiamond(pixelUV, coverage);
|
||||
else M = coverage;
|
||||
|
||||
if (uEdgeFade > 0.0) {
|
||||
vec2 norm = gl_FragCoord.xy / uResolution;
|
||||
float edge = min(min(norm.x, norm.y), min(1.0 - norm.x, 1.0 - norm.y));
|
||||
float fade = smoothstep(0.0, uEdgeFade, edge);
|
||||
M *= fade;
|
||||
}
|
||||
|
||||
vec3 color = uColor;
|
||||
|
||||
// sRGB gamma correction - convert linear to sRGB for accurate color output
|
||||
vec3 srgbColor = mix(
|
||||
color * 12.92,
|
||||
1.055 * pow(color, vec3(1.0 / 2.4)) - 0.055,
|
||||
step(0.0031308, color)
|
||||
);
|
||||
|
||||
fragColor = vec4(srgbColor, M);
|
||||
}
|
||||
`;
|
||||
|
||||
const MAX_CLICKS = 10;
|
||||
|
||||
interface PixelBlastProps {
|
||||
variant?: string;
|
||||
pixelSize?: number;
|
||||
color?: string;
|
||||
className?: string;
|
||||
style?: React.CSSProperties;
|
||||
antialias?: boolean;
|
||||
patternScale?: number;
|
||||
patternDensity?: number;
|
||||
liquid?: boolean;
|
||||
liquidStrength?: number;
|
||||
liquidRadius?: number;
|
||||
pixelSizeJitter?: number;
|
||||
enableRipples?: boolean;
|
||||
rippleIntensityScale?: number;
|
||||
rippleThickness?: number;
|
||||
rippleSpeed?: number;
|
||||
liquidWobbleSpeed?: number;
|
||||
autoPauseOffscreen?: boolean;
|
||||
speed?: number;
|
||||
transparent?: boolean;
|
||||
edgeFade?: number;
|
||||
noiseAmount?: number;
|
||||
respectReducedMotion?: boolean;
|
||||
maxPixelRatio?: number;
|
||||
onFirstFrame?: () => void;
|
||||
}
|
||||
|
||||
const PixelBlast = ({
|
||||
variant = 'square',
|
||||
pixelSize = 3,
|
||||
color = '#B19EEF',
|
||||
className,
|
||||
style,
|
||||
antialias = true,
|
||||
patternScale = 2,
|
||||
patternDensity = 1,
|
||||
liquid = false,
|
||||
liquidStrength = 0.1,
|
||||
liquidRadius = 1,
|
||||
pixelSizeJitter = 0,
|
||||
enableRipples = true,
|
||||
rippleIntensityScale = 1,
|
||||
rippleThickness = 0.1,
|
||||
rippleSpeed = 0.3,
|
||||
liquidWobbleSpeed = 4.5,
|
||||
autoPauseOffscreen = true,
|
||||
speed = 0.5,
|
||||
transparent = true,
|
||||
edgeFade = 0.5,
|
||||
noiseAmount = 0,
|
||||
respectReducedMotion = true,
|
||||
maxPixelRatio = 2,
|
||||
onFirstFrame
|
||||
}: PixelBlastProps) => {
|
||||
const containerRef = useRef(null);
|
||||
const visibilityRef = useRef({ visible: true });
|
||||
const speedRef = useRef(speed);
|
||||
const threeRef = useRef<any>(null);
|
||||
const prevConfigRef = useRef<any>(null);
|
||||
const [prefersReducedMotion, setPrefersReducedMotion] = useState(false);
|
||||
|
||||
const onFirstFrameRef = useRef<PixelBlastProps['onFirstFrame']>(onFirstFrame);
|
||||
onFirstFrameRef.current = onFirstFrame;
|
||||
|
||||
const firstFrameFiredRef = useRef(false);
|
||||
|
||||
// Limit pixel ratio for performance (lower on mobile)
|
||||
const effectivePixelRatio = useMemo(() => {
|
||||
if (typeof window === 'undefined') return 1;
|
||||
const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent);
|
||||
const dpr = window.devicePixelRatio || 1;
|
||||
if (isMobile) return Math.min(dpr, 1.5, maxPixelRatio);
|
||||
return Math.min(dpr, maxPixelRatio);
|
||||
}, [maxPixelRatio]);
|
||||
|
||||
// Check for prefers-reduced-motion
|
||||
useEffect(() => {
|
||||
if (!respectReducedMotion) return;
|
||||
const mq = window.matchMedia('(prefers-reduced-motion: reduce)');
|
||||
setPrefersReducedMotion(mq.matches);
|
||||
const handler = (e: MediaQueryListEvent) => setPrefersReducedMotion(e.matches);
|
||||
mq.addEventListener('change', handler);
|
||||
return () => mq.removeEventListener('change', handler);
|
||||
}, [respectReducedMotion]);
|
||||
|
||||
// If WebGL rendering is disabled (e.g. reduced motion), still signal readiness so
|
||||
// callers don't wait forever.
|
||||
useEffect(() => {
|
||||
if (!prefersReducedMotion) return;
|
||||
if (firstFrameFiredRef.current) return;
|
||||
firstFrameFiredRef.current = true;
|
||||
onFirstFrameRef.current?.();
|
||||
}, [prefersReducedMotion]);
|
||||
|
||||
// Pause animation when page is not visible or element is offscreen
|
||||
useEffect(() => {
|
||||
if (!autoPauseOffscreen || prefersReducedMotion) return;
|
||||
|
||||
const container = containerRef.current;
|
||||
if (!container) return;
|
||||
|
||||
// IntersectionObserver for offscreen detection
|
||||
const io = new IntersectionObserver(
|
||||
([entry]) => {
|
||||
visibilityRef.current.visible = entry.isIntersecting;
|
||||
},
|
||||
{ threshold: 0 }
|
||||
);
|
||||
io.observe(container);
|
||||
|
||||
// Page Visibility API
|
||||
const handleVisibility = () => {
|
||||
if (document.hidden) {
|
||||
visibilityRef.current.visible = false;
|
||||
}
|
||||
};
|
||||
document.addEventListener('visibilitychange', handleVisibility);
|
||||
|
||||
return () => {
|
||||
io.disconnect();
|
||||
document.removeEventListener('visibilitychange', handleVisibility);
|
||||
};
|
||||
}, [autoPauseOffscreen, prefersReducedMotion]);
|
||||
|
||||
// Main WebGL setup effect
|
||||
useEffect(() => {
|
||||
// Skip WebGL setup if user prefers reduced motion
|
||||
if (prefersReducedMotion) return;
|
||||
|
||||
const container = containerRef.current;
|
||||
if (!container) return;
|
||||
speedRef.current = speed;
|
||||
const needsReinitKeys = ['antialias', 'liquid', 'noiseAmount'];
|
||||
const cfg = { antialias, liquid, noiseAmount };
|
||||
let mustReinit = false;
|
||||
if (!threeRef.current) mustReinit = true;
|
||||
else if (prevConfigRef.current) {
|
||||
for (const k of needsReinitKeys)
|
||||
if ((prevConfigRef.current as any)[k] !== (cfg as any)[k]) {
|
||||
mustReinit = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (mustReinit) {
|
||||
if (threeRef.current) {
|
||||
const t = threeRef.current;
|
||||
t.resizeObserver?.disconnect();
|
||||
cancelAnimationFrame(t.raf);
|
||||
t.quad?.geometry.dispose();
|
||||
t.material.dispose();
|
||||
t.composer?.dispose();
|
||||
t.renderer.dispose();
|
||||
if (t.renderer.domElement.parentElement === container) (container as HTMLDivElement).removeChild(t.renderer.domElement);
|
||||
threeRef.current = null;
|
||||
}
|
||||
|
||||
let renderer: THREE.WebGLRenderer | null = null;
|
||||
let canvas: HTMLCanvasElement | null = null;
|
||||
|
||||
try {
|
||||
canvas = document.createElement('canvas');
|
||||
renderer = new THREE.WebGLRenderer({
|
||||
canvas,
|
||||
antialias,
|
||||
alpha: true,
|
||||
powerPreference: 'high-performance'
|
||||
});
|
||||
renderer.domElement.style.width = '100%';
|
||||
renderer.domElement.style.height = '100%';
|
||||
renderer.setPixelRatio(effectivePixelRatio);
|
||||
(container as HTMLDivElement).appendChild(renderer.domElement);
|
||||
if (transparent) renderer.setClearAlpha(0);
|
||||
else renderer.setClearColor(0x000000, 1);
|
||||
const uniforms = {
|
||||
uResolution: { value: new THREE.Vector2(0, 0) },
|
||||
uTime: { value: 0 },
|
||||
uColor: { value: new THREE.Color(color) },
|
||||
uClickPos: {
|
||||
value: Array.from({ length: MAX_CLICKS }, () => new THREE.Vector2(-1, -1))
|
||||
},
|
||||
uClickTimes: { value: new Float32Array(MAX_CLICKS) },
|
||||
uShapeType: { value: SHAPE_MAP[variant as keyof typeof SHAPE_MAP] ?? 0 },
|
||||
uPixelSize: { value: pixelSize * renderer.getPixelRatio() },
|
||||
uScale: { value: patternScale },
|
||||
uDensity: { value: patternDensity },
|
||||
uPixelJitter: { value: pixelSizeJitter },
|
||||
uEnableRipples: { value: enableRipples ? 1 : 0 },
|
||||
uRippleSpeed: { value: rippleSpeed },
|
||||
uRippleThickness: { value: rippleThickness },
|
||||
uRippleIntensity: { value: rippleIntensityScale },
|
||||
uEdgeFade: { value: edgeFade }
|
||||
};
|
||||
const scene = new THREE.Scene();
|
||||
const camera = new THREE.OrthographicCamera(-1, 1, 1, -1, 0, 1);
|
||||
const material = new THREE.ShaderMaterial({
|
||||
vertexShader: VERTEX_SRC,
|
||||
fragmentShader: FRAGMENT_SRC,
|
||||
uniforms,
|
||||
transparent: true,
|
||||
depthTest: false,
|
||||
depthWrite: false,
|
||||
glslVersion: THREE.GLSL3
|
||||
});
|
||||
const quadGeom = new THREE.PlaneGeometry(2, 2);
|
||||
const quad = new THREE.Mesh(quadGeom, material);
|
||||
scene.add(quad);
|
||||
const clock = new THREE.Clock();
|
||||
const setSize = () => {
|
||||
if (!renderer) return;
|
||||
const w = (container as HTMLDivElement).clientWidth || 1;
|
||||
const h = (container as HTMLDivElement).clientHeight || 1;
|
||||
renderer.setSize(w, h, false);
|
||||
uniforms.uResolution.value.set(renderer.domElement.width, renderer.domElement.height);
|
||||
if (threeRef.current?.composer)
|
||||
threeRef.current.composer.setSize(renderer.domElement.width, renderer.domElement.height);
|
||||
uniforms.uPixelSize.value = pixelSize * renderer.getPixelRatio();
|
||||
};
|
||||
setSize();
|
||||
const ro = new ResizeObserver(setSize);
|
||||
ro.observe(container);
|
||||
const randomFloat = () => {
|
||||
if (typeof window !== 'undefined' && window.crypto?.getRandomValues) {
|
||||
const u32 = new Uint32Array(1);
|
||||
window.crypto.getRandomValues(u32);
|
||||
return u32[0] / 0xffffffff;
|
||||
}
|
||||
return Math.random();
|
||||
};
|
||||
const timeOffset = randomFloat() * 1000;
|
||||
let composer: EffectComposer | undefined;
|
||||
let touch: ReturnType<typeof createTouchTexture> | undefined;
|
||||
let liquidEffect: Effect | undefined;
|
||||
if (liquid) {
|
||||
touch = createTouchTexture();
|
||||
touch.radiusScale = liquidRadius;
|
||||
composer = new EffectComposer(renderer);
|
||||
const renderPass = new RenderPass(scene, camera);
|
||||
liquidEffect = createLiquidEffect(touch.texture, {
|
||||
strength: liquidStrength,
|
||||
freq: liquidWobbleSpeed
|
||||
});
|
||||
const effectPass = new EffectPass(camera, liquidEffect);
|
||||
effectPass.renderToScreen = true;
|
||||
composer.addPass(renderPass);
|
||||
composer.addPass(effectPass);
|
||||
}
|
||||
if (noiseAmount > 0) {
|
||||
if (!composer) {
|
||||
composer = new EffectComposer(renderer);
|
||||
composer.addPass(new RenderPass(scene, camera));
|
||||
}
|
||||
const noiseEffect = new Effect(
|
||||
'NoiseEffect',
|
||||
`uniform float uTime; uniform float uAmount; float hash(vec2 p){ return fract(sin(dot(p, vec2(127.1,311.7))) * 43758.5453);} void mainUv(inout vec2 uv){} void mainImage(const in vec4 inputColor,const in vec2 uv,out vec4 outputColor){ float n=hash(floor(uv*vec2(1920.0,1080.0))+floor(uTime*60.0)); float g=(n-0.5)*uAmount; outputColor=inputColor+vec4(vec3(g),0.0);} `,
|
||||
{
|
||||
uniforms: new Map([
|
||||
['uTime', new THREE.Uniform(0)],
|
||||
['uAmount', new THREE.Uniform(noiseAmount)]
|
||||
])
|
||||
}
|
||||
);
|
||||
const noisePass = new EffectPass(camera, noiseEffect);
|
||||
noisePass.renderToScreen = true;
|
||||
if (composer && composer.passes.length > 0) composer.passes.forEach(p => (p.renderToScreen = false));
|
||||
composer.addPass(noisePass);
|
||||
}
|
||||
if (composer && renderer) composer.setSize(renderer.domElement.width, renderer.domElement.height);
|
||||
const mapToPixels = (e: MouseEvent | PointerEvent) => {
|
||||
if (!renderer) return { fx: 0, fy: 0, w: 0, h: 0 };
|
||||
const rect = renderer.domElement.getBoundingClientRect();
|
||||
const scaleX = renderer.domElement.width / rect.width;
|
||||
const scaleY = renderer.domElement.height / rect.height;
|
||||
const fx = (e.clientX - rect.left) * scaleX;
|
||||
const fy = (rect.height - (e.clientY - rect.top)) * scaleY;
|
||||
return {
|
||||
fx,
|
||||
fy,
|
||||
w: renderer.domElement.width,
|
||||
h: renderer.domElement.height
|
||||
};
|
||||
};
|
||||
let lastRippleTime = 0;
|
||||
const rippleThrottle = 150; // ms between ripples
|
||||
const onPointerMove = (e: MouseEvent | PointerEvent) => {
|
||||
const { fx, fy, w, h } = mapToPixels(e);
|
||||
|
||||
// Trigger ripple on mouse move (throttled)
|
||||
const now = performance.now();
|
||||
if (now - lastRippleTime > rippleThrottle) {
|
||||
const ix = threeRef.current?.clickIx ?? 0;
|
||||
uniforms.uClickPos.value[ix].set(fx, fy);
|
||||
uniforms.uClickTimes.value[ix] = uniforms.uTime.value;
|
||||
if (threeRef.current) threeRef.current.clickIx = (ix + 1) % MAX_CLICKS;
|
||||
lastRippleTime = now;
|
||||
}
|
||||
|
||||
// Liquid touch effect
|
||||
if (touch) {
|
||||
touch.addTouch({ x: fx / w, y: fy / h });
|
||||
}
|
||||
};
|
||||
renderer.domElement.addEventListener('pointermove', onPointerMove, {
|
||||
passive: true
|
||||
});
|
||||
|
||||
// Store event handler for cleanup
|
||||
const domElement = renderer.domElement;
|
||||
let raf = 0;
|
||||
let lastFrameTime = 0;
|
||||
const targetDelta = 1000 / 10; // throttle to ~20fps
|
||||
const animate = (now?: number) => {
|
||||
const timeNow = now ?? performance.now();
|
||||
if (autoPauseOffscreen && !visibilityRef.current.visible) {
|
||||
raf = requestAnimationFrame(animate);
|
||||
if (threeRef.current) threeRef.current.raf = raf;
|
||||
return;
|
||||
}
|
||||
if (timeNow - lastFrameTime < targetDelta) {
|
||||
raf = requestAnimationFrame(animate);
|
||||
if (threeRef.current) threeRef.current.raf = raf;
|
||||
return;
|
||||
}
|
||||
lastFrameTime = timeNow;
|
||||
uniforms.uTime.value = timeOffset + clock.getElapsedTime() * speedRef.current;
|
||||
if (liquidEffect) liquidEffect.uniforms.get('uTime')!.value = uniforms.uTime.value;
|
||||
if (composer) {
|
||||
if (touch) touch.update();
|
||||
composer.passes.forEach(p => {
|
||||
const effs = (p as any).effects;
|
||||
if (effs)
|
||||
effs.forEach((eff: Effect) => {
|
||||
const u = eff.uniforms?.get('uTime');
|
||||
if (u) u.value = uniforms.uTime.value;
|
||||
});
|
||||
});
|
||||
composer.render();
|
||||
} else if (renderer) renderer.render(scene, camera);
|
||||
|
||||
if (!firstFrameFiredRef.current) {
|
||||
firstFrameFiredRef.current = true;
|
||||
onFirstFrameRef.current?.();
|
||||
}
|
||||
|
||||
raf = requestAnimationFrame(animate);
|
||||
if (threeRef.current) threeRef.current.raf = raf;
|
||||
};
|
||||
raf = requestAnimationFrame(animate);
|
||||
threeRef.current = {
|
||||
renderer,
|
||||
scene,
|
||||
camera,
|
||||
material,
|
||||
clock,
|
||||
clickIx: 0,
|
||||
uniforms,
|
||||
resizeObserver: ro,
|
||||
raf,
|
||||
quad,
|
||||
timeOffset,
|
||||
composer,
|
||||
touch,
|
||||
liquidEffect,
|
||||
onPointerMove,
|
||||
domElement
|
||||
};
|
||||
} catch (err) {
|
||||
console.error('[PixelBlast] WebGL initialization failed', err);
|
||||
if (renderer) renderer.dispose();
|
||||
if (canvas && canvas.parentElement === container) {
|
||||
(container as HTMLDivElement).removeChild(canvas);
|
||||
}
|
||||
threeRef.current = null;
|
||||
|
||||
if (!firstFrameFiredRef.current) {
|
||||
firstFrameFiredRef.current = true;
|
||||
onFirstFrameRef.current?.();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
const t = threeRef.current;
|
||||
t.uniforms.uShapeType.value = SHAPE_MAP[variant as keyof typeof SHAPE_MAP] ?? 0;
|
||||
t.uniforms.uPixelSize.value = pixelSize * t.renderer.getPixelRatio();
|
||||
t.uniforms.uColor.value.set(color);
|
||||
t.uniforms.uScale.value = patternScale;
|
||||
t.uniforms.uDensity.value = patternDensity;
|
||||
t.uniforms.uPixelJitter.value = pixelSizeJitter;
|
||||
t.uniforms.uEnableRipples.value = enableRipples ? 1 : 0;
|
||||
t.uniforms.uRippleIntensity.value = rippleIntensityScale;
|
||||
t.uniforms.uRippleThickness.value = rippleThickness;
|
||||
t.uniforms.uRippleSpeed.value = rippleSpeed;
|
||||
t.uniforms.uEdgeFade.value = edgeFade;
|
||||
if (transparent) t.renderer.setClearAlpha(0);
|
||||
else t.renderer.setClearColor(0x000000, 1);
|
||||
if (t.liquidEffect) {
|
||||
const uStrength = t.liquidEffect;
|
||||
if (uStrength) uStrength.value = liquidStrength;
|
||||
const uFreq = t.liquidEffect.uniforms.get('uFreq');
|
||||
if (uFreq) uFreq.value = liquidWobbleSpeed;
|
||||
}
|
||||
if (t.touch) t.touch.radiusScale = liquidRadius;
|
||||
}
|
||||
prevConfigRef.current = cfg;
|
||||
return () => {
|
||||
if (!threeRef.current) return;
|
||||
const t = threeRef.current;
|
||||
|
||||
// Remove event listeners
|
||||
if (t.domElement && t.onPointerMove) {
|
||||
t.domElement.removeEventListener('pointermove', t.onPointerMove);
|
||||
}
|
||||
|
||||
t.resizeObserver?.disconnect();
|
||||
cancelAnimationFrame(t.raf);
|
||||
|
||||
// Dispose Three.js resources
|
||||
t.quad?.geometry.dispose();
|
||||
t.material.dispose();
|
||||
t.composer?.dispose();
|
||||
|
||||
// Dispose touch texture
|
||||
if (t.touch?.texture) {
|
||||
t.touch.texture.dispose();
|
||||
}
|
||||
|
||||
t.renderer.dispose();
|
||||
if (t.renderer.domElement.parentElement === container) {
|
||||
(container as HTMLDivElement).removeChild(t.renderer.domElement);
|
||||
}
|
||||
threeRef.current = null;
|
||||
};
|
||||
}, [
|
||||
antialias,
|
||||
liquid,
|
||||
noiseAmount,
|
||||
pixelSize,
|
||||
patternScale,
|
||||
patternDensity,
|
||||
enableRipples,
|
||||
rippleIntensityScale,
|
||||
rippleThickness,
|
||||
rippleSpeed,
|
||||
pixelSizeJitter,
|
||||
edgeFade,
|
||||
transparent,
|
||||
liquidStrength,
|
||||
liquidRadius,
|
||||
liquidWobbleSpeed,
|
||||
autoPauseOffscreen,
|
||||
variant,
|
||||
color,
|
||||
speed,
|
||||
prefersReducedMotion,
|
||||
effectivePixelRatio
|
||||
]);
|
||||
|
||||
// Render empty container if user prefers reduced motion
|
||||
if (prefersReducedMotion) {
|
||||
return (
|
||||
<div
|
||||
ref={containerRef}
|
||||
className={`pixel-blast-container ${className ?? ''}`}
|
||||
style={{ ...style, backgroundColor: 'transparent' }}
|
||||
aria-label="PixelBlast background (disabled for reduced motion)"
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={containerRef}
|
||||
className={`pixel-blast-container ${className ?? ''}`}
|
||||
style={style}
|
||||
aria-label="PixelBlast interactive background"
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export default PixelBlast;
|
||||
30
frontend/components/Shuffle.css
Normal file
30
frontend/components/Shuffle.css
Normal file
@@ -0,0 +1,30 @@
|
||||
/* Root element of the shuffle text effect. Kept hidden until the component
   finishes splitting the text, to avoid a flash of un-animated content. */
.shuffle-parent {
  display: inline-block;
  white-space: normal;
  word-wrap: break-word;
  will-change: transform;
  line-height: 1.2;
  visibility: hidden;
}

/* Revealed once the component has built its character strips. */
.shuffle-parent.is-ready {
  visibility: visible;
}

/* Clipping viewport around each character strip; overflow:hidden keeps the
   off-screen scramble copies invisible while the strip slides through. */
.shuffle-char-wrapper {
  display: inline-block;
  overflow: hidden;
  vertical-align: baseline;
  position: relative;
}

/* The sliding strip inside each wrapper (moved via transform by GSAP). */
.shuffle-char-wrapper > span {
  display: inline-flex;
  will-change: transform;
}

/* Individual character cell produced by the SplitText charsClass option. */
.shuffle-char {
  line-height: 1;
  display: inline-block;
  text-align: center;
}
|
||||
423
frontend/components/Shuffle.tsx
Normal file
423
frontend/components/Shuffle.tsx
Normal file
@@ -0,0 +1,423 @@
|
||||
import React, { useRef, useEffect, useState, useMemo } from 'react';
|
||||
import { gsap } from 'gsap';
|
||||
import { ScrollTrigger } from 'gsap/ScrollTrigger';
|
||||
import { SplitText as GSAPSplitText } from 'gsap/SplitText';
|
||||
import { useGSAP } from '@gsap/react';
|
||||
import './Shuffle.css';
|
||||
|
||||
gsap.registerPlugin(ScrollTrigger, GSAPSplitText, useGSAP);
|
||||
|
||||
/** Props for the Shuffle scroll-triggered text-scramble component. */
interface ShuffleProps {
  /** Text content to split into characters and animate. */
  text: string;
  /** Extra class names appended to the root element. */
  className?: string;
  /** Inline styles merged onto the root element. */
  style?: React.CSSProperties;
  /** Direction the character strips slide ('right' is the default). */
  shuffleDirection?: 'up' | 'down' | 'left' | 'right';
  /** Duration of each character tween, in seconds. */
  duration?: number;
  /** Max random start delay per character ('random' mode only), seconds. */
  maxDelay?: number;
  /** GSAP easing name applied to the tweens. */
  ease?: string;
  /** Viewport fraction (0-1) used to derive the ScrollTrigger start. */
  threshold?: number;
  /** Pixel/percent offset (e.g. '-100px') added to the ScrollTrigger start. */
  rootMargin?: string;
  /** HTML tag rendered as the root element. */
  tag?: keyof React.JSX.IntrinsicElements;
  /** Text alignment of the root element. */
  textAlign?: 'left' | 'center' | 'right';
  /** Called when the shuffle finishes (and on every loop repeat). */
  onShuffleComplete?: () => void;
  /** Number of intermediate scramble frames each character rolls through. */
  shuffleTimes?: number;
  /** 'evenodd' staggers odd/even chars; 'random' delays each char randomly. */
  animationMode?: 'evenodd' | 'random';
  /** Repeat the animation indefinitely. */
  loop?: boolean;
  /** Delay between loop repeats, in seconds. */
  loopDelay?: number;
  /** Per-character stagger ('evenodd' mode), in seconds. */
  stagger?: number;
  /** Characters used for scramble frames; empty string keeps the originals. */
  scrambleCharset?: string;
  /** Starting text color during the animation. */
  colorFrom?: string;
  /** Final text color after the animation. */
  colorTo?: string;
  /** Fire the scroll trigger only once. */
  triggerOnce?: boolean;
  /** Skip the animation when the user prefers reduced motion. */
  respectReducedMotion?: boolean;
  /** Re-run the shuffle on mouseenter after it finishes. */
  triggerOnHover?: boolean;
  /** Start the animation automatically when the scroll trigger fires. */
  autoPlay?: boolean;
}
|
||||
|
||||
/**
 * Scroll-triggered text scramble: splits `text` into characters, wraps each
 * in a clipping span containing a vertical/horizontal strip of scramble
 * copies plus the real glyph, then slides the strips into place with GSAP.
 * Optionally loops and/or replays on hover.
 */
const Shuffle: React.FC<ShuffleProps> = ({
  text,
  className = '',
  style = {},
  shuffleDirection = 'right',
  duration = 0.35,
  maxDelay = 0,
  ease = 'power3.out',
  threshold = 0.1,
  rootMargin = '-100px',
  tag = 'p',
  textAlign = 'center',
  onShuffleComplete,
  shuffleTimes = 1,
  animationMode = 'evenodd',
  loop = false,
  loopDelay = 0,
  stagger = 0.03,
  scrambleCharset = '',
  colorFrom,
  colorTo,
  triggerOnce = true,
  respectReducedMotion = true,
  triggerOnHover = true,
  autoPlay = true
}) => {
  const ref = useRef<HTMLElement | null>(null);
  const [fontsLoaded, setFontsLoaded] = useState(false);
  const [ready, setReady] = useState(false);

  const splitRef = useRef<any>(null);          // active GSAPSplitText instance
  const wrappersRef = useRef<any[]>([]);       // clipping <span> wrappers, one per char
  const tlRef = useRef<gsap.core.Timeline | null>(null);
  const playingRef = useRef(false);            // guards re-entrant hover replays
  const hoverHandlerRef = useRef<((e: MouseEvent) => void) | null>(null);

  // Wait for web fonts so SplitText measures final glyph widths.
  useEffect(() => {
    if ('fonts' in document) {
      if (document.fonts.status === 'loaded') setFontsLoaded(true);
      else document.fonts.ready.then(() => setFontsLoaded(true));
    } else setFontsLoaded(true);
  }, []);

  // Translate threshold/rootMargin into a GSAP ScrollTrigger `start` string,
  // e.g. threshold 0.1 + rootMargin '-100px' -> 'top 90%-=100px'.
  const scrollTriggerStart = useMemo(() => {
    const startPct = (1 - threshold) * 100;
    const mm = /^(-?\d+(?:\.\d+)?)(px|em|rem|%)?$/.exec(rootMargin || '');
    const mv = mm ? parseFloat(mm[1]) : 0;
    const mu = mm ? mm[2] || 'px' : 'px';
    const sign = mv === 0 ? '' : mv < 0 ? `-=${Math.abs(mv)}${mu}` : `+=${mv}${mu}`;
    return `top ${startPct}%${sign}`;
  }, [threshold, rootMargin]);

  useGSAP(
    () => {
      if (!ref.current || !text || !fontsLoaded) return;
      // Reduced motion: skip the animation entirely, show the static text.
      if (respectReducedMotion && window.matchMedia && window.matchMedia('(prefers-reduced-motion: reduce)').matches) {
        setReady(true);
        onShuffleComplete?.();
        return;
      }

      const el = ref.current;

      const start = scrollTriggerStart;

      // Detach the currently armed mouseenter handler, if any.
      const removeHover = () => {
        if (hoverHandlerRef.current && ref.current) {
          ref.current.removeEventListener('mouseenter', hoverHandlerRef.current);
          hoverHandlerRef.current = null;
        }
      };

      // Undo all DOM surgery: kill the timeline, swap each wrapper back for
      // its original char (tagged data-orig="1"), and revert the SplitText.
      const teardown = () => {
        if (tlRef.current) {
          tlRef.current.kill();
          tlRef.current = null;
        }
        if (wrappersRef.current.length) {
          wrappersRef.current.forEach(wrap => {
            const inner = wrap.firstElementChild;
            const orig = inner?.querySelector('[data-orig="1"]');
            if (orig && wrap.parentNode) wrap.parentNode.replaceChild(orig, wrap);
          });
          wrappersRef.current = [];
        }
        try {
          splitRef.current?.revert();
        } catch {
          /* noop */
        }
        splitRef.current = null;
        playingRef.current = false;
      };

      // Split the text and build, for every character, a clipping wrapper
      // containing a strip: [copy, scramble*rolls, real char] positioned so
      // that sliding the strip reveals the real glyph last.
      const build = () => {
        teardown();

        splitRef.current = new GSAPSplitText(el, {
          type: 'chars',
          charsClass: 'shuffle-char',
          wordsClass: 'shuffle-word',
          linesClass: 'shuffle-line',
          smartWrap: true,
          reduceWhiteSpace: false
        });

        const chars = splitRef.current.chars || [];
        wrappersRef.current = [];

        const rolls = Math.max(1, Math.floor(shuffleTimes));
        const rand = (set: string) => set.charAt(Math.floor(Math.random() * set.length)) || '';

        chars.forEach((ch: any) => {
          const parent = ch.parentElement;
          if (!parent) return;

          const w = ch.getBoundingClientRect().width;
          const h = ch.getBoundingClientRect().height;
          if (!w) return; // zero-width (e.g. collapsed whitespace): leave untouched

          // Outer clipping viewport, sized to one glyph.
          const wrap = document.createElement('span');
          Object.assign(wrap.style, {
            display: 'inline-block',
            overflow: 'hidden',
            width: w + 'px',
            height: shuffleDirection === 'up' || shuffleDirection === 'down' ? h + 'px' : 'auto',
            verticalAlign: 'bottom'
          });

          // Inner strip that actually slides (animated via transform).
          const inner = document.createElement('span');
          Object.assign(inner.style, {
            display: 'inline-block',
            whiteSpace: shuffleDirection === 'up' || shuffleDirection === 'down' ? 'normal' : 'nowrap',
            willChange: 'transform'
          });

          parent.insertBefore(wrap, ch);
          wrap.appendChild(inner);

          // Leading copy of the glyph so the strip starts "full".
          const firstOrig = ch.cloneNode(true);
          Object.assign(firstOrig.style, {
            display: shuffleDirection === 'up' || shuffleDirection === 'down' ? 'block' : 'inline-block',
            width: w + 'px',
            textAlign: 'center'
          });

          // Tag the real char so teardown/cleanup can find it again.
          ch.setAttribute('data-orig', '1');
          Object.assign(ch.style, {
            display: shuffleDirection === 'up' || shuffleDirection === 'down' ? 'block' : 'inline-block',
            width: w + 'px',
            textAlign: 'center'
          });

          inner.appendChild(firstOrig);
          // Intermediate scramble frames (random chars if a charset is given).
          for (let k = 0; k < rolls; k++) {
            const c = ch.cloneNode(true);
            if (scrambleCharset) c.textContent = rand(scrambleCharset);
            Object.assign(c.style, {
              display: shuffleDirection === 'up' || shuffleDirection === 'down' ? 'block' : 'inline-block',
              width: w + 'px',
              textAlign: 'center'
            });
            inner.appendChild(c);
          }
          inner.appendChild(ch);

          const steps = rolls + 1;

          // For right/down the strip order is reversed so the real glyph
          // leads and the motion still ends on it.
          if (shuffleDirection === 'right' || shuffleDirection === 'down') {
            const firstCopy = inner.firstElementChild;
            const real = inner.lastElementChild;
            if (real) inner.insertBefore(real, inner.firstChild);
            if (firstCopy) inner.appendChild(firstCopy);
          }

          let startX = 0;
          let finalX = 0;
          let startY = 0;
          let finalY = 0;

          if (shuffleDirection === 'right') {
            startX = -steps * w;
            finalX = 0;
          } else if (shuffleDirection === 'left') {
            startX = 0;
            finalX = -steps * w;
          } else if (shuffleDirection === 'down') {
            startY = -steps * h;
            finalY = 0;
          } else if (shuffleDirection === 'up') {
            startY = 0;
            finalY = -steps * h;
          }

          // Record start/final offsets on the strip itself so loop restarts
          // and tween targets can read them back without closures.
          if (shuffleDirection === 'left' || shuffleDirection === 'right') {
            gsap.set(inner, { x: startX, y: 0, force3D: true });
            inner.setAttribute('data-start-x', String(startX));
            inner.setAttribute('data-final-x', String(finalX));
          } else {
            gsap.set(inner, { x: 0, y: startY, force3D: true });
            inner.setAttribute('data-start-y', String(startY));
            inner.setAttribute('data-final-y', String(finalY));
          }

          if (colorFrom) inner.style.color = colorFrom;
          wrappersRef.current.push(wrap);
        });
      };

      // The animated strips (first child of each wrapper).
      const inners = () => wrappersRef.current.map(w => w.firstElementChild);

      // Re-randomize the scramble frames (all strip children except the
      // first and last, which are the glyph copies).
      const randomizeScrambles = () => {
        if (!scrambleCharset) return;
        wrappersRef.current.forEach(w => {
          const strip = w.firstElementChild;
          if (!strip) return;
          const kids = Array.from(strip.children) as Element[];
          for (let i = 1; i < kids.length - 1; i++) {
            kids[i].textContent = scrambleCharset.charAt(Math.floor(Math.random() * scrambleCharset.length));
          }
        });
      };

      // After a non-looping run: collapse each strip to just the real glyph
      // and drop the transform so layout is stable again.
      const cleanupToStill = () => {
        wrappersRef.current.forEach(w => {
          const strip = w.firstElementChild;
          if (!strip) return;
          const real = strip.querySelector('[data-orig="1"]');
          if (!real) return;
          strip.replaceChildren(real);
          strip.style.transform = 'none';
          strip.style.willChange = 'auto';
        });
      };

      // Build and start the slide timeline over all strips.
      const play = () => {
        const strips = inners();
        if (!strips.length) return;

        playingRef.current = true;
        const isVertical = shuffleDirection === 'up' || shuffleDirection === 'down';

        const tl = gsap.timeline({
          smoothChildTiming: true,
          repeat: loop ? -1 : 0,
          repeatDelay: loop ? loopDelay : 0,
          onRepeat: () => {
            // Reset strips to their recorded start offsets for the next pass.
            if (scrambleCharset) randomizeScrambles();
            if (isVertical) {
              gsap.set(strips, { y: (i, t) => parseFloat(t.getAttribute('data-start-y') || '0') });
            } else {
              gsap.set(strips, { x: (i, t) => parseFloat(t.getAttribute('data-start-x') || '0') });
            }
            onShuffleComplete?.();
          },
          onComplete: () => {
            playingRef.current = false;
            if (!loop) {
              cleanupToStill();
              if (colorTo) gsap.set(strips, { color: colorTo });
              onShuffleComplete?.();
              armHover(); // re-arm hover replays (called at runtime, after armHover is defined)
            }
          }
        });

        // Tween a group of strips to their final offsets at timeline time `at`.
        const addTween = (targets: any, at: any) => {
          const vars: any = {
            duration,
            ease,
            force3D: true,
            stagger: animationMode === 'evenodd' ? stagger : 0
          };
          if (isVertical) {
            vars.y = (i: number, t: any) => parseFloat(t.getAttribute('data-final-y') || '0');
          } else {
            vars.x = (i: number, t: any) => parseFloat(t.getAttribute('data-final-x') || '0');
          }

          tl.to(targets, vars, at);

          if (colorFrom && colorTo) {
            tl.to(targets, { color: colorTo, duration, ease }, at);
          }
        };

        if (animationMode === 'evenodd') {
          // Odd-indexed strips start first; evens start at 70% of the odd span.
          const odd = strips.filter((_, i) => i % 2 === 1);
          const even = strips.filter((_, i) => i % 2 === 0);
          const oddTotal = duration + Math.max(0, odd.length - 1) * stagger;
          const evenStart = odd.length ? oddTotal * 0.7 : 0;
          if (odd.length) addTween(odd, 0);
          if (even.length) addTween(even, evenStart);
        } else {
          // Random mode: each strip starts at an independent random delay.
          strips.forEach(strip => {
            const d = Math.random() * maxDelay;
            const vars: any = {
              duration,
              ease,
              force3D: true
            };
            if (isVertical) {
              vars.y = parseFloat(strip.getAttribute('data-final-y') || '0');
            } else {
              vars.x = parseFloat(strip.getAttribute('data-final-x') || '0');
            }
            tl.to(strip, vars, d);
            if (colorFrom && colorTo) tl.fromTo(strip, { color: colorFrom }, { color: colorTo, duration, ease }, d);
          });
        }

        tlRef.current = tl;
      };

      // Install a mouseenter handler that rebuilds and replays the effect.
      const armHover = () => {
        if (!triggerOnHover || !ref.current) return;
        removeHover();
        const handler = () => {
          if (playingRef.current) return; // ignore hovers mid-animation
          build();
          if (scrambleCharset) randomizeScrambles();
          play();
        };
        hoverHandlerRef.current = handler;
        ref.current.addEventListener('mouseenter', handler);
      };

      // First-run entry point, invoked by the ScrollTrigger below.
      const create = () => {
        build();
        if (scrambleCharset) randomizeScrambles();
        if (autoPlay) {
          play();
        }
        armHover();
        setReady(true);
      };

      const st = ScrollTrigger.create({
        trigger: el,
        start,
        once: triggerOnce,
        onEnter: create
      });

      return () => {
        st.kill();
        removeHover();
        teardown();
        setReady(false);
      };
    },
    {
      dependencies: [
        text, duration, maxDelay, ease, scrollTriggerStart, fontsLoaded,
        shuffleDirection, shuffleTimes, animationMode, loop, loopDelay,
        stagger, scrambleCharset, colorFrom, colorTo, triggerOnce,
        respectReducedMotion, triggerOnHover, onShuffleComplete, autoPlay
      ],
      scope: ref
    }
  );

  const commonStyle = useMemo(() => ({ textAlign, ...style }), [textAlign, style]);

  // 'is-ready' flips the CSS visibility on once the split/build has run.
  const classes = useMemo(() => `shuffle-parent ${ready ? 'is-ready' : ''} ${className}`, [ready, className]);

  const Tag = tag || 'p';
  return React.createElement(Tag, { ref, className: classes, style: commonStyle }, text);
};
|
||||
|
||||
export default Shuffle;
|
||||
189
frontend/components/about-dialog.tsx
Normal file
189
frontend/components/about-dialog.tsx
Normal file
@@ -0,0 +1,189 @@
|
||||
"use client"
|
||||
|
||||
import { useState } from 'react'
|
||||
import { useTranslations } from 'next-intl'
|
||||
import { useQueryClient } from '@tanstack/react-query'
|
||||
import {
|
||||
IconRadar,
|
||||
IconRefresh,
|
||||
IconExternalLink,
|
||||
IconBrandGithub,
|
||||
IconMessageReport,
|
||||
IconBook,
|
||||
IconFileText,
|
||||
IconCheck,
|
||||
IconArrowUp,
|
||||
} from '@tabler/icons-react'
|
||||
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
DialogTrigger,
|
||||
} from '@/components/ui/dialog'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { Separator } from '@/components/ui/separator'
|
||||
import { Badge } from '@/components/ui/badge'
|
||||
import { useVersion } from '@/hooks/use-version'
|
||||
import { VersionService } from '@/services/version.service'
|
||||
import type { UpdateCheckResult } from '@/types/version.types'
|
||||
|
||||
/** Props for AboutDialog. */
interface AboutDialogProps {
  /** Trigger element; rendered via DialogTrigger's `asChild`. */
  children: React.ReactNode
}
|
||||
|
||||
/**
 * "About" dialog: shows product name/description, the current version, an
 * on-demand update check against the release feed, and external links
 * (GitHub, changelog, issue tracker, docs).
 */
export function AboutDialog({ children }: AboutDialogProps) {
  const t = useTranslations('about')
  const { data: versionData } = useVersion()
  const queryClient = useQueryClient()

  const [isChecking, setIsChecking] = useState(false)
  const [updateResult, setUpdateResult] = useState<UpdateCheckResult | null>(null)
  const [checkError, setCheckError] = useState<string | null>(null)

  // Query the latest release; mirror the result into the react-query cache
  // under 'check-update' so other consumers of that key stay in sync.
  const handleCheckUpdate = async () => {
    setIsChecking(true)
    setCheckError(null)
    try {
      const result = await VersionService.checkUpdate()
      setUpdateResult(result)
      queryClient.setQueryData(['check-update'], result)
    } catch {
      // Failure is surfaced to the user via the translated error line below.
      setCheckError(t('checkFailed'))
    } finally {
      setIsChecking(false)
    }
  }

  // Prefer the freshly checked version; fall back to the cached version
  // query, then a placeholder dash.
  const currentVersion = updateResult?.currentVersion || versionData?.version || '-'
  const latestVersion = updateResult?.latestVersion
  const hasUpdate = updateResult?.hasUpdate

  return (
    <Dialog>
      <DialogTrigger asChild>
        {children}
      </DialogTrigger>
      <DialogContent className="sm:max-w-md">
        <DialogHeader>
          <DialogTitle>{t('title')}</DialogTitle>
        </DialogHeader>

        <div className="space-y-6">
          {/* Logo and name */}
          <div className="flex flex-col items-center py-4">
            <div className="flex h-16 w-16 items-center justify-center rounded-2xl bg-primary/10 mb-3">
              <IconRadar className="h-8 w-8 text-primary" />
            </div>
            <h2 className="text-xl font-semibold">{t('productName')}</h2>
            <p className="text-sm text-muted-foreground">{t('description')}</p>
          </div>

          {/* Version info + update check */}
          <div className="rounded-lg border p-4 space-y-3">
            <div className="flex items-center justify-between">
              <span className="text-sm text-muted-foreground">{t('currentVersion')}</span>
              <span className="font-mono text-sm">{currentVersion}</span>
            </div>

            {/* Latest-version row only appears after a check has run */}
            {updateResult && (
              <div className="flex items-center justify-between">
                <span className="text-sm text-muted-foreground">{t('latestVersion')}</span>
                <div className="flex items-center gap-2">
                  <span className="font-mono text-sm">{latestVersion}</span>
                  {hasUpdate ? (
                    <Badge variant="default" className="gap-1">
                      <IconArrowUp className="h-3 w-3" />
                      {t('updateAvailable')}
                    </Badge>
                  ) : (
                    <Badge variant="secondary" className="gap-1">
                      <IconCheck className="h-3 w-3" />
                      {t('upToDate')}
                    </Badge>
                  )}
                </div>
              </div>
            )}

            {checkError && (
              <p className="text-sm text-destructive">{checkError}</p>
            )}

            <div className="flex gap-2">
              <Button
                variant="outline"
                size="sm"
                className="flex-1"
                onClick={handleCheckUpdate}
                disabled={isChecking}
              >
                <IconRefresh className={`h-4 w-4 mr-2 ${isChecking ? 'animate-spin' : ''}`} />
                {isChecking ? t('checking') : t('checkUpdate')}
              </Button>

              {hasUpdate && updateResult?.releaseUrl && (
                <Button
                  variant="default"
                  size="sm"
                  className="flex-1"
                  asChild
                >
                  <a href={updateResult.releaseUrl} target="_blank" rel="noopener noreferrer">
                    <IconExternalLink className="h-4 w-4 mr-2" />
                    {t('viewRelease')}
                  </a>
                </Button>
              )}
            </div>

            {/* How-to-update hint, shown only when an update exists */}
            {hasUpdate && (
              <div className="rounded-md bg-muted p-3 text-sm text-muted-foreground">
                <p>{t('updateHint')}</p>
                <code className="mt-1 block rounded bg-background px-2 py-1 font-mono text-xs">
                  sudo ./update.sh
                </code>
              </div>
            )}
          </div>

          <Separator />

          {/* External links */}
          <div className="grid grid-cols-2 gap-2">
            <Button variant="ghost" size="sm" className="justify-start" asChild>
              <a href="https://github.com/yyhuni/xingrin" target="_blank" rel="noopener noreferrer">
                <IconBrandGithub className="h-4 w-4 mr-2" />
                GitHub
              </a>
            </Button>
            <Button variant="ghost" size="sm" className="justify-start" asChild>
              <a href="https://github.com/yyhuni/xingrin/releases" target="_blank" rel="noopener noreferrer">
                <IconFileText className="h-4 w-4 mr-2" />
                {t('changelog')}
              </a>
            </Button>
            <Button variant="ghost" size="sm" className="justify-start" asChild>
              <a href="https://github.com/yyhuni/xingrin/issues" target="_blank" rel="noopener noreferrer">
                <IconMessageReport className="h-4 w-4 mr-2" />
                {t('feedback')}
              </a>
            </Button>
            <Button variant="ghost" size="sm" className="justify-start" asChild>
              <a href="https://github.com/yyhuni/xingrin#readme" target="_blank" rel="noopener noreferrer">
                <IconBook className="h-4 w-4 mr-2" />
                {t('docs')}
              </a>
            </Button>
          </div>

          {/* Footer */}
          <p className="text-center text-xs text-muted-foreground">
            © 2026 {t('productName')} · GPL-3.0
          </p>
        </div>
      </DialogContent>
    </Dialog>
  )
}
|
||||
@@ -0,0 +1,360 @@
|
||||
'use client';
|
||||
|
||||
import * as React from 'react';
|
||||
|
||||
import { cn } from '@/lib/utils';
|
||||
|
||||
/** How the pointer's gravity acts on nearby stars. */
type MouseGravity = 'attract' | 'repel';
/** How a star's glow approaches its target intensity. */
type GlowAnimation = 'instant' | 'ease' | 'spring';
/** What happens when two stars come into contact. */
type StarsInteractionType = 'bounce' | 'merge';

/** Props for GravityStarsBackground; also accepts all standard div props. */
type GravityStarsProps = {
  /** Number of stars to simulate. */
  starsCount?: number;
  /** Scale of the random star radius (actual radius is 1 + random * starsSize). */
  starsSize?: number;
  /** Resting opacity of each star (0-1). */
  starsOpacity?: number;
  /** Base glow (canvas shadowBlur) intensity. */
  glowIntensity?: number;
  /** Easing mode for glow-intensity changes. */
  glowAnimation?: GlowAnimation;
  /** Base drift speed of the stars. */
  movementSpeed?: number;
  /** Radius in CSS px around the pointer within which stars react. */
  mouseInfluence?: number;
  /** Whether the pointer attracts or repels stars. */
  mouseGravity?: MouseGravity;
  /** Strength of the pointer's pull/push (scaled by 0.001 internally). */
  gravityStrength?: number;
  /** Enable star-to-star interaction. */
  starsInteraction?: boolean;
  /** Interaction behavior when starsInteraction is enabled. */
  starsInteractionType?: StarsInteractionType;
} & React.ComponentProps<'div'>;

/** Per-star simulation state; positions/sizes are in CSS pixels. */
type Particle = {
  x: number;            // position
  y: number;
  vx: number;           // velocity
  vy: number;
  size: number;         // radius
  opacity: number;      // current opacity (raised near the pointer)
  baseOpacity: number;  // resting opacity
  mass: number;         // used in bounce impulse resolution
  glowMultiplier?: number;  // current glow scale, kept >= 1
  glowVelocity?: number;    // velocity term for 'spring' glow mode
};
|
||||
|
||||
function GravityStarsBackground({
|
||||
starsCount = 75,
|
||||
starsSize = 2,
|
||||
starsOpacity = 0.75,
|
||||
glowIntensity = 15,
|
||||
glowAnimation = 'ease',
|
||||
movementSpeed = 0.3,
|
||||
mouseInfluence = 100,
|
||||
mouseGravity = 'attract',
|
||||
gravityStrength = 75,
|
||||
starsInteraction = false,
|
||||
starsInteractionType = 'bounce',
|
||||
className,
|
||||
...props
|
||||
}: GravityStarsProps) {
|
||||
const containerRef = React.useRef<HTMLDivElement | null>(null);
|
||||
const canvasRef = React.useRef<HTMLCanvasElement | null>(null);
|
||||
const animRef = React.useRef<number | null>(null);
|
||||
const starsRef = React.useRef<Particle[]>([]);
|
||||
const mouseRef = React.useRef<{ x: number; y: number }>({ x: 0, y: 0 });
|
||||
const [dpr, setDpr] = React.useState(1);
|
||||
const [canvasSize, setCanvasSize] = React.useState({
|
||||
width: 800,
|
||||
height: 600,
|
||||
});
|
||||
|
||||
const readColor = React.useCallback(() => {
|
||||
const el = containerRef.current;
|
||||
if (!el) return '#ffffff';
|
||||
const cs = getComputedStyle(el);
|
||||
return cs.color || '#ffffff';
|
||||
}, []);
|
||||
|
||||
const initStars = React.useCallback(
|
||||
(w: number, h: number) => {
|
||||
starsRef.current = Array.from({ length: starsCount }).map(() => {
|
||||
const angle = Math.random() * Math.PI * 2;
|
||||
const speed = movementSpeed * (0.5 + Math.random() * 0.5);
|
||||
return {
|
||||
x: Math.random() * w,
|
||||
y: Math.random() * h,
|
||||
vx: Math.cos(angle) * speed,
|
||||
vy: Math.sin(angle) * speed,
|
||||
size: Math.random() * starsSize + 1,
|
||||
opacity: starsOpacity,
|
||||
baseOpacity: starsOpacity,
|
||||
mass: Math.random() * 0.5 + 0.5,
|
||||
glowMultiplier: 1,
|
||||
glowVelocity: 0,
|
||||
};
|
||||
});
|
||||
},
|
||||
[starsCount, movementSpeed, starsOpacity, starsSize],
|
||||
);
|
||||
|
||||
const redistributeStars = React.useCallback((w: number, h: number) => {
|
||||
starsRef.current.forEach((p) => {
|
||||
p.x = Math.random() * w;
|
||||
p.y = Math.random() * h;
|
||||
});
|
||||
}, []);
|
||||
|
||||
const resizeCanvas = React.useCallback(() => {
|
||||
const canvas = canvasRef.current;
|
||||
const container = containerRef.current;
|
||||
if (!canvas || !container) return;
|
||||
const rect = container.getBoundingClientRect();
|
||||
const nextDpr = Math.max(1, Math.min(window.devicePixelRatio || 1, 2));
|
||||
setDpr(nextDpr);
|
||||
canvas.width = Math.max(1, Math.floor(rect.width * nextDpr));
|
||||
canvas.height = Math.max(1, Math.floor(rect.height * nextDpr));
|
||||
canvas.style.width = `${rect.width}px`;
|
||||
canvas.style.height = `${rect.height}px`;
|
||||
setCanvasSize({ width: rect.width, height: rect.height });
|
||||
if (starsRef.current.length === 0) {
|
||||
initStars(rect.width, rect.height);
|
||||
} else {
|
||||
redistributeStars(rect.width, rect.height);
|
||||
}
|
||||
}, [initStars, redistributeStars]);
|
||||
|
||||
const handlePointerMove = React.useCallback(
|
||||
(e: React.MouseEvent | React.TouchEvent) => {
|
||||
const canvas = canvasRef.current;
|
||||
if (!canvas) return;
|
||||
const rect = canvas.getBoundingClientRect();
|
||||
let clientX = 0;
|
||||
let clientY = 0;
|
||||
if ('touches' in e) {
|
||||
const t = e.touches[0];
|
||||
if (!t) return;
|
||||
clientX = t.clientX;
|
||||
clientY = t.clientY;
|
||||
} else {
|
||||
clientX = e.clientX;
|
||||
clientY = e.clientY;
|
||||
}
|
||||
mouseRef.current = { x: clientX - rect.left, y: clientY - rect.top };
|
||||
},
|
||||
[],
|
||||
);
|
||||
|
||||
const updateStars = React.useCallback(() => {
|
||||
const w = canvasSize.width;
|
||||
const h = canvasSize.height;
|
||||
const mouse = mouseRef.current;
|
||||
|
||||
for (let i = 0; i < starsRef.current.length; i++) {
|
||||
const p = starsRef.current[i];
|
||||
|
||||
const dx = mouse.x - p.x;
|
||||
const dy = mouse.y - p.y;
|
||||
const dist = Math.hypot(dx, dy);
|
||||
|
||||
if (dist < mouseInfluence && dist > 0) {
|
||||
const force = (mouseInfluence - dist) / mouseInfluence;
|
||||
const nx = dx / dist;
|
||||
const ny = dy / dist;
|
||||
const g = force * (gravityStrength * 0.001);
|
||||
|
||||
if (mouseGravity === 'attract') {
|
||||
p.vx += nx * g;
|
||||
p.vy += ny * g;
|
||||
} else if (mouseGravity === 'repel') {
|
||||
p.vx -= nx * g;
|
||||
p.vy -= ny * g;
|
||||
}
|
||||
|
||||
p.opacity = Math.min(1, p.baseOpacity + force * 0.4);
|
||||
|
||||
const targetGlow = 1 + force * 2;
|
||||
const currentGlow = p.glowMultiplier || 1;
|
||||
|
||||
if (glowAnimation === 'instant') {
|
||||
p.glowMultiplier = targetGlow;
|
||||
} else if (glowAnimation === 'ease') {
|
||||
const ease = 0.15;
|
||||
p.glowMultiplier = currentGlow + (targetGlow - currentGlow) * ease;
|
||||
} else {
|
||||
const spring = (targetGlow - currentGlow) * 0.2;
|
||||
const damping = 0.85;
|
||||
p.glowVelocity = (p.glowVelocity || 0) * damping + spring;
|
||||
p.glowMultiplier = currentGlow + (p.glowVelocity || 0);
|
||||
}
|
||||
} else {
|
||||
p.opacity = Math.max(p.baseOpacity * 0.3, p.opacity - 0.02);
|
||||
const targetGlow = 1;
|
||||
const currentGlow = p.glowMultiplier || 1;
|
||||
if (glowAnimation === 'instant') {
|
||||
p.glowMultiplier = targetGlow;
|
||||
} else if (glowAnimation === 'ease') {
|
||||
const ease = 0.08;
|
||||
p.glowMultiplier = Math.max(
|
||||
1,
|
||||
currentGlow + (targetGlow - currentGlow) * ease,
|
||||
);
|
||||
} else {
|
||||
const spring = (targetGlow - currentGlow) * 0.15;
|
||||
const damping = 0.9;
|
||||
p.glowVelocity = (p.glowVelocity || 0) * damping + spring;
|
||||
p.glowMultiplier = Math.max(1, currentGlow + (p.glowVelocity || 0));
|
||||
}
|
||||
}
|
||||
|
||||
if (starsInteraction) {
|
||||
for (let j = i + 1; j < starsRef.current.length; j++) {
|
||||
const o = starsRef.current[j];
|
||||
const dx2 = o.x - p.x;
|
||||
const dy2 = o.y - p.y;
|
||||
const d = Math.hypot(dx2, dy2);
|
||||
const minD = p.size + o.size + 5;
|
||||
if (d < minD && d > 0) {
|
||||
if (starsInteractionType === 'bounce') {
|
||||
const nx = dx2 / d;
|
||||
const ny = dy2 / d;
|
||||
const rvx = p.vx - o.vx;
|
||||
const rvy = p.vy - o.vy;
|
||||
const speed = rvx * nx + rvy * ny;
|
||||
if (speed < 0) continue;
|
||||
const impulse = (2 * speed) / (p.mass + o.mass);
|
||||
p.vx -= impulse * o.mass * nx;
|
||||
p.vy -= impulse * o.mass * ny;
|
||||
o.vx += impulse * p.mass * nx;
|
||||
o.vy += impulse * p.mass * ny;
|
||||
const overlap = minD - d;
|
||||
const sx = nx * overlap * 0.5;
|
||||
const sy = ny * overlap * 0.5;
|
||||
p.x -= sx;
|
||||
p.y -= sy;
|
||||
o.x += sx;
|
||||
o.y += sy;
|
||||
} else {
|
||||
const mergeForce = (minD - d) / minD;
|
||||
p.glowMultiplier = (p.glowMultiplier || 1) + mergeForce * 0.5;
|
||||
o.glowMultiplier = (o.glowMultiplier || 1) + mergeForce * 0.5;
|
||||
const af = mergeForce * 0.01;
|
||||
p.vx += dx2 * af;
|
||||
p.vy += dy2 * af;
|
||||
o.vx -= dx2 * af;
|
||||
o.vy -= dy2 * af;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
p.x += p.vx;
|
||||
p.y += p.vy;
|
||||
|
||||
p.vx += (Math.random() - 0.5) * 0.001;
|
||||
p.vy += (Math.random() - 0.5) * 0.001;
|
||||
|
||||
p.vx *= 0.999;
|
||||
p.vy *= 0.999;
|
||||
|
||||
if (p.x < 0) p.x = w;
|
||||
if (p.x > w) p.x = 0;
|
||||
if (p.y < 0) p.y = h;
|
||||
if (p.y > h) p.y = 0;
|
||||
}
|
||||
}, [
|
||||
canvasSize.width,
|
||||
canvasSize.height,
|
||||
mouseInfluence,
|
||||
mouseGravity,
|
||||
gravityStrength,
|
||||
glowAnimation,
|
||||
starsInteraction,
|
||||
starsInteractionType,
|
||||
]);
|
||||
|
||||
const drawStars = React.useCallback(
|
||||
(ctx: CanvasRenderingContext2D) => {
|
||||
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
|
||||
const color = readColor();
|
||||
for (const p of starsRef.current) {
|
||||
ctx.save();
|
||||
ctx.shadowColor = color;
|
||||
ctx.shadowBlur = glowIntensity * (p.glowMultiplier || 1) * 2;
|
||||
ctx.globalAlpha = p.opacity;
|
||||
ctx.fillStyle = color;
|
||||
ctx.beginPath();
|
||||
ctx.arc(p.x * dpr, p.y * dpr, p.size * dpr, 0, Math.PI * 2);
|
||||
ctx.fill();
|
||||
ctx.restore();
|
||||
}
|
||||
},
|
||||
[dpr, glowIntensity, readColor],
|
||||
);
|
||||
|
||||
const animate = React.useCallback(() => {
|
||||
const canvas = canvasRef.current;
|
||||
if (!canvas) return;
|
||||
const ctx = canvas.getContext('2d');
|
||||
if (!ctx) return;
|
||||
updateStars();
|
||||
drawStars(ctx);
|
||||
animRef.current = requestAnimationFrame(animate);
|
||||
}, [updateStars, drawStars]);
|
||||
|
||||
React.useEffect(() => {
|
||||
resizeCanvas();
|
||||
const container = containerRef.current;
|
||||
const ro =
|
||||
typeof ResizeObserver !== 'undefined'
|
||||
? new ResizeObserver(resizeCanvas)
|
||||
: null;
|
||||
if (container && ro) ro.observe(container);
|
||||
const onResize = () => resizeCanvas();
|
||||
window.addEventListener('resize', onResize);
|
||||
return () => {
|
||||
window.removeEventListener('resize', onResize);
|
||||
if (ro && container) ro.disconnect();
|
||||
};
|
||||
}, [resizeCanvas]);
|
||||
|
||||
React.useEffect(() => {
|
||||
if (starsRef.current.length === 0) {
|
||||
initStars(canvasSize.width, canvasSize.height);
|
||||
} else {
|
||||
starsRef.current.forEach((p) => {
|
||||
p.baseOpacity = starsOpacity;
|
||||
p.opacity = starsOpacity;
|
||||
const spd = Math.hypot(p.vx, p.vy);
|
||||
if (spd > 0) {
|
||||
const ratio = movementSpeed / spd;
|
||||
p.vx *= ratio;
|
||||
p.vy *= ratio;
|
||||
}
|
||||
});
|
||||
}
|
||||
}, [
|
||||
starsCount,
|
||||
starsOpacity,
|
||||
movementSpeed,
|
||||
canvasSize.width,
|
||||
canvasSize.height,
|
||||
initStars,
|
||||
]);
|
||||
|
||||
React.useEffect(() => {
|
||||
if (animRef.current) cancelAnimationFrame(animRef.current);
|
||||
animRef.current = requestAnimationFrame(animate);
|
||||
return () => {
|
||||
if (animRef.current) cancelAnimationFrame(animRef.current);
|
||||
animRef.current = null;
|
||||
};
|
||||
}, [animate]);
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={containerRef}
|
||||
data-slot="gravity-stars-background"
|
||||
className={cn('relative size-full overflow-hidden', className)}
|
||||
onMouseMove={(e) => handlePointerMove(e)}
|
||||
onTouchMove={(e) => handlePointerMove(e)}
|
||||
{...props}
|
||||
>
|
||||
<canvas ref={canvasRef} className="block w-full h-full" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export { GravityStarsBackground, type GravityStarsProps };
|
||||
@@ -5,7 +5,6 @@ import type * as React from "react"
|
||||
// Import various icons from Tabler Icons library
|
||||
import {
|
||||
IconDashboard, // Dashboard icon
|
||||
IconHelp, // Help icon
|
||||
IconListDetails, // List details icon
|
||||
IconSettings, // Settings icon
|
||||
IconUsers, // Users icon
|
||||
@@ -15,10 +14,10 @@ import {
|
||||
IconServer, // Server icon
|
||||
IconTerminal2, // Terminal icon
|
||||
IconBug, // Vulnerability icon
|
||||
IconMessageReport, // Feedback icon
|
||||
IconSearch, // Search icon
|
||||
IconKey, // API Key icon
|
||||
IconBan, // Blacklist icon
|
||||
IconInfoCircle, // About icon
|
||||
} from "@tabler/icons-react"
|
||||
// Import internationalization hook
|
||||
import { useTranslations } from 'next-intl'
|
||||
@@ -27,8 +26,8 @@ import { Link, usePathname } from '@/i18n/navigation'
|
||||
|
||||
// Import custom navigation components
|
||||
import { NavSystem } from "@/components/nav-system"
|
||||
import { NavSecondary } from "@/components/nav-secondary"
|
||||
import { NavUser } from "@/components/nav-user"
|
||||
import { AboutDialog } from "@/components/about-dialog"
|
||||
// Import sidebar UI components
|
||||
import {
|
||||
Sidebar,
|
||||
@@ -139,20 +138,6 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
|
||||
},
|
||||
]
|
||||
|
||||
// Secondary navigation menu items
|
||||
const navSecondary = [
|
||||
{
|
||||
title: t('feedback'),
|
||||
url: "https://github.com/yyhuni/xingrin/issues",
|
||||
icon: IconMessageReport,
|
||||
},
|
||||
{
|
||||
title: t('help'),
|
||||
url: "https://github.com/yyhuni/xingrin",
|
||||
icon: IconHelp,
|
||||
},
|
||||
]
|
||||
|
||||
// System settings related menu items
|
||||
const documents = [
|
||||
{
|
||||
@@ -194,8 +179,8 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
|
||||
className="data-[slot=sidebar-menu-button]:!p-1.5"
|
||||
>
|
||||
<Link href="/">
|
||||
<IconRadar className="!size-5" />
|
||||
<span className="text-base font-semibold">XingRin</span>
|
||||
<IconRadar className="!size-5 text-primary" />
|
||||
<span className="text-base font-semibold">{t('appName')}</span>
|
||||
</Link>
|
||||
</SidebarMenuButton>
|
||||
</SidebarMenuItem>
|
||||
@@ -271,8 +256,21 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
|
||||
|
||||
{/* System settings navigation menu */}
|
||||
<NavSystem items={documents} />
|
||||
{/* Secondary navigation menu, using mt-auto to push to bottom */}
|
||||
<NavSecondary items={navSecondary} className="mt-auto" />
|
||||
{/* About system button */}
|
||||
<SidebarGroup className="mt-auto">
|
||||
<SidebarGroupContent>
|
||||
<SidebarMenu>
|
||||
<SidebarMenuItem>
|
||||
<AboutDialog>
|
||||
<SidebarMenuButton>
|
||||
<IconInfoCircle />
|
||||
<span>{t('about')}</span>
|
||||
</SidebarMenuButton>
|
||||
</AboutDialog>
|
||||
</SidebarMenuItem>
|
||||
</SidebarMenu>
|
||||
</SidebarGroupContent>
|
||||
</SidebarGroup>
|
||||
</SidebarContent>
|
||||
|
||||
{/* Sidebar footer */}
|
||||
|
||||
@@ -40,8 +40,8 @@ export function ChangePasswordDialog({ open, onOpenChange }: ChangePasswordDialo
|
||||
return
|
||||
}
|
||||
|
||||
if (newPassword.length < 4) {
|
||||
setError(t("passwordTooShort", { min: 4 }))
|
||||
if (newPassword.length < 6) {
|
||||
setError(t("passwordTooShort", { min: 6 }))
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
151
frontend/components/auth/login-boot-screen.tsx
Normal file
151
frontend/components/auth/login-boot-screen.tsx
Normal file
@@ -0,0 +1,151 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
type BootLine = {
|
||||
text: string
|
||||
className?: string
|
||||
}
|
||||
|
||||
const BOOT_LINES: BootLine[] = [
|
||||
{ text: "> booting ORBIT...", className: "text-yellow-500" },
|
||||
{ text: "> initializing secure terminal...", className: "text-zinc-200" },
|
||||
{ text: "> loading modules: auth, i18n, ui...", className: "text-zinc-200" },
|
||||
{ text: "> checking session...", className: "text-yellow-500" },
|
||||
{ text: "> ready.", className: "text-green-500" },
|
||||
]
|
||||
|
||||
const SUCCESS_LINES: BootLine[] = [
|
||||
{ text: "> authentication successful", className: "text-green-500" },
|
||||
{ text: "> loading user profile...", className: "text-zinc-200" },
|
||||
{ text: "> initializing dashboard...", className: "text-zinc-200" },
|
||||
{ text: "> preparing workspace...", className: "text-yellow-500" },
|
||||
{ text: "> access granted.", className: "text-green-500" },
|
||||
]
|
||||
|
||||
// Keep the log animation snappy so it can complete within the 0.6s splash.
|
||||
const STEP_DELAYS_MS = [70, 90, 110, 130, 150]
|
||||
|
||||
const GLITCH_MS = 600
|
||||
|
||||
export function LoginBootScreen({ className, success = false }: { className?: string; success?: boolean }) {
|
||||
const [visible, setVisible] = React.useState(0)
|
||||
const [entered, setEntered] = React.useState(false)
|
||||
const [glitchOn, setGlitchOn] = React.useState(true)
|
||||
|
||||
// 根据 success 状态选择显示的行
|
||||
const displayLines = success ? SUCCESS_LINES : BOOT_LINES
|
||||
|
||||
React.useEffect(() => {
|
||||
const raf = requestAnimationFrame(() => setEntered(true))
|
||||
return () => cancelAnimationFrame(raf)
|
||||
}, [])
|
||||
|
||||
React.useEffect(() => {
|
||||
setGlitchOn(true)
|
||||
const timer = setTimeout(() => setGlitchOn(false), GLITCH_MS)
|
||||
return () => clearTimeout(timer)
|
||||
}, [])
|
||||
|
||||
React.useEffect(() => {
|
||||
setVisible(0)
|
||||
|
||||
const timers: Array<ReturnType<typeof setTimeout>> = []
|
||||
let acc = 0
|
||||
|
||||
for (let i = 0; i < displayLines.length; i++) {
|
||||
acc += STEP_DELAYS_MS[i] ?? 160
|
||||
timers.push(
|
||||
setTimeout(() => {
|
||||
setVisible((prev) => Math.max(prev, i + 1))
|
||||
}, acc)
|
||||
)
|
||||
}
|
||||
|
||||
return () => {
|
||||
timers.forEach(clearTimeout)
|
||||
}
|
||||
}, [displayLines])
|
||||
|
||||
const progress = Math.round((Math.min(visible, displayLines.length) / displayLines.length) * 100)
|
||||
|
||||
return (
|
||||
<div className={cn("relative flex min-h-svh flex-col bg-black", glitchOn && "orbit-splash-glitch", className)}>
|
||||
{/* Main content area */}
|
||||
<div className="relative z-10 flex-1 flex items-center justify-center p-6">
|
||||
<div
|
||||
className={cn(
|
||||
"border-zinc-700 bg-zinc-900/80 backdrop-blur-sm z-0 w-full max-w-xl rounded-xl border transition-opacity duration-200 ease-out motion-reduce:transition-none",
|
||||
entered ? "opacity-100" : "opacity-0"
|
||||
)}
|
||||
>
|
||||
{/* Terminal header */}
|
||||
<div className="border-zinc-700 flex items-center gap-x-2 border-b px-4 py-3">
|
||||
<div className="flex flex-row gap-x-2">
|
||||
<div className="h-3 w-3 rounded-full bg-red-500" />
|
||||
<div className="h-3 w-3 rounded-full bg-yellow-500" />
|
||||
<div className="h-3 w-3 rounded-full bg-green-500" />
|
||||
</div>
|
||||
<span className="ml-2 text-xs text-zinc-400 font-mono">ORBIT · boot</span>
|
||||
<span className="ml-auto text-xs text-zinc-500 font-mono">{progress}%</span>
|
||||
</div>
|
||||
|
||||
{/* Terminal body */}
|
||||
<div className="p-4 font-mono text-sm min-h-[280px]">
|
||||
<div className="mb-6 text-center">
|
||||
<div
|
||||
className={cn(
|
||||
"text-3xl sm:text-4xl !font-bold tracking-wide",
|
||||
"bg-gradient-to-r from-[#FF10F0] via-[#B026FF] to-[#FF10F0] bg-clip-text text-transparent",
|
||||
glitchOn && "orbit-glitch-text"
|
||||
)}
|
||||
data-text="ORBIT"
|
||||
style={{
|
||||
filter: "drop-shadow(0 0 20px rgba(255, 16, 240, 0.5)) drop-shadow(0 0 40px rgba(176, 38, 255, 0.3))"
|
||||
}}
|
||||
>
|
||||
ORBIT
|
||||
</div>
|
||||
<div className="mt-3 flex items-center gap-3 text-zinc-400 text-xs">
|
||||
<span className="h-px flex-1 bg-gradient-to-r from-transparent via-[#B026FF] to-transparent" />
|
||||
<span className="whitespace-nowrap">system bootstrap</span>
|
||||
<span className="h-px flex-1 bg-gradient-to-r from-transparent via-[#B026FF] to-transparent" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="space-y-1">
|
||||
{displayLines.slice(0, visible).map((line, idx) => (
|
||||
<div key={idx} className={cn("whitespace-pre-wrap", line.className)}>
|
||||
{line.text}
|
||||
</div>
|
||||
))}
|
||||
|
||||
{/* Cursor */}
|
||||
<div className="text-green-500">
|
||||
<span className="inline-block h-4 w-2 align-middle bg-green-500 animate-pulse" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Progress bar */}
|
||||
<div className="mt-6">
|
||||
<div className="h-1.5 w-full rounded bg-zinc-800 overflow-hidden">
|
||||
<div
|
||||
className="h-full bg-gradient-to-r from-[#FF10F0] to-[#B026FF]"
|
||||
style={{
|
||||
width: `${progress}%`,
|
||||
boxShadow: "0 0 10px rgba(255, 16, 240, 0.5), 0 0 20px rgba(176, 38, 255, 0.3)"
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
<div className="mt-2 text-xs text-zinc-500">
|
||||
Checking session…
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -34,36 +34,6 @@ interface BulkAddUrlsDialogProps {
|
||||
onSuccess?: () => void
|
||||
}
|
||||
|
||||
const ASSET_TYPE_LABELS: Record<AssetType, { title: string; description: string; placeholder: string }> = {
|
||||
endpoint: {
|
||||
title: 'Bulk Add Endpoints',
|
||||
description: 'Enter endpoint URL list, one per line.',
|
||||
placeholder: `Please enter endpoint URLs, one per line
|
||||
Example:
|
||||
https://example.com/api/v1
|
||||
https://example.com/api/v2
|
||||
https://example.com/login`,
|
||||
},
|
||||
website: {
|
||||
title: 'Bulk Add Websites',
|
||||
description: 'Enter website URL list, one per line.',
|
||||
placeholder: `Please enter website URLs, one per line
|
||||
Example:
|
||||
https://example.com
|
||||
https://www.example.com
|
||||
https://api.example.com`,
|
||||
},
|
||||
directory: {
|
||||
title: 'Bulk Add Directories',
|
||||
description: 'Enter directory URL list, one per line.',
|
||||
placeholder: `Please enter directory URLs, one per line
|
||||
Example:
|
||||
https://example.com/admin
|
||||
https://example.com/api
|
||||
https://example.com/uploads`,
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Bulk add URLs dialog component
|
||||
*
|
||||
@@ -80,6 +50,14 @@ export function BulkAddUrlsDialog({
|
||||
onSuccess,
|
||||
}: BulkAddUrlsDialogProps) {
|
||||
const tBulkAdd = useTranslations("bulkAdd.common")
|
||||
const tUrl = useTranslations("bulkAdd.url")
|
||||
|
||||
// Get translated labels based on asset type
|
||||
const labels = {
|
||||
title: tUrl(`${assetType}.title`),
|
||||
description: tUrl(`${assetType}.description`),
|
||||
placeholder: tUrl(`${assetType}.placeholder`),
|
||||
}
|
||||
|
||||
// Dialog open/close state
|
||||
const [internalOpen, setInternalOpen] = useState(false)
|
||||
@@ -121,7 +99,6 @@ export function BulkAddUrlsDialog({
|
||||
}
|
||||
|
||||
const mutation = getMutation()
|
||||
const labels = ASSET_TYPE_LABELS[assetType]
|
||||
|
||||
// Handle input changes
|
||||
const handleInputChange = (value: string) => {
|
||||
@@ -222,7 +199,7 @@ export function BulkAddUrlsDialog({
|
||||
<DialogTrigger asChild>
|
||||
<Button size="sm" variant="outline">
|
||||
<Plus className="h-4 w-4" />
|
||||
Bulk Add
|
||||
{tBulkAdd("bulkAdd")}
|
||||
</Button>
|
||||
</DialogTrigger>
|
||||
)}
|
||||
@@ -242,7 +219,7 @@ export function BulkAddUrlsDialog({
|
||||
<div className="grid gap-4 py-4">
|
||||
<div className="grid gap-2">
|
||||
<Label htmlFor="urls">
|
||||
URL List <span className="text-destructive">*</span>
|
||||
{tUrl("label")} <span className="text-destructive">*</span>
|
||||
</Label>
|
||||
<div className="flex border rounded-md overflow-hidden h-[220px]">
|
||||
{/* Line number column */}
|
||||
@@ -278,39 +255,43 @@ export function BulkAddUrlsDialog({
|
||||
{validationResult && (
|
||||
<div className="text-xs space-y-1">
|
||||
<div className="text-muted-foreground">
|
||||
Valid: {validationResult.validCount} items
|
||||
{tUrl("valid", { count: validationResult.validCount })}
|
||||
{validationResult.duplicateCount > 0 && (
|
||||
<span className="text-yellow-600 ml-2">
|
||||
Duplicate: {validationResult.duplicateCount} items
|
||||
{tUrl("duplicate", { count: validationResult.duplicateCount })}
|
||||
</span>
|
||||
)}
|
||||
{validationResult.invalidCount > 0 && (
|
||||
<span className="text-destructive ml-2">
|
||||
Invalid: {validationResult.invalidCount} items
|
||||
{tUrl("invalid", { count: validationResult.invalidCount })}
|
||||
</span>
|
||||
)}
|
||||
{validationResult.mismatchedCount > 0 && (
|
||||
<span className="text-destructive ml-2">
|
||||
Mismatched: {validationResult.mismatchedCount} items
|
||||
{tUrl("mismatched", { count: validationResult.mismatchedCount })}
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
{validationResult.firstError && (
|
||||
<div className="text-destructive">
|
||||
Line {validationResult.firstError.index + 1}: "
|
||||
{validationResult.firstError.url.length > 50
|
||||
? validationResult.firstError.url.substring(0, 50) + '...'
|
||||
: validationResult.firstError.url}" -{" "}
|
||||
{validationResult.firstError.error}
|
||||
{tUrl("lineError", {
|
||||
line: validationResult.firstError.index + 1,
|
||||
value: validationResult.firstError.url.length > 50
|
||||
? validationResult.firstError.url.substring(0, 50) + '...'
|
||||
: validationResult.firstError.url,
|
||||
error: validationResult.firstError.error,
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
{validationResult.firstMismatch && !validationResult.firstError && (
|
||||
<div className="text-destructive">
|
||||
Line {validationResult.firstMismatch.index + 1}: "
|
||||
{validationResult.firstMismatch.url.length > 50
|
||||
? validationResult.firstMismatch.url.substring(0, 50) + '...'
|
||||
: validationResult.firstMismatch.url}" -
|
||||
URL does not belong to target {targetName}, please remove before submitting
|
||||
{tUrl("mismatchError", {
|
||||
line: validationResult.firstMismatch.index + 1,
|
||||
value: validationResult.firstMismatch.url.length > 50
|
||||
? validationResult.firstMismatch.url.substring(0, 50) + '...'
|
||||
: validationResult.firstMismatch.url,
|
||||
target: targetName || '',
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
@@ -325,7 +306,7 @@ export function BulkAddUrlsDialog({
|
||||
onClick={() => handleOpenChange(false)}
|
||||
disabled={mutation.isPending}
|
||||
>
|
||||
Cancel
|
||||
{tBulkAdd("cancel")}
|
||||
</Button>
|
||||
<Button
|
||||
type="submit"
|
||||
@@ -334,12 +315,12 @@ export function BulkAddUrlsDialog({
|
||||
{mutation.isPending ? (
|
||||
<>
|
||||
<LoadingSpinner />
|
||||
Creating...
|
||||
{tUrl("creating")}
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<Plus className="h-4 w-4" />
|
||||
Bulk Add
|
||||
{tBulkAdd("bulkAdd")}
|
||||
</>
|
||||
)}
|
||||
</Button>
|
||||
|
||||
@@ -94,7 +94,7 @@ export function AssetTrendChart() {
|
||||
} satisfies ChartConfig), [t])
|
||||
|
||||
// Visible series state (show all by default)
|
||||
const [visibleSeries, setVisibleSeries] = useState<Set<SeriesKey>>(new Set(ALL_SERIES))
|
||||
const [visibleSeries, setVisibleSeries] = useState<Set<SeriesKey>>(() => new Set(ALL_SERIES))
|
||||
|
||||
// Currently hovered line
|
||||
const [hoveredLine, setHoveredLine] = useState<SeriesKey | null>(null)
|
||||
@@ -136,10 +136,13 @@ export function AssetTrendChart() {
|
||||
}
|
||||
|
||||
// Get latest data (use latest value from raw data)
|
||||
const latest = rawData && rawData.length > 0 ? rawData[rawData.length - 1] : null
|
||||
|
||||
const latest = useMemo(() =>
|
||||
rawData && rawData.length > 0 ? rawData[rawData.length - 1] : null,
|
||||
[rawData]
|
||||
)
|
||||
|
||||
// Display data: show hovered data when hovering, otherwise show latest data
|
||||
const displayData = activeData || latest
|
||||
const displayData = useMemo(() => activeData || latest, [activeData, latest])
|
||||
|
||||
return (
|
||||
<Card>
|
||||
|
||||
@@ -129,6 +129,8 @@ export function DashboardDataTable() {
|
||||
},
|
||||
tooltips: {
|
||||
vulnDetails: t('tooltips.vulnDetails'),
|
||||
reviewed: t('tooltips.reviewed'),
|
||||
pending: t('tooltips.pending'),
|
||||
},
|
||||
severity: {
|
||||
critical: t('severity.critical'),
|
||||
@@ -230,7 +232,7 @@ export function DashboardDataTable() {
|
||||
cancelled: t('common.status.cancelled'),
|
||||
completed: t('common.status.completed'),
|
||||
failed: t('common.status.failed'),
|
||||
initiated: t('common.status.pending'),
|
||||
pending: t('common.status.pending'),
|
||||
running: t('common.status.running'),
|
||||
},
|
||||
summary: {
|
||||
@@ -365,6 +367,7 @@ export function DashboardDataTable() {
|
||||
columns={scanColumns}
|
||||
getRowId={(row) => String(row.id)}
|
||||
enableRowSelection={false}
|
||||
enableAutoColumnSizing
|
||||
pagination={scanPagination}
|
||||
onPaginationChange={setScanPagination}
|
||||
paginationInfo={scanPaginationInfo}
|
||||
|
||||
@@ -49,7 +49,7 @@ export function DashboardScanHistory() {
|
||||
cancelled: tCommon("status.cancelled"),
|
||||
completed: tCommon("status.completed"),
|
||||
failed: tCommon("status.failed"),
|
||||
initiated: tCommon("status.pending"),
|
||||
pending: tCommon("status.pending"),
|
||||
running: tCommon("status.running"),
|
||||
},
|
||||
summary: {
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"use client"
|
||||
|
||||
import { memo } from "react"
|
||||
import { useAssetStatistics } from "@/hooks/use-dashboard"
|
||||
import { Card, CardAction, CardDescription, CardFooter, CardHeader, CardTitle } from "@/components/ui/card"
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
@@ -8,7 +9,7 @@ import { IconTarget, IconStack2, IconBug, IconPlayerPlay, IconTrendingUp, IconTr
|
||||
import { useTranslations } from "next-intl"
|
||||
import { useLocale } from "next-intl"
|
||||
|
||||
function TrendBadge({ change }: { change: number }) {
|
||||
const TrendBadge = memo(function TrendBadge({ change }: { change: number }) {
|
||||
if (change === 0) return null
|
||||
|
||||
const isPositive = change > 0
|
||||
@@ -24,9 +25,9 @@ function TrendBadge({ change }: { change: number }) {
|
||||
{isPositive ? '+' : ''}{change}
|
||||
</Badge>
|
||||
)
|
||||
}
|
||||
})
|
||||
|
||||
function StatCard({
|
||||
const StatCard = memo(function StatCard({
|
||||
title,
|
||||
value,
|
||||
change,
|
||||
@@ -66,7 +67,7 @@ function StatCard({
|
||||
</CardFooter>
|
||||
</Card>
|
||||
)
|
||||
}
|
||||
})
|
||||
|
||||
function formatUpdateTime(dateStr: string | null, locale: string, noDataText: string) {
|
||||
if (!dateStr) return noDataText
|
||||
|
||||
@@ -23,24 +23,18 @@ import {
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
import { Skeleton } from "@/components/ui/skeleton"
|
||||
import { IconExternalLink } from "@tabler/icons-react"
|
||||
import { Circle, CheckCircle2 } from "lucide-react"
|
||||
import type { VulnerabilitySeverity } from "@/types/vulnerability.types"
|
||||
import { useTranslations } from "next-intl"
|
||||
import { useLocale } from "next-intl"
|
||||
|
||||
// Unified vulnerability severity color configuration (consistent with charts)
|
||||
const severityStyles: Record<VulnerabilitySeverity, string> = {
|
||||
critical: "bg-[#da3633]/10 text-[#da3633] border border-[#da3633]/20 dark:text-[#f85149]",
|
||||
high: "bg-[#d29922]/10 text-[#d29922] border border-[#d29922]/20",
|
||||
medium: "bg-[#d4a72c]/10 text-[#d4a72c] border border-[#d4a72c]/20",
|
||||
low: "bg-[#238636]/10 text-[#238636] border border-[#238636]/20 dark:text-[#3fb950]",
|
||||
info: "bg-[#848d97]/10 text-[#848d97] border border-[#848d97]/20",
|
||||
}
|
||||
import { SEVERITY_STYLES } from "@/lib/severity-config"
|
||||
|
||||
export function RecentVulnerabilities() {
|
||||
const router = useRouter()
|
||||
const t = useTranslations("dashboard.recentVulns")
|
||||
const tSeverity = useTranslations("severity")
|
||||
const tColumns = useTranslations("columns")
|
||||
const tTooltips = useTranslations("tooltips")
|
||||
const locale = useLocale()
|
||||
|
||||
const formatTime = (dateStr: string) => {
|
||||
@@ -54,11 +48,11 @@ export function RecentVulnerabilities() {
|
||||
}
|
||||
|
||||
const severityConfig = useMemo(() => ({
|
||||
critical: { label: tSeverity("critical"), className: severityStyles.critical },
|
||||
high: { label: tSeverity("high"), className: severityStyles.high },
|
||||
medium: { label: tSeverity("medium"), className: severityStyles.medium },
|
||||
low: { label: tSeverity("low"), className: severityStyles.low },
|
||||
info: { label: tSeverity("info"), className: severityStyles.info },
|
||||
critical: { label: tSeverity("critical"), className: SEVERITY_STYLES.critical.className },
|
||||
high: { label: tSeverity("high"), className: SEVERITY_STYLES.high.className },
|
||||
medium: { label: tSeverity("medium"), className: SEVERITY_STYLES.medium.className },
|
||||
low: { label: tSeverity("low"), className: SEVERITY_STYLES.low.className },
|
||||
info: { label: tSeverity("info"), className: SEVERITY_STYLES.info.className },
|
||||
}), [tSeverity])
|
||||
|
||||
const { data, isLoading } = useQuery({
|
||||
@@ -100,6 +94,7 @@ export function RecentVulnerabilities() {
|
||||
<TableHeader>
|
||||
<TableRow>
|
||||
<TableHead>{tColumns("common.status")}</TableHead>
|
||||
<TableHead>{tColumns("vulnerability.severity")}</TableHead>
|
||||
<TableHead>{tColumns("vulnerability.source")}</TableHead>
|
||||
<TableHead>{tColumns("common.type")}</TableHead>
|
||||
<TableHead>{tColumns("common.url")}</TableHead>
|
||||
@@ -107,31 +102,52 @@ export function RecentVulnerabilities() {
|
||||
</TableRow>
|
||||
</TableHeader>
|
||||
<TableBody>
|
||||
{vulnerabilities.map((vuln: any) => (
|
||||
<TableRow
|
||||
key={vuln.id}
|
||||
className="cursor-pointer hover:bg-muted/50"
|
||||
onClick={() => router.push(`/vulnerabilities/?id=${vuln.id}`)}
|
||||
>
|
||||
<TableCell>
|
||||
<Badge className={severityConfig[vuln.severity as VulnerabilitySeverity]?.className}>
|
||||
{severityConfig[vuln.severity as VulnerabilitySeverity]?.label ?? vuln.severity}
|
||||
</Badge>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<Badge variant="outline">{vuln.source}</Badge>
|
||||
</TableCell>
|
||||
<TableCell className="font-medium max-w-[120px] truncate">
|
||||
{vuln.vulnType}
|
||||
</TableCell>
|
||||
<TableCell className="text-muted-foreground text-xs max-w-[200px] truncate">
|
||||
{vuln.url}
|
||||
</TableCell>
|
||||
<TableCell className="text-muted-foreground text-xs whitespace-nowrap">
|
||||
{formatTime(vuln.createdAt)}
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
))}
|
||||
{vulnerabilities.map((vuln: any) => {
|
||||
const isReviewed = vuln.isReviewed
|
||||
const isPending = !isReviewed
|
||||
|
||||
return (
|
||||
<TableRow
|
||||
key={vuln.id}
|
||||
className="cursor-pointer hover:bg-muted/50"
|
||||
onClick={() => router.push(`/vulnerabilities/?id=${vuln.id}`)}
|
||||
>
|
||||
<TableCell>
|
||||
<Badge
|
||||
variant="outline"
|
||||
className={`transition-all gap-1.5 cursor-default ${isPending
|
||||
? "bg-blue-500/10 text-blue-600 border-blue-500/30 dark:text-blue-400 dark:border-blue-400/30"
|
||||
: "bg-muted/50 text-muted-foreground border-muted-foreground/20"
|
||||
}`}
|
||||
>
|
||||
{isPending ? (
|
||||
<Circle className="h-3 w-3" />
|
||||
) : (
|
||||
<CheckCircle2 className="h-3 w-3" />
|
||||
)}
|
||||
{isPending ? tTooltips("pending") : tTooltips("reviewed")}
|
||||
</Badge>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<Badge className={severityConfig[vuln.severity as VulnerabilitySeverity]?.className}>
|
||||
{severityConfig[vuln.severity as VulnerabilitySeverity]?.label ?? vuln.severity}
|
||||
</Badge>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<Badge variant="outline">{vuln.source}</Badge>
|
||||
</TableCell>
|
||||
<TableCell className="font-medium max-w-[120px] truncate">
|
||||
{vuln.vulnType}
|
||||
</TableCell>
|
||||
<TableCell className="text-muted-foreground text-xs max-w-[200px] truncate">
|
||||
{vuln.url}
|
||||
</TableCell>
|
||||
<TableCell className="text-muted-foreground text-xs whitespace-nowrap">
|
||||
{formatTime(vuln.createdAt)}
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
)
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</div>
|
||||
|
||||
@@ -18,15 +18,7 @@ import {
|
||||
} from "@/components/ui/chart"
|
||||
import { Skeleton } from "@/components/ui/skeleton"
|
||||
import { useTranslations } from "next-intl"
|
||||
|
||||
// 漏洞严重程度使用固定语义化颜色
|
||||
const SEVERITY_COLORS = {
|
||||
critical: "#dc2626", // 红色
|
||||
high: "#f97316", // 橙色
|
||||
medium: "#eab308", // 黄色
|
||||
low: "#3b82f6", // 蓝色
|
||||
info: "#6b7280", // 灰色
|
||||
}
|
||||
import { SEVERITY_COLORS } from "@/lib/severity-config"
|
||||
|
||||
export function VulnSeverityChart() {
|
||||
const { data, isLoading } = useAssetStatistics()
|
||||
|
||||
@@ -14,10 +14,7 @@ export interface DirectoryTranslations {
|
||||
url: string
|
||||
status: string
|
||||
length: string
|
||||
words: string
|
||||
lines: string
|
||||
contentType: string
|
||||
duration: string
|
||||
createdAt: string
|
||||
}
|
||||
actions: {
|
||||
@@ -56,15 +53,6 @@ function StatusBadge({ status }: { status: number | null }) {
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Format duration (nanoseconds to milliseconds)
|
||||
*/
|
||||
function formatDuration(nanoseconds: number | null): string {
|
||||
if (nanoseconds === null) return "-"
|
||||
const milliseconds = nanoseconds / 1000000
|
||||
return `${milliseconds.toFixed(2)} ms`
|
||||
}
|
||||
|
||||
/**
|
||||
* Create directory table column definitions
|
||||
*/
|
||||
@@ -138,34 +126,6 @@ export function createDirectoryColumns({
|
||||
return <span>{length !== null ? length.toLocaleString() : "-"}</span>
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "words",
|
||||
size: 80,
|
||||
minSize: 60,
|
||||
maxSize: 120,
|
||||
meta: { title: t.columns.words },
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title={t.columns.words} />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const words = row.getValue("words") as number | null
|
||||
return <span>{words !== null ? words.toLocaleString() : "-"}</span>
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "lines",
|
||||
size: 80,
|
||||
minSize: 60,
|
||||
maxSize: 120,
|
||||
meta: { title: t.columns.lines },
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title={t.columns.lines} />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const lines = row.getValue("lines") as number | null
|
||||
return <span>{lines !== null ? lines.toLocaleString() : "-"}</span>
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "contentType",
|
||||
size: 120,
|
||||
@@ -185,20 +145,6 @@ export function createDirectoryColumns({
|
||||
)
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "duration",
|
||||
size: 100,
|
||||
minSize: 80,
|
||||
maxSize: 150,
|
||||
meta: { title: t.columns.duration },
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title={t.columns.duration} />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const duration = row.getValue("duration") as number | null
|
||||
return <span className="text-muted-foreground">{formatDuration(duration)}</span>
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "createdAt",
|
||||
size: 150,
|
||||
|
||||
@@ -18,7 +18,7 @@ const DIRECTORY_FILTER_FIELDS: FilterField[] = [
|
||||
// Directory page filter examples
|
||||
const DIRECTORY_FILTER_EXAMPLES = [
|
||||
'url="/admin" && status="200"',
|
||||
'url="/api/*" || url="/config/*"',
|
||||
'url="/api/" || url="/config/"',
|
||||
'status="200" && url!="/index.html"',
|
||||
]
|
||||
|
||||
@@ -114,7 +114,7 @@ export function DirectoriesDataTable({
|
||||
onSelectionChange={handleSelectionChange}
|
||||
// Bulk operations
|
||||
onBulkDelete={onBulkDelete}
|
||||
bulkDeleteLabel="Delete"
|
||||
bulkDeleteLabel={tActions("delete")}
|
||||
showAddButton={false}
|
||||
// Bulk add button
|
||||
onBulkAdd={onBulkAdd}
|
||||
|
||||
@@ -11,6 +11,7 @@ import { useTargetDirectories, useScanDirectories } from "@/hooks/use-directorie
|
||||
import { useTarget } from "@/hooks/use-targets"
|
||||
import { DirectoryService } from "@/services/directory.service"
|
||||
import { BulkAddUrlsDialog } from "@/components/common/bulk-add-urls-dialog"
|
||||
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
|
||||
import { getDateLocale } from "@/lib/date-utils"
|
||||
import type { TargetType } from "@/lib/url-validator"
|
||||
import type { Directory } from "@/types/directory.types"
|
||||
@@ -29,11 +30,14 @@ export function DirectoriesView({
|
||||
})
|
||||
const [selectedDirectories, setSelectedDirectories] = useState<Directory[]>([])
|
||||
const [bulkAddDialogOpen, setBulkAddDialogOpen] = useState(false)
|
||||
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
|
||||
const [isDeleting, setIsDeleting] = useState(false)
|
||||
|
||||
const [filterQuery, setFilterQuery] = useState("")
|
||||
const [isSearching, setIsSearching] = useState(false)
|
||||
|
||||
// Internationalization
|
||||
const t = useTranslations("pages.targetDetail")
|
||||
const tColumns = useTranslations("columns")
|
||||
const tCommon = useTranslations("common")
|
||||
const tToast = useTranslations("toast")
|
||||
@@ -240,6 +244,26 @@ export function DirectoriesView({
|
||||
URL.revokeObjectURL(url)
|
||||
}
|
||||
|
||||
// Handle bulk delete
|
||||
const handleBulkDelete = async () => {
|
||||
if (selectedDirectories.length === 0) return
|
||||
|
||||
setIsDeleting(true)
|
||||
try {
|
||||
const ids = selectedDirectories.map(d => d.id)
|
||||
const result = await DirectoryService.bulkDelete(ids)
|
||||
toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
|
||||
setSelectedDirectories([])
|
||||
setDeleteDialogOpen(false)
|
||||
refetch()
|
||||
} catch (error) {
|
||||
console.error("Failed to delete directories", error)
|
||||
toast.error(tToast("deleteFailed"))
|
||||
} finally {
|
||||
setIsDeleting(false)
|
||||
}
|
||||
}
|
||||
|
||||
if (error) {
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center py-12">
|
||||
@@ -280,10 +304,13 @@ export function DirectoriesView({
|
||||
onSelectionChange={handleSelectionChange}
|
||||
onDownloadAll={handleDownloadAll}
|
||||
onDownloadSelected={handleDownloadSelected}
|
||||
onBulkDelete={targetId ? () => setDeleteDialogOpen(true) : undefined}
|
||||
onBulkAdd={targetId ? () => setBulkAddDialogOpen(true) : undefined}
|
||||
/>
|
||||
|
||||
{/* Bulk add dialog */}
|
||||
{/* Bulk add dialog */
|
||||
/* ... */
|
||||
}
|
||||
{targetId && (
|
||||
<BulkAddUrlsDialog
|
||||
targetId={targetId}
|
||||
@@ -295,6 +322,17 @@ export function DirectoriesView({
|
||||
onSuccess={() => refetch()}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Delete confirmation dialog */}
|
||||
<ConfirmDialog
|
||||
open={deleteDialogOpen}
|
||||
onOpenChange={setDeleteDialogOpen}
|
||||
title={tCommon("actions.confirmDelete")}
|
||||
description={tCommon("actions.deleteConfirmMessage", { count: selectedDirectories.length })}
|
||||
onConfirm={handleBulkDelete}
|
||||
loading={isDeleting}
|
||||
variant="destructive"
|
||||
/>
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ const ENDPOINT_FILTER_FIELDS: FilterField[] = [
|
||||
|
||||
// Endpoint page filter examples
|
||||
const ENDPOINT_FILTER_EXAMPLES = [
|
||||
'url="/api/*" && status="200"',
|
||||
'url="/api/" && status="200"',
|
||||
'host="api.example.com" || host="admin.example.com"',
|
||||
'title="Dashboard" && status!="404"',
|
||||
'tech="php" || tech="wordpress"',
|
||||
@@ -36,6 +36,7 @@ interface EndpointsDataTableProps<TData extends { id: number | string }, TValue>
|
||||
onAddNew?: () => void
|
||||
addButtonText?: string
|
||||
onSelectionChange?: (selectedRows: TData[]) => void
|
||||
onBulkDelete?: () => void
|
||||
pagination?: { pageIndex: number; pageSize: number }
|
||||
onPaginationChange?: (pagination: { pageIndex: number; pageSize: number }) => void
|
||||
totalCount?: number
|
||||
@@ -54,6 +55,7 @@ export function EndpointsDataTable<TData extends { id: number | string }, TValue
|
||||
onAddNew,
|
||||
addButtonText = "Add",
|
||||
onSelectionChange,
|
||||
onBulkDelete,
|
||||
pagination: externalPagination,
|
||||
onPaginationChange,
|
||||
totalCount,
|
||||
@@ -135,7 +137,8 @@ export function EndpointsDataTable<TData extends { id: number | string }, TValue
|
||||
// Selection
|
||||
onSelectionChange={onSelectionChange}
|
||||
// Bulk operations
|
||||
showBulkDelete={false}
|
||||
onBulkDelete={onBulkDelete}
|
||||
bulkDeleteLabel={tActions("delete")}
|
||||
onAddNew={onAddNew}
|
||||
addButtonLabel={addButtonText}
|
||||
// Bulk add button
|
||||
|
||||
@@ -10,6 +10,7 @@ import { createEndpointColumns } from "./endpoints-columns"
|
||||
import { LoadingSpinner } from "@/components/loading-spinner"
|
||||
import { DataTableSkeleton } from "@/components/ui/data-table-skeleton"
|
||||
import { BulkAddUrlsDialog } from "@/components/common/bulk-add-urls-dialog"
|
||||
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
|
||||
import { getDateLocale } from "@/lib/date-utils"
|
||||
import type { TargetType } from "@/lib/url-validator"
|
||||
import {
|
||||
@@ -41,6 +42,8 @@ export function EndpointsDetailView({
|
||||
const [endpointToDelete, setEndpointToDelete] = useState<Endpoint | null>(null)
|
||||
const [selectedEndpoints, setSelectedEndpoints] = useState<Endpoint[]>([])
|
||||
const [bulkAddDialogOpen, setBulkAddDialogOpen] = useState(false)
|
||||
const [bulkDeleteDialogOpen, setBulkDeleteDialogOpen] = useState(false)
|
||||
const [isDeleting, setIsDeleting] = useState(false)
|
||||
|
||||
// Pagination state management
|
||||
const [pagination, setPagination] = useState({
|
||||
@@ -280,6 +283,26 @@ export function EndpointsDetailView({
|
||||
URL.revokeObjectURL(url)
|
||||
}
|
||||
|
||||
// Handle bulk delete
|
||||
const handleBulkDelete = async () => {
|
||||
if (selectedEndpoints.length === 0) return
|
||||
|
||||
setIsDeleting(true)
|
||||
try {
|
||||
const ids = selectedEndpoints.map(e => e.id)
|
||||
const result = await EndpointService.bulkDelete(ids)
|
||||
toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
|
||||
setSelectedEndpoints([])
|
||||
setBulkDeleteDialogOpen(false)
|
||||
refetch()
|
||||
} catch (error) {
|
||||
console.error("Failed to delete endpoints", error)
|
||||
toast.error(tToast("deleteFailed"))
|
||||
} finally {
|
||||
setIsDeleting(false)
|
||||
}
|
||||
}
|
||||
|
||||
// Error state
|
||||
if (error) {
|
||||
return (
|
||||
@@ -327,6 +350,7 @@ export function EndpointsDetailView({
|
||||
onSelectionChange={handleSelectionChange}
|
||||
onDownloadAll={handleDownloadAll}
|
||||
onDownloadSelected={handleDownloadSelected}
|
||||
onBulkDelete={targetId ? () => setBulkDeleteDialogOpen(true) : undefined}
|
||||
onBulkAdd={targetId ? () => setBulkAddDialogOpen(true) : undefined}
|
||||
/>
|
||||
|
||||
@@ -343,7 +367,18 @@ export function EndpointsDetailView({
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Delete confirmation dialog */}
|
||||
{/* Bulk delete confirmation dialog */}
|
||||
<ConfirmDialog
|
||||
open={bulkDeleteDialogOpen}
|
||||
onOpenChange={setBulkDeleteDialogOpen}
|
||||
title={tConfirm("deleteTitle")}
|
||||
description={tCommon("actions.deleteConfirmMessage", { count: selectedEndpoints.length })}
|
||||
onConfirm={handleBulkDelete}
|
||||
loading={isDeleting}
|
||||
variant="destructive"
|
||||
/>
|
||||
|
||||
{/* Single delete confirmation dialog */}
|
||||
<AlertDialog open={deleteDialogOpen} onOpenChange={setDeleteDialogOpen}>
|
||||
<AlertDialogContent>
|
||||
<AlertDialogHeader>
|
||||
|
||||
@@ -9,6 +9,7 @@ import { ExpandableCell, ExpandableMonoCell } from "@/components/ui/data-table/e
|
||||
import { ChevronDown, ChevronUp } from "lucide-react"
|
||||
import { useTranslations } from "next-intl"
|
||||
import type { FingerPrintHubFingerprint } from "@/types/fingerprint.types"
|
||||
import { getSeverityStyle } from "@/lib/severity-config"
|
||||
|
||||
interface ColumnOptions {
|
||||
formatDate: (date: string) => string
|
||||
@@ -18,15 +19,7 @@ interface ColumnOptions {
|
||||
* Severity badge with color coding (matching Vulnerabilities style)
|
||||
*/
|
||||
function SeverityBadge({ severity }: { severity: string }) {
|
||||
const severityConfig: Record<string, { className: string }> = {
|
||||
critical: { className: "bg-[#da3633]/10 text-[#da3633] border border-[#da3633]/20 dark:text-[#f85149]" },
|
||||
high: { className: "bg-[#d29922]/10 text-[#d29922] border border-[#d29922]/20" },
|
||||
medium: { className: "bg-[#d4a72c]/10 text-[#d4a72c] border border-[#d4a72c]/20" },
|
||||
low: { className: "bg-[#238636]/10 text-[#238636] border border-[#238636]/20 dark:text-[#3fb950]" },
|
||||
info: { className: "bg-[#848d97]/10 text-[#848d97] border border-[#848d97]/20" },
|
||||
}
|
||||
|
||||
const config = severityConfig[severity?.toLowerCase()] || severityConfig.info
|
||||
const config = getSeverityStyle(severity)
|
||||
|
||||
return (
|
||||
<Badge className={config.className}>
|
||||
|
||||
@@ -238,15 +238,39 @@ export function ImportFingerprintDialog({
|
||||
// Frontend basic validation for JSON files
|
||||
try {
|
||||
const text = await file.text()
|
||||
const json = JSON.parse(text)
|
||||
let json: any
|
||||
|
||||
// Try standard JSON first
|
||||
try {
|
||||
json = JSON.parse(text)
|
||||
} catch {
|
||||
// If standard JSON fails, try JSONL format (for goby)
|
||||
if (fingerprintType === "goby") {
|
||||
const lines = text.trim().split('\n').filter(line => line.trim())
|
||||
if (lines.length === 0) {
|
||||
toast.error(t("import.emptyData"))
|
||||
return
|
||||
}
|
||||
// Parse each line as JSON
|
||||
json = lines.map((line, index) => {
|
||||
try {
|
||||
return JSON.parse(line)
|
||||
} catch {
|
||||
throw new Error(`Line ${index + 1}: Invalid JSON`)
|
||||
}
|
||||
})
|
||||
} else {
|
||||
throw new Error("Invalid JSON")
|
||||
}
|
||||
}
|
||||
|
||||
const validation = config.validate(json)
|
||||
if (!validation.valid) {
|
||||
toast.error(validation.error)
|
||||
return
|
||||
}
|
||||
} catch (e) {
|
||||
toast.error(tToast("invalidJsonFile"))
|
||||
} catch (e: any) {
|
||||
toast.error(e.message || tToast("invalidJsonFile"))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,41 +91,8 @@ export function createIPAddressColumns({
|
||||
),
|
||||
cell: ({ getValue }) => {
|
||||
const hosts = getValue<string[]>()
|
||||
if (!hosts || hosts.length === 0) {
|
||||
return <span className="text-muted-foreground">-</span>
|
||||
}
|
||||
|
||||
const displayHosts = hosts.slice(0, 3)
|
||||
const hasMore = hosts.length > 3
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-1">
|
||||
{displayHosts.map((host, index) => (
|
||||
<ExpandableCell key={index} value={host} maxLines={1} />
|
||||
))}
|
||||
{hasMore && (
|
||||
<Popover>
|
||||
<PopoverTrigger asChild>
|
||||
<Badge variant="secondary" className="text-xs w-fit cursor-pointer hover:bg-muted">
|
||||
+{hosts.length - 3} more
|
||||
</Badge>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent className="w-80 p-3">
|
||||
<div className="space-y-2">
|
||||
<h4 className="font-medium text-sm">{t.tooltips.allHosts} ({hosts.length})</h4>
|
||||
<div className="flex flex-col gap-1 max-h-48 overflow-y-auto">
|
||||
{hosts.map((host, index) => (
|
||||
<span key={index} className="text-sm break-all">
|
||||
{host}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
const value = hosts?.length ? hosts.join("\n") : null
|
||||
return <ExpandableCell value={value} maxLines={3} />
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -18,7 +18,7 @@ const IP_ADDRESS_FILTER_FIELDS: FilterField[] = [
|
||||
|
||||
// IP address page filter examples
|
||||
const IP_ADDRESS_FILTER_EXAMPLES = [
|
||||
'ip="192.168.1.*" && port="80"',
|
||||
'ip="192.168.1." && port="80"',
|
||||
'port="443" || port="8443"',
|
||||
'host="api.example.com" && port!="22"',
|
||||
]
|
||||
@@ -54,6 +54,7 @@ export function IPAddressesDataTable({
|
||||
}: IPAddressesDataTableProps) {
|
||||
const t = useTranslations("common.status")
|
||||
const tDownload = useTranslations("common.download")
|
||||
const tActions = useTranslations("common.actions")
|
||||
|
||||
// Smart search handler
|
||||
const handleSmartSearch = (rawQuery: string) => {
|
||||
@@ -98,7 +99,7 @@ export function IPAddressesDataTable({
|
||||
onSelectionChange={onSelectionChange}
|
||||
// Bulk operations
|
||||
onBulkDelete={onBulkDelete}
|
||||
bulkDeleteLabel="Delete"
|
||||
bulkDeleteLabel={tActions("delete")}
|
||||
showAddButton={false}
|
||||
// Download
|
||||
downloadOptions={downloadOptions.length > 0 ? downloadOptions : undefined}
|
||||
|
||||
@@ -8,6 +8,7 @@ import { createIPAddressColumns } from "./ip-addresses-columns"
|
||||
import { DataTableSkeleton } from "@/components/ui/data-table-skeleton"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { useTargetIPAddresses, useScanIPAddresses } from "@/hooks/use-ip-addresses"
|
||||
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
|
||||
import { getDateLocale } from "@/lib/date-utils"
|
||||
import type { IPAddress } from "@/types/ip-address.types"
|
||||
import { IPAddressService } from "@/services/ip-address.service"
|
||||
@@ -26,6 +27,8 @@ export function IPAddressesView({
|
||||
})
|
||||
const [selectedIPAddresses, setSelectedIPAddresses] = useState<IPAddress[]>([])
|
||||
const [filterQuery, setFilterQuery] = useState("")
|
||||
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
|
||||
const [isDeleting, setIsDeleting] = useState(false)
|
||||
|
||||
// Internationalization
|
||||
const tColumns = useTranslations("columns")
|
||||
@@ -197,22 +200,63 @@ export function IPAddressesView({
|
||||
}
|
||||
|
||||
// Handle download selected IP addresses
|
||||
const handleDownloadSelected = () => {
|
||||
const handleDownloadSelected = async () => {
|
||||
if (selectedIPAddresses.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
const csvContent = generateCSV(selectedIPAddresses)
|
||||
const blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
|
||||
const url = URL.createObjectURL(blob)
|
||||
const a = document.createElement("a")
|
||||
const prefix = scanId ? `scan-${scanId}` : targetId ? `target-${targetId}` : "ip-addresses"
|
||||
a.href = url
|
||||
a.download = `${prefix}-ip-addresses-selected-${Date.now()}.csv`
|
||||
document.body.appendChild(a)
|
||||
a.click()
|
||||
document.body.removeChild(a)
|
||||
URL.revokeObjectURL(url)
|
||||
try {
|
||||
// Get selected IPs and call backend export API
|
||||
const ips = selectedIPAddresses.map(ip => ip.ip)
|
||||
let blob: Blob | null = null
|
||||
|
||||
if (targetId) {
|
||||
blob = await IPAddressService.exportIPAddressesByTargetId(targetId, ips)
|
||||
} else if (scanId) {
|
||||
// For scan, use frontend CSV generation as fallback (scan export doesn't support IP filter yet)
|
||||
const csvContent = generateCSV(selectedIPAddresses)
|
||||
blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
|
||||
} else {
|
||||
const csvContent = generateCSV(selectedIPAddresses)
|
||||
blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
|
||||
}
|
||||
|
||||
if (!blob) return
|
||||
|
||||
const url = URL.createObjectURL(blob)
|
||||
const a = document.createElement("a")
|
||||
const prefix = scanId ? `scan-${scanId}` : targetId ? `target-${targetId}` : "ip-addresses"
|
||||
a.href = url
|
||||
a.download = `${prefix}-ip-addresses-selected-${Date.now()}.csv`
|
||||
document.body.appendChild(a)
|
||||
a.click()
|
||||
document.body.removeChild(a)
|
||||
URL.revokeObjectURL(url)
|
||||
} catch (error) {
|
||||
console.error("Failed to download selected IP addresses", error)
|
||||
toast.error(tToast("downloadFailed"))
|
||||
}
|
||||
}
|
||||
|
||||
// Handle bulk delete
|
||||
const handleBulkDelete = async () => {
|
||||
if (selectedIPAddresses.length === 0) return
|
||||
|
||||
setIsDeleting(true)
|
||||
try {
|
||||
// IP addresses are aggregated, pass IP strings instead of IDs
|
||||
const ips = selectedIPAddresses.map(ip => ip.ip)
|
||||
const result = await IPAddressService.bulkDelete(ips)
|
||||
toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
|
||||
setSelectedIPAddresses([])
|
||||
setDeleteDialogOpen(false)
|
||||
refetch()
|
||||
} catch (error) {
|
||||
console.error("Failed to delete IP addresses", error)
|
||||
toast.error(tToast("deleteFailed"))
|
||||
} finally {
|
||||
setIsDeleting(false)
|
||||
}
|
||||
}
|
||||
|
||||
if (error) {
|
||||
@@ -253,6 +297,18 @@ export function IPAddressesView({
|
||||
onSelectionChange={handleSelectionChange}
|
||||
onDownloadAll={handleDownloadAll}
|
||||
onDownloadSelected={handleDownloadSelected}
|
||||
onBulkDelete={targetId ? () => setDeleteDialogOpen(true) : undefined}
|
||||
/>
|
||||
|
||||
{/* Delete confirmation dialog */}
|
||||
<ConfirmDialog
|
||||
open={deleteDialogOpen}
|
||||
onOpenChange={setDeleteDialogOpen}
|
||||
title={tCommon("actions.confirmDelete")}
|
||||
description={tCommon("actions.deleteConfirmMessage", { count: selectedIPAddresses.length })}
|
||||
onConfirm={handleBulkDelete}
|
||||
loading={isDeleting}
|
||||
variant="destructive"
|
||||
/>
|
||||
</>
|
||||
)
|
||||
|
||||
@@ -18,6 +18,7 @@ import { cn } from "@/lib/utils"
|
||||
import { transformBackendNotification, useNotificationSSE } from "@/hooks/use-notification-sse"
|
||||
import { useMarkAllAsRead, useNotifications } from "@/hooks/use-notifications"
|
||||
import type { Notification, NotificationType, NotificationSeverity } from "@/types/notification.types"
|
||||
import { SEVERITY_CARD_STYLES, SEVERITY_ICON_BG } from "@/lib/severity-config"
|
||||
|
||||
/**
|
||||
* Notification drawer component
|
||||
@@ -71,12 +72,52 @@ function getTimeGroup(dateStr?: string): 'today' | 'yesterday' | 'earlier' {
|
||||
const now = new Date()
|
||||
const today = new Date(now.getFullYear(), now.getMonth(), now.getDate())
|
||||
const yesterday = new Date(today.getTime() - 24 * 60 * 60 * 1000)
|
||||
|
||||
|
||||
if (date >= today) return 'today'
|
||||
if (date >= yesterday) return 'yesterday'
|
||||
return 'earlier'
|
||||
}
|
||||
|
||||
/** Severity icon class mapping */
|
||||
const SEVERITY_ICON_CLASS_MAP: Record<NotificationSeverity, string> = {
|
||||
critical: "text-[#da3633] dark:text-[#f85149]",
|
||||
high: "text-[#d29922]",
|
||||
medium: "text-[#d4a72c]",
|
||||
low: "text-[#848d97]",
|
||||
}
|
||||
|
||||
/** Severity card class mapping */
|
||||
const SEVERITY_CARD_CLASS_MAP: Record<NotificationSeverity, string> = {
|
||||
critical: SEVERITY_CARD_STYLES.critical,
|
||||
high: SEVERITY_CARD_STYLES.high,
|
||||
medium: SEVERITY_CARD_STYLES.medium,
|
||||
low: SEVERITY_CARD_STYLES.low,
|
||||
}
|
||||
|
||||
/** Get notification icon based on type and severity */
|
||||
function getNotificationIcon(type: NotificationType, severity?: NotificationSeverity) {
|
||||
const severityClass = severity ? SEVERITY_ICON_CLASS_MAP[severity] : "text-gray-500"
|
||||
|
||||
if (type === "vulnerability") {
|
||||
return <AlertTriangle className={cn("h-5 w-5", severityClass)} />
|
||||
}
|
||||
if (type === "scan") {
|
||||
return <Activity className={cn("h-5 w-5", severityClass)} />
|
||||
}
|
||||
if (type === "asset") {
|
||||
return <Server className={cn("h-5 w-5", severityClass)} />
|
||||
}
|
||||
return <Info className={cn("h-5 w-5", severityClass)} />
|
||||
}
|
||||
|
||||
/** Get notification card classes based on severity */
|
||||
function getNotificationCardClasses(severity?: NotificationSeverity) {
|
||||
if (!severity) {
|
||||
return "border-border bg-card hover:bg-accent/50"
|
||||
}
|
||||
return cn("border-border", SEVERITY_CARD_CLASS_MAP[severity] ?? "")
|
||||
}
|
||||
|
||||
export function NotificationDrawer() {
|
||||
const t = useTranslations("notificationDrawer")
|
||||
const [open, setOpen] = React.useState(false)
|
||||
@@ -94,20 +135,20 @@ export function NotificationDrawer() {
|
||||
{ value: 'system', label: t("filters.system"), icon: <Info className="h-3 w-3" /> },
|
||||
]
|
||||
|
||||
// Category title mapping
|
||||
const categoryTitleMap: Record<NotificationType, string> = {
|
||||
// Category title mapping (memoized to avoid recreation)
|
||||
const categoryTitleMap = React.useMemo<Record<NotificationType, string>>(() => ({
|
||||
scan: t("categories.scan"),
|
||||
vulnerability: t("categories.vulnerability"),
|
||||
asset: t("categories.asset"),
|
||||
system: t("categories.system"),
|
||||
}
|
||||
}), [t])
|
||||
|
||||
// Time group labels
|
||||
const timeGroupLabels = {
|
||||
// Time group labels (memoized to avoid recreation)
|
||||
const timeGroupLabels = React.useMemo(() => ({
|
||||
today: t("timeGroups.today"),
|
||||
yesterday: t("timeGroups.yesterday"),
|
||||
earlier: t("timeGroups.earlier"),
|
||||
}
|
||||
}), [t])
|
||||
|
||||
// SSE real-time notifications
|
||||
const { notifications: sseNotifications, isConnected, markNotificationsAsRead } = useNotificationSSE()
|
||||
@@ -139,7 +180,7 @@ export function NotificationDrawer() {
|
||||
}
|
||||
}
|
||||
|
||||
return merged.sort((a, b) => {
|
||||
return merged.toSorted((a, b) => {
|
||||
const aTime = a.createdAt ? new Date(a.createdAt).getTime() : 0
|
||||
const bTime = b.createdAt ? new Date(b.createdAt).getTime() : 0
|
||||
return bTime - aTime
|
||||
@@ -175,43 +216,6 @@ export function NotificationDrawer() {
|
||||
return allNotifications.filter(n => n.type === activeFilter)
|
||||
}, [allNotifications, activeFilter])
|
||||
|
||||
// Get notification icon
|
||||
const severityIconClassMap: Record<NotificationSeverity, string> = {
|
||||
critical: "text-[#da3633] dark:text-[#f85149]",
|
||||
high: "text-[#d29922]",
|
||||
medium: "text-[#d4a72c]",
|
||||
low: "text-[#848d97]",
|
||||
}
|
||||
|
||||
const getNotificationIcon = (type: NotificationType, severity?: NotificationSeverity) => {
|
||||
const severityClass = severity ? severityIconClassMap[severity] : "text-gray-500"
|
||||
|
||||
if (type === "vulnerability") {
|
||||
return <AlertTriangle className={cn("h-5 w-5", severityClass)} />
|
||||
}
|
||||
if (type === "scan") {
|
||||
return <Activity className={cn("h-5 w-5", severityClass)} />
|
||||
}
|
||||
if (type === "asset") {
|
||||
return <Server className={cn("h-5 w-5", severityClass)} />
|
||||
}
|
||||
return <Info className={cn("h-5 w-5", severityClass)} />
|
||||
}
|
||||
|
||||
const severityCardClassMap: Record<NotificationSeverity, string> = {
|
||||
critical: "border-[#da3633]/30 bg-[#da3633]/5 hover:bg-[#da3633]/10 dark:border-[#f85149]/30 dark:bg-[#f85149]/5 dark:hover:bg-[#f85149]/10",
|
||||
high: "border-[#d29922]/30 bg-[#d29922]/5 hover:bg-[#d29922]/10 dark:border-[#d29922]/30 dark:bg-[#d29922]/5 dark:hover:bg-[#d29922]/10",
|
||||
medium: "border-[#d4a72c]/30 bg-[#d4a72c]/5 hover:bg-[#d4a72c]/10 dark:border-[#d4a72c]/30 dark:bg-[#d4a72c]/5 dark:hover:bg-[#d4a72c]/10",
|
||||
low: "border-[#848d97]/30 bg-[#848d97]/5 hover:bg-[#848d97]/10 dark:border-[#848d97]/30 dark:bg-[#848d97]/5 dark:hover:bg-[#848d97]/10",
|
||||
}
|
||||
|
||||
const getNotificationCardClasses = (severity?: NotificationSeverity) => {
|
||||
if (!severity) {
|
||||
return "border-border bg-card hover:bg-accent/50"
|
||||
}
|
||||
return cn("border-border", severityCardClassMap[severity] ?? "")
|
||||
}
|
||||
|
||||
const handleMarkAll = React.useCallback(() => {
|
||||
if (allNotifications.length === 0 || isMarkingAll) return
|
||||
markAllAsRead(undefined, {
|
||||
@@ -240,8 +244,8 @@ export function NotificationDrawer() {
|
||||
return groups
|
||||
}, [filteredNotifications])
|
||||
|
||||
// Render single notification card
|
||||
const renderNotificationCard = (notification: Notification) => (
|
||||
// Render single notification card (memoized to avoid recreation)
|
||||
const renderNotificationCard = React.useCallback((notification: Notification) => (
|
||||
<div
|
||||
key={notification.id}
|
||||
className={cn(
|
||||
@@ -256,10 +260,10 @@ export function NotificationDrawer() {
|
||||
<div className="flex items-start gap-3">
|
||||
<div className={cn(
|
||||
"mt-0.5 p-1.5 rounded-full shrink-0",
|
||||
notification.severity === 'critical' && "bg-[#da3633]/10 dark:bg-[#f85149]/10",
|
||||
notification.severity === 'high' && "bg-[#d29922]/10",
|
||||
notification.severity === 'medium' && "bg-[#d4a72c]/10",
|
||||
(!notification.severity || notification.severity === 'low') && "bg-muted"
|
||||
notification.severity === 'critical' && SEVERITY_ICON_BG.critical,
|
||||
notification.severity === 'high' && SEVERITY_ICON_BG.high,
|
||||
notification.severity === 'medium' && SEVERITY_ICON_BG.medium,
|
||||
(!notification.severity || notification.severity === 'low') && SEVERITY_ICON_BG.info
|
||||
)}>
|
||||
{getNotificationIcon(notification.type, notification.severity)}
|
||||
</div>
|
||||
@@ -284,12 +288,12 @@ export function NotificationDrawer() {
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
), [categoryTitleMap])
|
||||
|
||||
// Render notification list (with time grouping)
|
||||
const renderNotificationList = () => {
|
||||
// Render notification list (with time grouping, memoized to avoid recreation)
|
||||
const renderNotificationList = React.useCallback(() => {
|
||||
const hasAny = filteredNotifications.length > 0
|
||||
|
||||
|
||||
if (!hasAny) {
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center h-40 text-muted-foreground">
|
||||
@@ -304,7 +308,7 @@ export function NotificationDrawer() {
|
||||
{(['today', 'yesterday', 'earlier'] as const).map(group => {
|
||||
const items = groupedNotifications[group]
|
||||
if (items.length === 0) return null
|
||||
|
||||
|
||||
return (
|
||||
<div key={group}>
|
||||
<h3 className="sticky top-0 z-10 text-xs font-medium text-muted-foreground mb-2 px-1 py-1 backdrop-blur bg-background/90">
|
||||
@@ -318,7 +322,7 @@ export function NotificationDrawer() {
|
||||
})}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
}, [filteredNotifications, groupedNotifications, timeGroupLabels, renderNotificationCard, t])
|
||||
|
||||
return (
|
||||
<Sheet open={open} onOpenChange={setOpen}>
|
||||
|
||||
@@ -32,7 +32,7 @@ import {
|
||||
} from "@/components/ui/form"
|
||||
|
||||
import { useCreateOrganization } from "@/hooks/use-organizations"
|
||||
import { useBatchCreateTargets } from "@/hooks/use-targets"
|
||||
import { batchCreateTargets } from "@/services/target.service"
|
||||
|
||||
import type { Organization } from "@/types/organization.types"
|
||||
|
||||
@@ -68,7 +68,7 @@ export function AddOrganizationDialog({
|
||||
const textareaRef = useRef<HTMLTextAreaElement | null>(null)
|
||||
|
||||
const createOrganization = useCreateOrganization()
|
||||
const batchCreateTargets = useBatchCreateTargets()
|
||||
const [isCreatingTargets, setIsCreatingTargets] = useState(false)
|
||||
|
||||
const form = useForm<FormValues>({
|
||||
resolver: zodResolver(formSchema),
|
||||
@@ -105,7 +105,7 @@ export function AddOrganizationDialog({
|
||||
}
|
||||
}
|
||||
|
||||
const onSubmit = (values: FormValues) => {
|
||||
const onSubmit = async (values: FormValues) => {
|
||||
if (targetValidation.invalid.length > 0) return
|
||||
|
||||
createOrganization.mutate(
|
||||
@@ -114,7 +114,7 @@ export function AddOrganizationDialog({
|
||||
description: values.description?.trim() || "",
|
||||
},
|
||||
{
|
||||
onSuccess: (newOrganization) => {
|
||||
onSuccess: async (newOrganization) => {
|
||||
if (values.targets && values.targets.trim()) {
|
||||
const targetList = values.targets
|
||||
.split("\n")
|
||||
@@ -123,40 +123,32 @@ export function AddOrganizationDialog({
|
||||
.map(name => ({ name }))
|
||||
|
||||
if (targetList.length > 0) {
|
||||
batchCreateTargets.mutate(
|
||||
{ targets: targetList, organizationId: newOrganization.id },
|
||||
{
|
||||
onSuccess: () => {
|
||||
form.reset()
|
||||
setOpen(false)
|
||||
if (onAdd) onAdd(newOrganization)
|
||||
}
|
||||
}
|
||||
)
|
||||
} else {
|
||||
form.reset()
|
||||
setOpen(false)
|
||||
if (onAdd) onAdd(newOrganization)
|
||||
setIsCreatingTargets(true)
|
||||
try {
|
||||
// Call service directly to avoid double toast
|
||||
await batchCreateTargets({ targets: targetList, organizationId: newOrganization.id })
|
||||
} finally {
|
||||
setIsCreatingTargets(false)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
form.reset()
|
||||
setOpen(false)
|
||||
if (onAdd) onAdd(newOrganization)
|
||||
}
|
||||
form.reset()
|
||||
setOpen(false)
|
||||
if (onAdd) onAdd(newOrganization)
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
const handleOpenChange = (newOpen: boolean) => {
|
||||
if (!createOrganization.isPending && !batchCreateTargets.isPending) {
|
||||
if (!createOrganization.isPending && !isCreatingTargets) {
|
||||
setOpen(newOpen)
|
||||
if (!newOpen) form.reset()
|
||||
}
|
||||
}
|
||||
|
||||
const isFormValid = form.formState.isValid && targetValidation.invalid.length === 0
|
||||
const isSubmitting = createOrganization.isPending || batchCreateTargets.isPending
|
||||
const isSubmitting = createOrganization.isPending || isCreatingTargets
|
||||
|
||||
return (
|
||||
<Dialog open={open} onOpenChange={handleOpenChange}>
|
||||
|
||||
@@ -41,7 +41,7 @@ export interface OrganizationTranslations {
|
||||
selectRow: string
|
||||
}
|
||||
tooltips: {
|
||||
targetSummary: string
|
||||
organizationDetails: string
|
||||
initiateScan: string
|
||||
}
|
||||
}
|
||||
@@ -240,7 +240,7 @@ export const createOrganizationColumns = ({
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side="top">
|
||||
<p className="text-xs">{t.tooltips.targetSummary}</p>
|
||||
<p className="text-xs">{t.tooltips.organizationDetails}</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</TooltipProvider>
|
||||
|
||||
@@ -82,12 +82,20 @@ export function OrganizationDetailView({
|
||||
const [searchQuery, setSearchQuery] = useState("")
|
||||
const [isSearching, setIsSearching] = useState(false)
|
||||
|
||||
// Type filter state
|
||||
const [typeFilter, setTypeFilter] = useState<string>("")
|
||||
|
||||
const handleSearchChange = (value: string) => {
|
||||
setIsSearching(true)
|
||||
setSearchQuery(value)
|
||||
setPagination((prev) => ({ ...prev, pageIndex: 0 }))
|
||||
}
|
||||
|
||||
const handleTypeFilterChange = (value: string) => {
|
||||
setTypeFilter(value)
|
||||
setPagination((prev) => ({ ...prev, pageIndex: 0 }))
|
||||
}
|
||||
|
||||
// Use unlink targets mutation
|
||||
const unlinkTargets = useUnlinkTargetsFromOrganization()
|
||||
|
||||
@@ -111,6 +119,7 @@ export function OrganizationDetailView({
|
||||
page: pagination.pageIndex + 1,
|
||||
pageSize: pagination.pageSize,
|
||||
search: searchQuery || undefined,
|
||||
type: typeFilter || undefined,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -306,7 +315,6 @@ export function OrganizationDetailView({
|
||||
searchValue={searchQuery}
|
||||
onSearch={handleSearchChange}
|
||||
isSearching={isSearching}
|
||||
addButtonText={tCommon("actions.add")}
|
||||
pagination={pagination}
|
||||
setPagination={setPagination}
|
||||
paginationInfo={targetsData ? {
|
||||
@@ -316,6 +324,8 @@ export function OrganizationDetailView({
|
||||
totalPages: targetsData.totalPages,
|
||||
} : undefined}
|
||||
onPaginationChange={handlePaginationChange}
|
||||
typeFilter={typeFilter}
|
||||
onTypeFilterChange={handleTypeFilterChange}
|
||||
/>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ export function OrganizationList() {
|
||||
const tCommon = useTranslations("common")
|
||||
const tTooltips = useTranslations("tooltips")
|
||||
const tConfirm = useTranslations("common.confirm")
|
||||
const tOrg = useTranslations("organization")
|
||||
const locale = useLocale()
|
||||
|
||||
// 构建翻译对象
|
||||
@@ -77,7 +78,7 @@ export function OrganizationList() {
|
||||
selectRow: tCommon("actions.selectRow"),
|
||||
},
|
||||
tooltips: {
|
||||
targetSummary: tTooltips("targetSummary"),
|
||||
organizationDetails: tTooltips("organizationDetails"),
|
||||
initiateScan: tTooltips("initiateScan"),
|
||||
},
|
||||
}), [tColumns, tCommon, tTooltips])
|
||||
@@ -120,7 +121,7 @@ export function OrganizationList() {
|
||||
} = useOrganizations({
|
||||
page: pagination.pageIndex + 1, // 转换为 1-based
|
||||
pageSize: pagination.pageSize,
|
||||
search: searchQuery || undefined,
|
||||
filter: searchQuery || undefined,
|
||||
}, { enabled: true })
|
||||
|
||||
useEffect(() => {
|
||||
@@ -272,7 +273,7 @@ export function OrganizationList() {
|
||||
onAddNew={() => setAddDialogOpen(true)}
|
||||
onBulkDelete={handleBulkDelete}
|
||||
onSelectionChange={setSelectedOrganizations}
|
||||
searchPlaceholder={tColumns("organization.organization")}
|
||||
searchPlaceholder={tOrg("name")}
|
||||
searchColumn="name"
|
||||
searchValue={searchQuery}
|
||||
onSearch={handleSearchChange}
|
||||
|
||||
@@ -132,7 +132,7 @@ function TargetNameCell({
|
||||
return (
|
||||
<div className="group flex items-start gap-1 flex-1 min-w-0">
|
||||
<button
|
||||
onClick={() => navigate(`/target/${targetId}/website/`)}
|
||||
onClick={() => navigate(`/target/${targetId}/overview/`)}
|
||||
className="text-sm font-medium hover:text-primary hover:underline underline-offset-2 transition-colors cursor-pointer text-left break-all leading-relaxed whitespace-normal"
|
||||
>
|
||||
{name}
|
||||
@@ -251,7 +251,7 @@ export const createTargetColumns = ({
|
||||
cell: ({ row }) => (
|
||||
<TargetRowActions
|
||||
target={row.original}
|
||||
onView={() => navigate(`/target/${row.original.id}/website/`)}
|
||||
onView={() => navigate(`/target/${row.original.id}/overview/`)}
|
||||
onDelete={() => handleDelete(row.original)}
|
||||
t={t}
|
||||
/>
|
||||
|
||||
@@ -2,10 +2,18 @@
|
||||
|
||||
import * as React from "react"
|
||||
import { IconSearch, IconLoader2 } from "@tabler/icons-react"
|
||||
import { Filter } from "lucide-react"
|
||||
import { useTranslations } from "next-intl"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { UnifiedDataTable } from "@/components/ui/data-table"
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/ui/select"
|
||||
import type { ColumnDef } from "@tanstack/react-table"
|
||||
import type { Target } from "@/types/target.types"
|
||||
import type { PaginationInfo } from "@/types/common.types"
|
||||
@@ -26,6 +34,8 @@ interface TargetsDataTableProps {
|
||||
setPagination?: React.Dispatch<React.SetStateAction<{ pageIndex: number; pageSize: number }>>
|
||||
paginationInfo?: PaginationInfo
|
||||
onPaginationChange?: (pagination: { pageIndex: number; pageSize: number }) => void
|
||||
typeFilter?: string
|
||||
onTypeFilterChange?: (value: string) => void
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -48,9 +58,13 @@ export function TargetsDataTable({
|
||||
setPagination: setExternalPagination,
|
||||
paginationInfo,
|
||||
onPaginationChange,
|
||||
typeFilter,
|
||||
onTypeFilterChange,
|
||||
}: TargetsDataTableProps) {
|
||||
const t = useTranslations("common.status")
|
||||
const tTarget = useTranslations("target")
|
||||
const tTooltips = useTranslations("tooltips")
|
||||
const tCommon = useTranslations("common")
|
||||
|
||||
// 本地搜索输入状态
|
||||
const [localSearchValue, setLocalSearchValue] = React.useState(searchValue || "")
|
||||
@@ -71,13 +85,6 @@ export function TargetsDataTable({
|
||||
}
|
||||
}
|
||||
|
||||
// 自定义添加按钮(支持 onAddHover)
|
||||
const addButton = onAddNew ? (
|
||||
<Button onClick={onAddNew} onMouseEnter={onAddHover} size="sm">
|
||||
{addButtonText || tTarget("createTarget")}
|
||||
</Button>
|
||||
) : undefined
|
||||
|
||||
return (
|
||||
<UnifiedDataTable
|
||||
data={data}
|
||||
@@ -91,8 +98,14 @@ export function TargetsDataTable({
|
||||
// 选择
|
||||
onSelectionChange={onSelectionChange}
|
||||
// 批量操作
|
||||
showBulkDelete={false}
|
||||
showAddButton={false}
|
||||
showBulkDelete={!!onBulkDelete}
|
||||
onBulkDelete={onBulkDelete}
|
||||
bulkDeleteLabel={tTooltips("unlinkTarget")}
|
||||
// 添加按钮(在解除关联按钮之后)
|
||||
showAddButton={!!onAddNew}
|
||||
onAddNew={onAddNew}
|
||||
onAddHover={onAddHover}
|
||||
addButtonLabel={addButtonText || tTarget("addTarget")}
|
||||
// 空状态
|
||||
emptyMessage={t("noData")}
|
||||
// 自定义工具栏
|
||||
@@ -112,9 +125,22 @@ export function TargetsDataTable({
|
||||
<IconSearch className="h-4 w-4" />
|
||||
)}
|
||||
</Button>
|
||||
{onTypeFilterChange && (
|
||||
<Select value={typeFilter || "all"} onValueChange={(value) => onTypeFilterChange(value === "all" ? "" : value)}>
|
||||
<SelectTrigger size="sm" className="w-auto">
|
||||
<Filter className="h-4 w-4" />
|
||||
<SelectValue placeholder={tCommon("actions.filter")} />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
<SelectItem value="all">{tCommon("actions.all")}</SelectItem>
|
||||
<SelectItem value="domain">{tTarget("types.domain")}</SelectItem>
|
||||
<SelectItem value="ip">{tTarget("types.ip")}</SelectItem>
|
||||
<SelectItem value="cidr">{tTarget("types.cidr")}</SelectItem>
|
||||
</SelectContent>
|
||||
</Select>
|
||||
)}
|
||||
</div>
|
||||
}
|
||||
toolbarRight={addButton}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -248,7 +248,7 @@ export function OrganizationTargetsDetailView({
|
||||
onBulkDelete={handleBulkDelete}
|
||||
onSelectionChange={setSelectedTargets}
|
||||
searchPlaceholder={tColumns("target.target")}
|
||||
addButtonText={tCommon("actions.add")}
|
||||
addButtonText={tTarget("addTarget")}
|
||||
pagination={pagination}
|
||||
setPagination={setPagination}
|
||||
paginationInfo={targetsData ? {
|
||||
|
||||
@@ -54,7 +54,7 @@ export function EnginePresetSelector({
|
||||
|
||||
engines.forEach(e => {
|
||||
const caps = parseEngineCapabilities(e.configuration || "")
|
||||
const hasRecon = caps.includes("subdomain_discovery") || caps.includes("port_scan") || caps.includes("site_scan") || caps.includes("directory_scan") || caps.includes("url_fetch")
|
||||
const hasRecon = caps.includes("subdomain_discovery") || caps.includes("port_scan") || caps.includes("site_scan") || caps.includes("fingerprint_detect") || caps.includes("directory_scan") || caps.includes("url_fetch") || caps.includes("screenshot")
|
||||
const hasVuln = caps.includes("vuln_scan")
|
||||
|
||||
if (hasRecon && hasVuln) {
|
||||
|
||||
@@ -58,14 +58,6 @@ subdomain_discovery:
|
||||
enabled: true
|
||||
timeout: 600 # 10 minutes (required)
|
||||
|
||||
amass_passive:
|
||||
enabled: true
|
||||
timeout: 600 # 10 minutes (required)
|
||||
|
||||
amass_active:
|
||||
enabled: true
|
||||
timeout: 1800 # 30 minutes (required)
|
||||
|
||||
sublist3r:
|
||||
enabled: true
|
||||
timeout: 900 # 15 minutes (required)
|
||||
@@ -96,6 +88,7 @@ site_scan:
|
||||
httpx:
|
||||
enabled: true
|
||||
timeout: auto # Auto calculate
|
||||
# screenshot: true # Enable site screenshot (requires Chromium)
|
||||
|
||||
|
||||
# ==================== Directory Scan ====================
|
||||
@@ -212,16 +205,11 @@ url_fetch:
|
||||
await new Promise(resolve => setTimeout(resolve, 1000))
|
||||
}
|
||||
|
||||
toast.success(tToast("configSaveSuccess"), {
|
||||
description: tToast("configSaveSuccessDesc", { name: engine.name }),
|
||||
})
|
||||
setHasChanges(false)
|
||||
onOpenChange(false)
|
||||
} catch (error) {
|
||||
console.error("Failed to save YAML config:", error)
|
||||
toast.error(tToast("configSaveFailed"), {
|
||||
description: error instanceof Error ? error.message : tToast("unknownError"),
|
||||
})
|
||||
// Error toast is handled by useUpdateEngine hook
|
||||
} finally {
|
||||
setIsSubmitting(false)
|
||||
}
|
||||
|
||||
@@ -65,7 +65,7 @@ export interface ScanHistoryTranslations {
|
||||
cancelled: string
|
||||
completed: string
|
||||
failed: string
|
||||
initiated: string
|
||||
pending: string
|
||||
running: string
|
||||
}
|
||||
summary: {
|
||||
@@ -109,7 +109,7 @@ function StatusBadge({
|
||||
variant: "outline",
|
||||
className: "bg-[#da3633]/10 text-[#da3633] border-[#da3633]/20 hover:bg-[#da3633]/20 dark:text-[#f85149] transition-colors",
|
||||
},
|
||||
initiated: {
|
||||
pending: {
|
||||
icon: IconClock,
|
||||
variant: "outline",
|
||||
className: "bg-[#d29922]/10 text-[#d29922] border-[#d29922]/20 hover:bg-[#d29922]/20 transition-colors",
|
||||
@@ -126,7 +126,7 @@ function StatusBadge({
|
||||
|
||||
const badge = (
|
||||
<Badge variant={variant} className={className}>
|
||||
{(status === "running" || status === "initiated") ? (
|
||||
{(status === "running" || status === "pending") ? (
|
||||
<span className="relative flex h-2 w-2">
|
||||
<span className="absolute inline-flex h-full w-full animate-ping rounded-full bg-current opacity-75" />
|
||||
<span className="relative inline-flex h-2 w-2 rounded-full bg-current" />
|
||||
@@ -204,7 +204,8 @@ export const createScanHistoryColumns = ({
|
||||
enableHiding: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "targetName",
|
||||
accessorKey: "target",
|
||||
accessorFn: (row) => row.target?.name,
|
||||
size: 350,
|
||||
minSize: 100,
|
||||
meta: { title: t.columns.target },
|
||||
@@ -212,8 +213,8 @@ export const createScanHistoryColumns = ({
|
||||
<DataTableColumnHeader column={column} title={t.columns.target} />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const targetName = row.getValue("targetName") as string
|
||||
const targetId = row.original.target
|
||||
const targetName = row.original.target?.name
|
||||
const targetId = row.original.targetId
|
||||
|
||||
return (
|
||||
<div className="flex-1 min-w-0">
|
||||
@@ -239,7 +240,8 @@ export const createScanHistoryColumns = ({
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "summary",
|
||||
accessorKey: "cachedStats",
|
||||
accessorFn: (row) => row.cachedStats,
|
||||
meta: { title: t.columns.summary },
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title={t.columns.summary} />
|
||||
@@ -247,25 +249,11 @@ export const createScanHistoryColumns = ({
|
||||
size: 290,
|
||||
minSize: 150,
|
||||
cell: ({ row }) => {
|
||||
const summary = (row.getValue("summary") as {
|
||||
subdomains: number
|
||||
websites: number
|
||||
endpoints: number
|
||||
ips: number
|
||||
vulnerabilities: {
|
||||
total: number
|
||||
critical: number
|
||||
high: number
|
||||
medium: number
|
||||
low: number
|
||||
}
|
||||
}) || {}
|
||||
|
||||
const subdomains = summary?.subdomains ?? 0
|
||||
const websites = summary?.websites ?? 0
|
||||
const endpoints = summary?.endpoints ?? 0
|
||||
const ips = summary?.ips ?? 0
|
||||
const vulns = summary?.vulnerabilities?.total ?? 0
|
||||
const subdomains = row.original.cachedStats?.subdomainsCount ?? 0
|
||||
const websites = row.original.cachedStats?.websitesCount ?? 0
|
||||
const endpoints = row.original.cachedStats?.endpointsCount ?? 0
|
||||
const ips = row.original.cachedStats?.ipsCount ?? 0
|
||||
const vulns = row.original.cachedStats?.vulnsTotal ?? 0
|
||||
|
||||
const badges: React.ReactNode[] = []
|
||||
|
||||
@@ -368,7 +356,7 @@ export const createScanHistoryColumns = ({
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side="top">
|
||||
<p className="text-xs font-medium">
|
||||
{summary?.vulnerabilities?.critical ?? 0} Critical, {summary?.vulnerabilities?.high ?? 0} High, {summary?.vulnerabilities?.medium ?? 0} Medium {t.summary.vulnerabilities}
|
||||
{row.original.cachedStats?.vulnsCritical ?? 0} Critical, {row.original.cachedStats?.vulnsHigh ?? 0} High, {row.original.cachedStats?.vulnsMedium ?? 0} Medium {t.summary.vulnerabilities}
|
||||
</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
@@ -502,7 +490,7 @@ export const createScanHistoryColumns = ({
|
||||
status === "failed" ? "bg-[#da3633]" :
|
||||
status === "running" ? "bg-[#d29922] progress-striped" :
|
||||
status === "cancelled" ? "bg-[#848d97]" :
|
||||
status === "initiated" ? "bg-[#d29922] progress-striped" :
|
||||
status === "pending" ? "bg-[#d29922] progress-striped" :
|
||||
"bg-muted-foreground/80"
|
||||
}`}
|
||||
style={{ width: `${displayProgress}%` }}
|
||||
@@ -524,7 +512,7 @@ export const createScanHistoryColumns = ({
|
||||
enableResizing: false,
|
||||
cell: ({ row }) => {
|
||||
const scan = row.original
|
||||
const canStop = scan.status === 'running' || scan.status === 'initiated'
|
||||
const canStop = scan.status === 'running' || scan.status === 'pending'
|
||||
|
||||
return (
|
||||
<div className="flex items-center gap-1">
|
||||
@@ -578,9 +566,9 @@ export const createScanHistoryColumns = ({
|
||||
},
|
||||
]
|
||||
|
||||
// Filter out targetName column if hideTargetColumn is true
|
||||
// Filter out target column if hideTargetColumn is true
|
||||
if (hideTargetColumn) {
|
||||
return columns.filter(col => (col as any).accessorKey !== 'targetName')
|
||||
return columns.filter(col => (col as any).accessorKey !== 'target')
|
||||
}
|
||||
|
||||
return columns
|
||||
|
||||
@@ -4,11 +4,19 @@ import * as React from "react"
|
||||
import type { ColumnDef } from "@tanstack/react-table"
|
||||
import { useTranslations } from "next-intl"
|
||||
import { IconSearch, IconLoader2 } from "@tabler/icons-react"
|
||||
import { Filter } from "lucide-react"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { UnifiedDataTable } from "@/components/ui/data-table"
|
||||
import type { ScanRecord } from "@/types/scan.types"
|
||||
import type { ScanRecord, ScanStatus } from "@/types/scan.types"
|
||||
import type { PaginationInfo } from "@/types/common.types"
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/ui/select"
|
||||
|
||||
interface ScanHistoryDataTableProps {
|
||||
data: ScanRecord[]
|
||||
@@ -28,6 +36,8 @@ interface ScanHistoryDataTableProps {
|
||||
hideToolbar?: boolean
|
||||
hidePagination?: boolean
|
||||
pageSizeOptions?: number[]
|
||||
statusFilter?: ScanStatus | "all"
|
||||
onStatusFilterChange?: (status: ScanStatus | "all") => void
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -52,9 +62,12 @@ export function ScanHistoryDataTable({
|
||||
hideToolbar = false,
|
||||
hidePagination = false,
|
||||
pageSizeOptions,
|
||||
statusFilter = "all",
|
||||
onStatusFilterChange,
|
||||
}: ScanHistoryDataTableProps) {
|
||||
const t = useTranslations("common.status")
|
||||
const tScan = useTranslations("scan.history")
|
||||
const tActions = useTranslations("common.actions")
|
||||
|
||||
// Search local state
|
||||
const [localSearchValue, setLocalSearchValue] = React.useState(searchValue || "")
|
||||
@@ -75,6 +88,16 @@ export function ScanHistoryDataTable({
|
||||
}
|
||||
}
|
||||
|
||||
// Status options
|
||||
const statusOptions: { value: ScanStatus | "all"; label: string }[] = [
|
||||
{ value: "all", label: tScan("allStatus") },
|
||||
{ value: "running", label: t("running") },
|
||||
{ value: "completed", label: t("completed") },
|
||||
{ value: "failed", label: t("failed") },
|
||||
{ value: "pending", label: t("pending") },
|
||||
{ value: "cancelled", label: t("cancelled") },
|
||||
]
|
||||
|
||||
return (
|
||||
<UnifiedDataTable
|
||||
data={data}
|
||||
@@ -91,16 +114,18 @@ export function ScanHistoryDataTable({
|
||||
onSelectionChange={onSelectionChange}
|
||||
// Bulk operations
|
||||
onBulkDelete={onBulkDelete}
|
||||
bulkDeleteLabel="Delete"
|
||||
bulkDeleteLabel={tActions("delete")}
|
||||
onAddNew={onAddNew}
|
||||
addButtonLabel={addButtonText || tScan("title")}
|
||||
// Toolbar
|
||||
hideToolbar={hideToolbar}
|
||||
// Empty state
|
||||
emptyMessage={t("noData")}
|
||||
// Custom search box
|
||||
// Auto column sizing
|
||||
enableAutoColumnSizing
|
||||
// Custom search box and status filter
|
||||
toolbarLeft={
|
||||
<div className="flex items-center space-x-2">
|
||||
<div className="flex items-center gap-2">
|
||||
<Input
|
||||
placeholder={searchPlaceholder || tScan("searchPlaceholder")}
|
||||
value={localSearchValue}
|
||||
@@ -115,6 +140,24 @@ export function ScanHistoryDataTable({
|
||||
<IconSearch className="h-4 w-4" />
|
||||
)}
|
||||
</Button>
|
||||
{onStatusFilterChange && (
|
||||
<Select
|
||||
value={statusFilter}
|
||||
onValueChange={(value) => onStatusFilterChange(value as ScanStatus | "all")}
|
||||
>
|
||||
<SelectTrigger size="sm" className="w-auto">
|
||||
<Filter className="h-4 w-4" />
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{statusOptions.map((option) => (
|
||||
<SelectItem key={option.value} value={option.value}>
|
||||
{option.label}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
)}
|
||||
</div>
|
||||
}
|
||||
/>
|
||||
|
||||
@@ -6,7 +6,7 @@ import { useTranslations, useLocale } from "next-intl"
|
||||
import { ScanHistoryDataTable } from "./scan-history-data-table"
|
||||
import { createScanHistoryColumns } from "./scan-history-columns"
|
||||
import { getDateLocale } from "@/lib/date-utils"
|
||||
import type { ScanRecord } from "@/types/scan.types"
|
||||
import type { ScanRecord, ScanStatus } from "@/types/scan.types"
|
||||
import type { ColumnDef } from "@tanstack/react-table"
|
||||
import { DataTableSkeleton } from "@/components/ui/data-table-skeleton"
|
||||
import {
|
||||
@@ -83,7 +83,7 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
cancelled: tCommon("status.cancelled"),
|
||||
completed: tCommon("status.completed"),
|
||||
failed: tCommon("status.failed"),
|
||||
initiated: tCommon("status.pending"),
|
||||
pending: tCommon("status.pending"),
|
||||
running: tCommon("status.running"),
|
||||
},
|
||||
summary: {
|
||||
@@ -108,6 +108,9 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
// Search state
|
||||
const [searchQuery, setSearchQuery] = useState("")
|
||||
const [isSearching, setIsSearching] = useState(false)
|
||||
|
||||
// Status filter state
|
||||
const [statusFilter, setStatusFilter] = useState<ScanStatus | "all">("all")
|
||||
|
||||
const handleSearchChange = (value: string) => {
|
||||
setIsSearching(true)
|
||||
@@ -115,12 +118,18 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
setPagination((prev) => ({ ...prev, pageIndex: 0 }))
|
||||
}
|
||||
|
||||
const handleStatusFilterChange = (status: ScanStatus | "all") => {
|
||||
setStatusFilter(status)
|
||||
setPagination((prev) => ({ ...prev, pageIndex: 0 }))
|
||||
}
|
||||
|
||||
// Get scan list data
|
||||
const { data, isLoading, isFetching, error } = useScans({
|
||||
page: pagination.pageIndex + 1, // API page numbers start from 1
|
||||
pageSize: pagination.pageSize,
|
||||
search: searchQuery || undefined,
|
||||
target: targetId,
|
||||
status: statusFilter === "all" ? undefined : statusFilter,
|
||||
})
|
||||
|
||||
// Reset search state when request completes
|
||||
@@ -195,7 +204,7 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
|
||||
try {
|
||||
await deleteMutation.mutateAsync(scanToDelete.id)
|
||||
toast.success(tToast("deletedScanRecord", { name: scanToDelete.targetName }))
|
||||
toast.success(tToast("deletedScanRecord", { name: scanToDelete.target?.name ?? "" }))
|
||||
} catch (error) {
|
||||
toast.error(tToast("deleteFailed"))
|
||||
console.error('Delete failed:', error)
|
||||
@@ -226,7 +235,7 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
|
||||
try {
|
||||
await stopMutation.mutateAsync(scanToStop.id)
|
||||
toast.success(tToast("stoppedScan", { name: scanToStop.targetName }))
|
||||
toast.success(tToast("stoppedScan", { name: scanToStop.target?.name ?? "" }))
|
||||
} catch (error) {
|
||||
toast.error(tToast("stopFailed"))
|
||||
console.error('Stop scan failed:', error)
|
||||
@@ -339,6 +348,8 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
hideToolbar={hideToolbar}
|
||||
pageSizeOptions={pageSizeOptions}
|
||||
hidePagination={hidePagination}
|
||||
statusFilter={statusFilter}
|
||||
onStatusFilterChange={handleStatusFilterChange}
|
||||
/>
|
||||
|
||||
{/* Delete confirmation dialog */}
|
||||
@@ -347,7 +358,7 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
<AlertDialogHeader>
|
||||
<AlertDialogTitle>{tConfirm("deleteTitle")}</AlertDialogTitle>
|
||||
<AlertDialogDescription>
|
||||
{tConfirm("deleteScanMessage", { name: scanToDelete?.targetName ?? "" })}
|
||||
{tConfirm("deleteScanMessage", { name: scanToDelete?.target?.name ?? "" })}
|
||||
</AlertDialogDescription>
|
||||
</AlertDialogHeader>
|
||||
<AlertDialogFooter>
|
||||
@@ -376,7 +387,7 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
<ul className="text-sm space-y-1">
|
||||
{selectedScans.map((scan) => (
|
||||
<li key={scan.id} className="flex items-center justify-between">
|
||||
<span className="font-medium">{scan.targetName}</span>
|
||||
<span className="font-medium">{scan.target?.name}</span>
|
||||
<span className="text-muted-foreground text-xs">{scan.engineNames?.join(", ") || "-"}</span>
|
||||
</li>
|
||||
))}
|
||||
@@ -400,7 +411,7 @@ export function ScanHistoryList({ hideToolbar = false, targetId, pageSize: custo
|
||||
<AlertDialogHeader>
|
||||
<AlertDialogTitle>{tConfirm("stopScanTitle")}</AlertDialogTitle>
|
||||
<AlertDialogDescription>
|
||||
{tConfirm("stopScanMessage", { name: scanToStop?.targetName ?? "" })}
|
||||
{tConfirm("stopScanMessage", { name: scanToStop?.target?.name ?? "" })}
|
||||
</AlertDialogDescription>
|
||||
</AlertDialogHeader>
|
||||
<AlertDialogFooter>
|
||||
|
||||
531
frontend/components/scan/history/scan-overview.tsx
Normal file
531
frontend/components/scan/history/scan-overview.tsx
Normal file
@@ -0,0 +1,531 @@
|
||||
"use client"
|
||||
|
||||
import React from "react"
|
||||
import Link from "next/link"
|
||||
import dynamic from "next/dynamic"
|
||||
import { useTranslations, useLocale } from "next-intl"
|
||||
import {
|
||||
Globe,
|
||||
Network,
|
||||
Server,
|
||||
Link2,
|
||||
FolderOpen,
|
||||
AlertTriangle,
|
||||
Clock,
|
||||
Calendar,
|
||||
ChevronRight,
|
||||
Target,
|
||||
CheckCircle2,
|
||||
XCircle,
|
||||
Loader2,
|
||||
Cpu,
|
||||
HardDrive,
|
||||
} from "lucide-react"
|
||||
import {
|
||||
IconCircleCheck,
|
||||
IconCircleX,
|
||||
IconClock,
|
||||
} from "@tabler/icons-react"
|
||||
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"
|
||||
import { Skeleton } from "@/components/ui/skeleton"
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
import { Switch } from "@/components/ui/switch"
|
||||
import { Label } from "@/components/ui/label"
|
||||
import { Separator } from "@/components/ui/separator"
|
||||
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
import { useScan } from "@/hooks/use-scans"
|
||||
import { useScanLogs } from "@/hooks/use-scan-logs"
|
||||
import { ScanLogList } from "@/components/scan/scan-log-list"
|
||||
import { cn } from "@/lib/utils"
|
||||
import type { StageStatus } from "@/types/scan.types"
|
||||
|
||||
// Dynamic import for YamlEditor (only loaded when config tab is active)
|
||||
const YamlEditor = dynamic(() => import('@/components/ui/yaml-editor').then(m => ({ default: m.YamlEditor })), {
|
||||
loading: () => <div className="flex items-center justify-center h-full text-muted-foreground text-sm">加载编辑器中...</div>,
|
||||
ssr: false
|
||||
})
|
||||
|
||||
interface ScanOverviewProps {
|
||||
scanId: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Scan overview component
|
||||
* Displays statistics cards for the scan results
|
||||
*/
|
||||
// Pulsing dot animation
|
||||
function PulsingDot({ className }: { className?: string }) {
|
||||
return (
|
||||
<span className={cn("relative flex h-3 w-3", className)}>
|
||||
<span className="absolute inline-flex h-full w-full animate-ping rounded-full bg-current opacity-75" />
|
||||
<span className="relative inline-flex h-3 w-3 rounded-full bg-current" />
|
||||
</span>
|
||||
)
|
||||
}
|
||||
|
||||
// Stage status icon
|
||||
function StageStatusIcon({ status }: { status: StageStatus }) {
|
||||
switch (status) {
|
||||
case "completed":
|
||||
return <IconCircleCheck className="h-5 w-5 text-[#238636] dark:text-[#3fb950]" />
|
||||
case "running":
|
||||
return <PulsingDot className="text-[#d29922]" />
|
||||
case "failed":
|
||||
return <IconCircleX className="h-5 w-5 text-[#da3633] dark:text-[#f85149]" />
|
||||
case "cancelled":
|
||||
return <IconCircleX className="h-5 w-5 text-[#848d97]" />
|
||||
default:
|
||||
return <IconClock className="h-5 w-5 text-muted-foreground" />
|
||||
}
|
||||
}
|
||||
|
||||
// Format duration (seconds -> readable string)
|
||||
function formatStageDuration(seconds?: number): string | undefined {
|
||||
if (seconds === undefined || seconds === null) return undefined
|
||||
if (seconds < 1) return "<1s"
|
||||
if (seconds < 60) return `${Math.round(seconds)}s`
|
||||
const minutes = Math.floor(seconds / 60)
|
||||
const secs = Math.round(seconds % 60)
|
||||
return secs > 0 ? `${minutes}m ${secs}s` : `${minutes}m`
|
||||
}
|
||||
|
||||
// Status priority for sorting (lower = higher priority)
|
||||
const STAGE_STATUS_PRIORITY: Record<StageStatus, number> = {
|
||||
running: 0,
|
||||
pending: 1,
|
||||
completed: 2,
|
||||
failed: 3,
|
||||
cancelled: 4,
|
||||
}
|
||||
|
||||
// Status style configuration (consistent with scan-history-columns)
|
||||
const SCAN_STATUS_STYLES: Record<string, string> = {
|
||||
running: "bg-[#d29922]/10 text-[#d29922] border-[#d29922]/20",
|
||||
cancelled: "bg-[#848d97]/10 text-[#848d97] border-[#848d97]/20",
|
||||
completed: "bg-[#238636]/10 text-[#238636] border-[#238636]/20 dark:text-[#3fb950]",
|
||||
failed: "bg-[#da3633]/10 text-[#da3633] border-[#da3633]/20 dark:text-[#f85149]",
|
||||
pending: "bg-[#d29922]/10 text-[#d29922] border-[#d29922]/20",
|
||||
}
|
||||
|
||||
/**
|
||||
* Format date helper function
|
||||
*/
|
||||
function formatDate(dateString: string | undefined, locale: string): string {
|
||||
if (!dateString) return "-"
|
||||
const localeStr = locale === 'zh' ? 'zh-CN' : 'en-US'
|
||||
return new Date(dateString).toLocaleString(localeStr, {
|
||||
year: "numeric",
|
||||
month: "short",
|
||||
day: "numeric",
|
||||
hour: "2-digit",
|
||||
minute: "2-digit",
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate duration between two dates
|
||||
*/
|
||||
function formatDuration(startedAt: string | undefined, completedAt: string | undefined): string {
|
||||
if (!startedAt) return "-"
|
||||
const start = new Date(startedAt)
|
||||
const end = completedAt ? new Date(completedAt) : new Date()
|
||||
const diffMs = end.getTime() - start.getTime()
|
||||
const diffMins = Math.floor(diffMs / 60000)
|
||||
const diffHours = Math.floor(diffMins / 60)
|
||||
const remainingMins = diffMins % 60
|
||||
|
||||
if (diffHours > 0) {
|
||||
return `${diffHours}h ${remainingMins}m`
|
||||
}
|
||||
return `${diffMins}m`
|
||||
}
|
||||
|
||||
/**
|
||||
* Get status icon configuration
|
||||
*/
|
||||
function getStatusIcon(status: string) {
|
||||
switch (status) {
|
||||
case "completed":
|
||||
return { icon: CheckCircle2, animate: false }
|
||||
case "running":
|
||||
return { icon: Loader2, animate: true }
|
||||
case "failed":
|
||||
return { icon: XCircle, animate: false }
|
||||
case "cancelled":
|
||||
return { icon: XCircle, animate: false }
|
||||
case "pending":
|
||||
return { icon: Loader2, animate: true }
|
||||
default:
|
||||
return { icon: Clock, animate: false }
|
||||
}
|
||||
}
|
||||
|
||||
export function ScanOverview({ scanId }: ScanOverviewProps) {
|
||||
const t = useTranslations("scan.history.overview")
|
||||
const tStatus = useTranslations("scan.history.status")
|
||||
const tProgress = useTranslations("scan.progress")
|
||||
const locale = useLocale()
|
||||
|
||||
const { data: scan, isLoading, error } = useScan(scanId)
|
||||
|
||||
// Memoize isRunning to avoid unnecessary recalculations
|
||||
const isRunning = React.useMemo(
|
||||
() => scan?.status === 'running' || scan?.status === 'pending',
|
||||
[scan?.status]
|
||||
)
|
||||
|
||||
// Auto-refresh state (default: on when running)
|
||||
const [autoRefresh, setAutoRefresh] = React.useState(true)
|
||||
|
||||
// Tab state for logs/config
|
||||
const [activeTab, setActiveTab] = React.useState<'logs' | 'config'>('logs')
|
||||
|
||||
// Logs hook
|
||||
const { logs, loading: logsLoading } = useScanLogs({
|
||||
scanId,
|
||||
enabled: !!scan,
|
||||
pollingInterval: isRunning && autoRefresh ? 3000 : 0,
|
||||
})
|
||||
|
||||
if (isLoading) {
|
||||
return (
|
||||
<div className="space-y-6">
|
||||
{/* Stats cards skeleton */}
|
||||
<div className="grid gap-4 md:grid-cols-2 lg:grid-cols-3">
|
||||
{[...Array(6)].map((_, i) => (
|
||||
<Card key={i}>
|
||||
<CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
|
||||
<Skeleton className="h-4 w-24" />
|
||||
<Skeleton className="h-4 w-4" />
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<Skeleton className="h-8 w-16" />
|
||||
</CardContent>
|
||||
</Card>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
if (error || !scan) {
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center py-12">
|
||||
<AlertTriangle className="h-10 w-10 text-destructive mb-4" />
|
||||
<p className="text-muted-foreground">{t("loadError")}</p>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
// Memoize derived values to avoid unnecessary recalculations
|
||||
const summary = React.useMemo(() => {
|
||||
const scanAny = scan as any
|
||||
const stats = scan.cachedStats || scanAny.summary || {}
|
||||
return {
|
||||
subdomains: stats.subdomainsCount ?? stats.subdomains ?? 0,
|
||||
websites: stats.websitesCount ?? stats.websites ?? 0,
|
||||
endpoints: stats.endpointsCount ?? stats.endpoints ?? 0,
|
||||
ips: stats.ipsCount ?? stats.ips ?? 0,
|
||||
directories: stats.directoriesCount ?? stats.directories ?? 0,
|
||||
screenshots: stats.screenshotsCount ?? stats.screenshots ?? 0,
|
||||
}
|
||||
}, [scan])
|
||||
|
||||
const vulnSummary = React.useMemo(() => {
|
||||
const scanAny = scan as any
|
||||
const stats = scan.cachedStats || scanAny.summary || {}
|
||||
return stats.vulnerabilities || {
|
||||
total: stats.vulnsTotal ?? 0,
|
||||
critical: stats.vulnsCritical ?? 0,
|
||||
high: stats.vulnsHigh ?? 0,
|
||||
medium: stats.vulnsMedium ?? 0,
|
||||
low: stats.vulnsLow ?? 0,
|
||||
}
|
||||
}, [scan])
|
||||
|
||||
const statusIconConfig = React.useMemo(() => getStatusIcon(scan.status), [scan.status])
|
||||
const StatusIcon = statusIconConfig.icon
|
||||
const statusStyle = SCAN_STATUS_STYLES[scan.status] || "bg-muted text-muted-foreground"
|
||||
const targetId = scan.targetId
|
||||
const targetName = scan.target?.name
|
||||
const startedAt = React.useMemo(() => {
|
||||
const scanAny = scan as any
|
||||
return scanAny.startedAt || scan.createdAt
|
||||
}, [scan])
|
||||
const completedAt = React.useMemo(() => (scan as any).completedAt, [scan])
|
||||
|
||||
const assetCards = React.useMemo(
|
||||
() => [
|
||||
{
|
||||
title: t("cards.websites"),
|
||||
value: summary.websites || 0,
|
||||
icon: Globe,
|
||||
href: `/scan/history/${scanId}/websites/`,
|
||||
},
|
||||
{
|
||||
title: t("cards.subdomains"),
|
||||
value: summary.subdomains || 0,
|
||||
icon: Network,
|
||||
href: `/scan/history/${scanId}/subdomain/`,
|
||||
},
|
||||
{
|
||||
title: t("cards.ips"),
|
||||
value: summary.ips || 0,
|
||||
icon: Server,
|
||||
href: `/scan/history/${scanId}/ip-addresses/`,
|
||||
},
|
||||
{
|
||||
title: t("cards.urls"),
|
||||
value: summary.endpoints || 0,
|
||||
icon: Link2,
|
||||
href: `/scan/history/${scanId}/endpoints/`,
|
||||
},
|
||||
{
|
||||
title: t("cards.directories"),
|
||||
value: summary.directories || 0,
|
||||
icon: FolderOpen,
|
||||
href: `/scan/history/${scanId}/directories/`,
|
||||
},
|
||||
],
|
||||
[summary, scanId, t]
|
||||
)
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-6 flex-1 min-h-0">
|
||||
{/* Scan info + Status */}
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-6 text-sm text-muted-foreground">
|
||||
{/* Target */}
|
||||
{targetId && targetName && (
|
||||
<Link
|
||||
href={`/target/${targetId}/overview/`}
|
||||
className="flex items-center gap-1.5 hover:text-foreground transition-colors"
|
||||
>
|
||||
<Target className="h-4 w-4" />
|
||||
<span>{targetName}</span>
|
||||
</Link>
|
||||
)}
|
||||
{/* Started at */}
|
||||
<div className="flex items-center gap-1.5">
|
||||
<Calendar className="h-4 w-4" />
|
||||
<span>{t("startedAt")}: {formatDate(startedAt, locale)}</span>
|
||||
</div>
|
||||
{/* Duration */}
|
||||
<div className="flex items-center gap-1.5">
|
||||
<Clock className="h-4 w-4" />
|
||||
<span>{t("duration")}: {formatDuration(startedAt, completedAt)}</span>
|
||||
</div>
|
||||
{/* Engine */}
|
||||
{scan.engineNames && scan.engineNames.length > 0 && (
|
||||
<div className="flex items-center gap-1.5">
|
||||
<Cpu className="h-4 w-4" />
|
||||
<span>{scan.engineNames.join(", ")}</span>
|
||||
</div>
|
||||
)}
|
||||
{/* Worker */}
|
||||
{scan.workerName && (
|
||||
<div className="flex items-center gap-1.5">
|
||||
<HardDrive className="h-4 w-4" />
|
||||
<span>{scan.workerName}</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
{/* Status badge */}
|
||||
<Badge variant="outline" className={statusStyle}>
|
||||
<StatusIcon className={`h-3.5 w-3.5 mr-1.5 ${statusIconConfig.animate ? 'animate-spin' : ''}`} />
|
||||
{tStatus(scan.status)}
|
||||
</Badge>
|
||||
</div>
|
||||
|
||||
{/* Asset statistics cards */}
|
||||
<div>
|
||||
<h3 className="text-lg font-semibold mb-4">{t("assetsTitle")}</h3>
|
||||
<div className="grid gap-4 md:grid-cols-2 lg:grid-cols-5">
|
||||
{assetCards.map((card) => (
|
||||
<Link key={card.title} href={card.href}>
|
||||
<Card className="hover:border-primary/50 transition-colors cursor-pointer">
|
||||
<CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
|
||||
<CardTitle className="text-sm font-medium">{card.title}</CardTitle>
|
||||
<card.icon className="h-4 w-4 text-muted-foreground" />
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<div className="text-2xl font-bold">{card.value.toLocaleString()}</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</Link>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Stage Progress + Logs - Left-Right Split Layout */}
|
||||
<div className="grid gap-4 md:grid-cols-[280px_1fr] flex-1 min-h-0">
|
||||
{/* Left Column: Stage Progress + Vulnerability Stats */}
|
||||
<div className="flex flex-col gap-4 min-h-0">
|
||||
{/* Stage Progress */}
|
||||
<Card className="flex-1 min-h-0">
|
||||
<CardHeader className="flex flex-row items-center justify-between space-y-0 pb-3">
|
||||
<CardTitle className="text-sm font-medium">{t("stagesTitle")}</CardTitle>
|
||||
{scan.stageProgress && (
|
||||
<span className="text-xs text-muted-foreground">
|
||||
{Object.values(scan.stageProgress).filter((p: any) => p.status === "completed").length}/
|
||||
{Object.keys(scan.stageProgress).length} {t("stagesCompleted")}
|
||||
</span>
|
||||
)}
|
||||
</CardHeader>
|
||||
<CardContent className="pt-0 flex flex-col flex-1 min-h-0">
|
||||
{scan.stageProgress && Object.keys(scan.stageProgress).length > 0 ? (
|
||||
<div className="space-y-1 flex-1 min-h-0 overflow-y-auto pr-1">
|
||||
{Object.entries(scan.stageProgress)
|
||||
.toSorted(([, a], [, b]) => {
|
||||
const progressA = a as any
|
||||
const progressB = b as any
|
||||
const priorityA = STAGE_STATUS_PRIORITY[progressA.status as StageStatus] ?? 99
|
||||
const priorityB = STAGE_STATUS_PRIORITY[progressB.status as StageStatus] ?? 99
|
||||
if (priorityA !== priorityB) {
|
||||
return priorityA - priorityB
|
||||
}
|
||||
return (progressA.order ?? 0) - (progressB.order ?? 0)
|
||||
})
|
||||
.map(([stageName, progress]) => {
|
||||
const stageProgress = progress as any
|
||||
const isRunning = stageProgress.status === "running"
|
||||
return (
|
||||
<div
|
||||
key={stageName}
|
||||
className={cn(
|
||||
"flex items-center justify-between py-2 px-2 rounded-md transition-colors text-sm",
|
||||
isRunning && "bg-[#d29922]/10 border border-[#d29922]/30",
|
||||
stageProgress.status === "completed" && "text-muted-foreground",
|
||||
stageProgress.status === "failed" && "bg-[#da3633]/10 text-[#da3633]",
|
||||
stageProgress.status === "cancelled" && "text-muted-foreground",
|
||||
)}
|
||||
>
|
||||
<div className="flex items-center gap-2 min-w-0">
|
||||
<StageStatusIcon status={stageProgress.status} />
|
||||
<span className={cn("truncate", isRunning && "font-medium text-foreground")}>
|
||||
{tProgress(`stages.${stageName}`)}
|
||||
</span>
|
||||
</div>
|
||||
<span className="text-xs text-muted-foreground font-mono shrink-0 ml-2">
|
||||
{stageProgress.status === "completed" && stageProgress.duration
|
||||
? formatStageDuration(stageProgress.duration)
|
||||
: stageProgress.status === "running"
|
||||
? tProgress("stage_running")
|
||||
: stageProgress.status === "pending"
|
||||
? "--"
|
||||
: ""}
|
||||
</span>
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
) : (
|
||||
<div className="text-sm text-muted-foreground text-center py-4">
|
||||
{t("noStages")}
|
||||
</div>
|
||||
)}
|
||||
</CardContent>
|
||||
</Card>
|
||||
|
||||
{/* Vulnerability Stats - Compact */}
|
||||
<Link href={`/scan/history/${scanId}/vulnerabilities/`} className="block">
|
||||
<Card className="hover:border-primary/50 transition-colors cursor-pointer">
|
||||
<CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
|
||||
<CardTitle className="text-sm font-medium">{t("vulnerabilitiesTitle")}</CardTitle>
|
||||
<ChevronRight className="h-4 w-4 text-muted-foreground" />
|
||||
</CardHeader>
|
||||
<CardContent className="pt-0">
|
||||
<div className="flex items-center gap-3 flex-wrap">
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-2.5 h-2.5 rounded-full bg-red-500" />
|
||||
<span className="text-sm font-medium">{vulnSummary.critical}</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-2.5 h-2.5 rounded-full bg-orange-500" />
|
||||
<span className="text-sm font-medium">{vulnSummary.high}</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-2.5 h-2.5 rounded-full bg-yellow-500" />
|
||||
<span className="text-sm font-medium">{vulnSummary.medium}</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-1.5">
|
||||
<div className="w-2.5 h-2.5 rounded-full bg-blue-500" />
|
||||
<span className="text-sm font-medium">{vulnSummary.low}</span>
|
||||
</div>
|
||||
<span className="text-xs text-muted-foreground ml-auto">
|
||||
{t("totalVulns", { count: vulnSummary.total })}
|
||||
</span>
|
||||
</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</Link>
|
||||
</div>
|
||||
|
||||
{/* Right Column: Logs / Config */}
|
||||
<div className="flex flex-col min-h-0 rounded-lg overflow-hidden border">
|
||||
{/* Tab Header */}
|
||||
<div className="flex items-center justify-between px-3 py-2 bg-muted/30 border-b shrink-0">
|
||||
<Tabs value={activeTab} onValueChange={(v) => setActiveTab(v as 'logs' | 'config')}>
|
||||
<TabsList variant="underline" className="h-8 gap-3">
|
||||
<TabsTrigger variant="underline" value="logs" className="text-xs">{t("logsTitle")}</TabsTrigger>
|
||||
<TabsTrigger variant="underline" value="config" className="text-xs">{t("configTitle")}</TabsTrigger>
|
||||
</TabsList>
|
||||
</Tabs>
|
||||
{/* Auto-refresh toggle (only for logs tab when running) */}
|
||||
{activeTab === 'logs' && isRunning && (
|
||||
<div className="flex items-center gap-2">
|
||||
<Switch
|
||||
id="log-auto-refresh"
|
||||
checked={autoRefresh}
|
||||
onCheckedChange={setAutoRefresh}
|
||||
className="scale-75"
|
||||
/>
|
||||
<Label htmlFor="log-auto-refresh" className="text-xs cursor-pointer">
|
||||
{t("autoRefresh")}
|
||||
</Label>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Tab Content */}
|
||||
<div className="flex-1 min-h-0">
|
||||
{activeTab === 'logs' ? (
|
||||
<ScanLogList logs={logs} loading={logsLoading} />
|
||||
) : (
|
||||
<div className="h-full">
|
||||
{scan.yamlConfiguration ? (
|
||||
<YamlEditor
|
||||
value={scan.yamlConfiguration}
|
||||
onChange={() => {}}
|
||||
disabled={true}
|
||||
height="100%"
|
||||
/>
|
||||
) : (
|
||||
<div className="flex items-center justify-center h-full text-muted-foreground text-sm">
|
||||
{t("noConfig")}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Bottom status bar (only for logs tab) */}
|
||||
{activeTab === 'logs' && (
|
||||
<div className="flex items-center px-4 py-2 bg-muted/50 border-t text-xs text-muted-foreground shrink-0">
|
||||
<span>{logs.length} 条记录</span>
|
||||
{isRunning && autoRefresh && (
|
||||
<>
|
||||
<Separator orientation="vertical" className="h-3 mx-3" />
|
||||
<span className="flex items-center gap-1.5">
|
||||
<span className="size-1.5 rounded-full bg-green-500 animate-pulse" />
|
||||
每 3 秒刷新
|
||||
</span>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
"use client"
|
||||
|
||||
import { useEffect, useRef, useMemo } from "react"
|
||||
import { useMemo, useRef } from "react"
|
||||
import { AnsiLogViewer } from "@/components/settings/system-logs"
|
||||
import type { ScanLog } from "@/services/scan.service"
|
||||
|
||||
interface ScanLogListProps {
|
||||
@@ -14,98 +15,72 @@ interface ScanLogListProps {
|
||||
function formatTime(isoString: string): string {
|
||||
try {
|
||||
const date = new Date(isoString)
|
||||
return date.toLocaleTimeString('zh-CN', {
|
||||
hour: '2-digit',
|
||||
minute: '2-digit',
|
||||
second: '2-digit',
|
||||
hour12: false,
|
||||
})
|
||||
const h = String(date.getHours()).padStart(2, '0')
|
||||
const m = String(date.getMinutes()).padStart(2, '0')
|
||||
const s = String(date.getSeconds()).padStart(2, '0')
|
||||
return `${h}:${m}:${s}`
|
||||
} catch {
|
||||
return isoString
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* HTML 转义,防止 XSS
|
||||
*/
|
||||
function escapeHtml(text: string): string {
|
||||
return text
|
||||
.replace(/&/g, '&')
|
||||
.replace(/</g, '<')
|
||||
.replace(/>/g, '>')
|
||||
.replace(/"/g, '"')
|
||||
.replace(/'/g, ''')
|
||||
}
|
||||
|
||||
/**
|
||||
* 扫描日志列表组件
|
||||
*
|
||||
* 特性:
|
||||
* - 预渲染 HTML 字符串,减少 DOM 节点提升性能
|
||||
* - 颜色区分:info=默认, warning=黄色, error=红色
|
||||
* - 自动滚动到底部
|
||||
* 复用 AnsiLogViewer 组件
|
||||
*/
|
||||
export function ScanLogList({ logs, loading }: ScanLogListProps) {
|
||||
const containerRef = useRef<HTMLDivElement>(null)
|
||||
const isAtBottomRef = useRef(true) // 跟踪用户是否在底部
|
||||
// 稳定的 content 引用,只有内容真正变化时才更新
|
||||
const contentRef = useRef('')
|
||||
const lastLogCountRef = useRef(0)
|
||||
const lastLogIdRef = useRef<number | null>(null)
|
||||
|
||||
// 预渲染 HTML 字符串
|
||||
const htmlContent = useMemo(() => {
|
||||
// 将日志转换为纯文本格式
|
||||
const content = useMemo(() => {
|
||||
if (logs.length === 0) return ''
|
||||
|
||||
return logs.map(log => {
|
||||
// 检查是否真正需要更新
|
||||
const lastLog = logs[logs.length - 1]
|
||||
if (
|
||||
logs.length === lastLogCountRef.current &&
|
||||
lastLog?.id === lastLogIdRef.current
|
||||
) {
|
||||
// 日志没有变化,返回缓存的 content
|
||||
return contentRef.current
|
||||
}
|
||||
|
||||
// 更新缓存
|
||||
lastLogCountRef.current = logs.length
|
||||
lastLogIdRef.current = lastLog?.id ?? null
|
||||
|
||||
const newContent = logs.map(log => {
|
||||
const time = formatTime(log.createdAt)
|
||||
const content = escapeHtml(log.content)
|
||||
const levelStyle = log.level === 'error'
|
||||
? 'color:#ef4444'
|
||||
: log.level === 'warning'
|
||||
? 'color:#eab308'
|
||||
: ''
|
||||
|
||||
return `<div style="line-height:1.625;word-break:break-all;${levelStyle}"><span style="color:#6b7280">${time}</span> ${content}</div>`
|
||||
}).join('')
|
||||
const levelTag = log.level.toUpperCase()
|
||||
return `[${time}] [${levelTag}] ${log.content}`
|
||||
}).join('\n')
|
||||
|
||||
contentRef.current = newContent
|
||||
return newContent
|
||||
}, [logs])
|
||||
|
||||
// 监听滚动事件,检测用户是否在底部
|
||||
useEffect(() => {
|
||||
const container = containerRef.current
|
||||
if (!container) return
|
||||
|
||||
const handleScroll = () => {
|
||||
const { scrollTop, scrollHeight, clientHeight } = container
|
||||
// 允许 30px 的容差,认为在底部附近
|
||||
isAtBottomRef.current = scrollHeight - scrollTop - clientHeight < 30
|
||||
}
|
||||
|
||||
container.addEventListener('scroll', handleScroll)
|
||||
return () => container.removeEventListener('scroll', handleScroll)
|
||||
}, [])
|
||||
if (loading && logs.length === 0) {
|
||||
return (
|
||||
<div className="h-full flex items-center justify-center bg-[#1e1e1e] text-[#808080]">
|
||||
加载中...
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
// 只有用户在底部时才自动滚动
|
||||
useEffect(() => {
|
||||
if (containerRef.current && isAtBottomRef.current) {
|
||||
containerRef.current.scrollTop = containerRef.current.scrollHeight
|
||||
}
|
||||
}, [htmlContent])
|
||||
if (logs.length === 0) {
|
||||
return (
|
||||
<div className="h-full flex items-center justify-center bg-[#1e1e1e] text-[#808080]">
|
||||
暂无日志
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={containerRef}
|
||||
className="h-[400px] overflow-y-auto font-mono text-[11px] p-3 bg-muted/30 rounded-lg"
|
||||
>
|
||||
{logs.length === 0 && !loading && (
|
||||
<div className="text-muted-foreground text-center py-8">
|
||||
暂无日志
|
||||
</div>
|
||||
)}
|
||||
{htmlContent && (
|
||||
<div dangerouslySetInnerHTML={{ __html: htmlContent }} />
|
||||
)}
|
||||
{loading && logs.length === 0 && (
|
||||
<div className="text-muted-foreground text-center py-8">
|
||||
加载中...
|
||||
</div>
|
||||
)}
|
||||
<div className="h-full">
|
||||
<AnsiLogViewer content={content} />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -40,7 +40,11 @@ interface StageDetail {
|
||||
*/
|
||||
export interface ScanProgressData {
|
||||
id: number
|
||||
targetName: string
|
||||
target?: {
|
||||
id: number
|
||||
name: string
|
||||
type: string
|
||||
}
|
||||
engineNames: string[]
|
||||
status: string
|
||||
progress: number
|
||||
@@ -90,7 +94,7 @@ function ScanStatusIcon({ status }: { status: string }) {
|
||||
return <IconCircleX className="h-5 w-5 text-[#848d97]" />
|
||||
case "failed":
|
||||
return <IconCircleX className="h-5 w-5 text-[#da3633] dark:text-[#f85149]" />
|
||||
case "initiated":
|
||||
case "pending":
|
||||
return <PulsingDot className="text-[#d29922]" />
|
||||
default:
|
||||
return <PulsingDot className="text-muted-foreground" />
|
||||
@@ -184,6 +188,9 @@ function StageRow({ stage, t }: { stage: StageDetail; t: (key: string) => string
|
||||
)
|
||||
}
|
||||
|
||||
/** Dialog width constant */
|
||||
const DIALOG_WIDTH = 'sm:max-w-[600px] sm:min-w-[550px]'
|
||||
|
||||
/**
|
||||
* Scan progress dialog
|
||||
*/
|
||||
@@ -195,9 +202,12 @@ export function ScanProgressDialog({
|
||||
const t = useTranslations("scan.progress")
|
||||
const locale = useLocale()
|
||||
const [activeTab, setActiveTab] = useState<'stages' | 'logs'>('stages')
|
||||
|
||||
// 判断扫描是否正在运行(用于控制轮询)
|
||||
const isRunning = data?.status === 'running' || data?.status === 'initiated'
|
||||
|
||||
// Memoize isRunning to avoid unnecessary recalculations
|
||||
const isRunning = React.useMemo(
|
||||
() => data?.status === 'running' || data?.status === 'initiated',
|
||||
[data?.status]
|
||||
)
|
||||
|
||||
// 日志轮询 Hook
|
||||
const { logs, loading: logsLoading } = useScanLogs({
|
||||
@@ -208,12 +218,9 @@ export function ScanProgressDialog({
|
||||
|
||||
if (!data) return null
|
||||
|
||||
// 固定宽度,切换 Tab 时不变化
|
||||
const dialogWidth = 'sm:max-w-[600px] sm:min-w-[550px]'
|
||||
|
||||
return (
|
||||
<Dialog open={open} onOpenChange={onOpenChange}>
|
||||
<DialogContent className={cn(dialogWidth, "transition-all duration-200")}>
|
||||
<DialogContent className={cn(DIALOG_WIDTH, "transition-all duration-200")}>
|
||||
<DialogHeader>
|
||||
<DialogTitle className="flex items-center gap-2">
|
||||
<ScanStatusIcon status={data.status} />
|
||||
@@ -225,7 +232,7 @@ export function ScanProgressDialog({
|
||||
<div className="space-y-2">
|
||||
<div className="flex items-center justify-between text-sm">
|
||||
<span className="text-muted-foreground">{t("target")}</span>
|
||||
<span className="font-medium">{data.targetName}</span>
|
||||
<span className="font-medium">{data.target?.name}</span>
|
||||
</div>
|
||||
<div className="flex items-start justify-between text-sm gap-4">
|
||||
<span className="text-muted-foreground shrink-0">{t("engine")}</span>
|
||||
@@ -280,7 +287,9 @@ export function ScanProgressDialog({
|
||||
</div>
|
||||
) : (
|
||||
/* Log list */
|
||||
<ScanLogList logs={logs} loading={logsLoading} />
|
||||
<div className="h-[300px] overflow-hidden rounded-md">
|
||||
<ScanLogList logs={logs} loading={logsLoading} />
|
||||
</div>
|
||||
)}
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
@@ -320,25 +329,25 @@ function formatDateTime(isoString?: string, locale: string = "zh"): string {
|
||||
}
|
||||
}
|
||||
|
||||
/** Get stage result count from summary */
|
||||
function getStageResultCount(stageName: string, summary: ScanRecord["summary"]): number | undefined {
|
||||
if (!summary) return undefined
|
||||
/** Get stage result count from cachedStats */
|
||||
function getStageResultCount(stageName: string, stats: ScanRecord["cachedStats"]): number | undefined {
|
||||
if (!stats) return undefined
|
||||
switch (stageName) {
|
||||
case "subdomain_discovery":
|
||||
case "subdomainDiscovery":
|
||||
return summary.subdomains
|
||||
return stats.subdomainsCount
|
||||
case "site_scan":
|
||||
case "siteScan":
|
||||
return summary.websites
|
||||
return stats.websitesCount
|
||||
case "directory_scan":
|
||||
case "directoryScan":
|
||||
return summary.directories
|
||||
return stats.directoriesCount
|
||||
case "url_fetch":
|
||||
case "urlFetch":
|
||||
return summary.endpoints
|
||||
return stats.endpointsCount
|
||||
case "vuln_scan":
|
||||
case "vulnScan":
|
||||
return summary.vulnerabilities?.total
|
||||
return stats.vulnsTotal
|
||||
default:
|
||||
return undefined
|
||||
}
|
||||
@@ -350,17 +359,33 @@ function getStageResultCount(stageName: string, summary: ScanRecord["summary"]):
|
||||
* Stage names come directly from engine_config keys, no mapping needed
|
||||
* Stage order follows the order field, consistent with Flow execution order
|
||||
*/
|
||||
// Status priority for sorting (lower = higher priority)
|
||||
const STATUS_PRIORITY: Record<StageStatus, number> = {
|
||||
running: 0,
|
||||
pending: 1,
|
||||
completed: 2,
|
||||
failed: 3,
|
||||
cancelled: 4,
|
||||
}
|
||||
|
||||
export function buildScanProgressData(scan: ScanRecord): ScanProgressData {
|
||||
const stages: StageDetail[] = []
|
||||
|
||||
if (scan.stageProgress) {
|
||||
// Sort by order then iterate
|
||||
// Sort by status priority first, then by order
|
||||
const sortedEntries = Object.entries(scan.stageProgress)
|
||||
.sort(([, a], [, b]) => (a.order ?? 0) - (b.order ?? 0))
|
||||
.toSorted(([, a], [, b]) => {
|
||||
const priorityA = STATUS_PRIORITY[a.status] ?? 99
|
||||
const priorityB = STATUS_PRIORITY[b.status] ?? 99
|
||||
if (priorityA !== priorityB) {
|
||||
return priorityA - priorityB
|
||||
}
|
||||
return (a.order ?? 0) - (b.order ?? 0)
|
||||
})
|
||||
|
||||
for (const [stageName, progress] of sortedEntries) {
|
||||
const resultCount = progress.status === "completed"
|
||||
? getStageResultCount(stageName, scan.summary)
|
||||
? getStageResultCount(stageName, scan.cachedStats)
|
||||
: undefined
|
||||
|
||||
stages.push({
|
||||
@@ -375,7 +400,7 @@ export function buildScanProgressData(scan: ScanRecord): ScanProgressData {
|
||||
|
||||
return {
|
||||
id: scan.id,
|
||||
targetName: scan.targetName,
|
||||
target: scan.target,
|
||||
engineNames: scan.engineNames || [],
|
||||
status: scan.status,
|
||||
progress: scan.progress,
|
||||
|
||||
@@ -120,14 +120,15 @@ export function CreateScheduledScanDialog({
|
||||
const handleOrgSearch = () => setOrgSearch(orgSearchInput)
|
||||
const handleTargetSearch = () => setTargetSearch(targetSearchInput)
|
||||
|
||||
const { data: organizationsData, isFetching: isOrgFetching } = useOrganizations({
|
||||
pageSize: 50,
|
||||
search: orgSearch || undefined
|
||||
})
|
||||
const { data: targetsData, isFetching: isTargetFetching } = useTargets({
|
||||
pageSize: 50,
|
||||
search: targetSearch || undefined
|
||||
})
|
||||
// Only fetch data when dialog is open (avoid unnecessary requests on page load)
|
||||
const { data: organizationsData, isFetching: isOrgFetching } = useOrganizations({
|
||||
pageSize: 20,
|
||||
filter: orgSearch || undefined
|
||||
}, { enabled: open })
|
||||
const { data: targetsData, isFetching: isTargetFetching } = useTargets({
|
||||
pageSize: 20,
|
||||
filter: targetSearch || undefined
|
||||
}, { enabled: open })
|
||||
|
||||
const hasPreset = !!(presetOrganizationId || presetTargetId)
|
||||
const steps = hasPreset ? PRESET_STEPS : FULL_STEPS
|
||||
|
||||
437
frontend/components/screenshots/screenshots-gallery.tsx
Normal file
437
frontend/components/screenshots/screenshots-gallery.tsx
Normal file
@@ -0,0 +1,437 @@
|
||||
"use client"
|
||||
|
||||
import React, { useState, useCallback, useMemo } from "react"
|
||||
import { AlertTriangle, Image as ImageIcon, ExternalLink, Trash2, X, ChevronLeft, ChevronRight, Search } from "lucide-react"
|
||||
import { useTranslations } from "next-intl"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Checkbox } from "@/components/ui/checkbox"
|
||||
import { Skeleton } from "@/components/ui/skeleton"
|
||||
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/ui/select"
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogTitle,
|
||||
} from "@/components/ui/dialog"
|
||||
import { VisuallyHidden } from "@radix-ui/react-visually-hidden"
|
||||
import { useTargetScreenshots, useScanScreenshots } from "@/hooks/use-screenshots"
|
||||
import { ScreenshotService } from "@/services/screenshot.service"
|
||||
import { toast } from "sonner"
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const PAGE_SIZE_OPTIONS = [12, 24, 48]
|
||||
|
||||
interface Screenshot {
|
||||
id: number
|
||||
url: string
|
||||
statusCode: number | null
|
||||
createdAt: string
|
||||
}
|
||||
|
||||
interface ScreenshotsGalleryProps {
|
||||
targetId?: number
|
||||
scanId?: number
|
||||
}
|
||||
|
||||
export function ScreenshotsGallery({ targetId, scanId }: ScreenshotsGalleryProps) {
|
||||
const [pagination, setPagination] = useState({ pageIndex: 0, pageSize: 12 })
|
||||
const [searchInput, setSearchInput] = useState("") // 输入框的值
|
||||
const [filterQuery, setFilterQuery] = useState("") // 实际用于查询的值
|
||||
const [selectedIds, setSelectedIds] = useState<Set<number>>(new Set())
|
||||
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
|
||||
const [isDeleting, setIsDeleting] = useState(false)
|
||||
const [lightboxOpen, setLightboxOpen] = useState(false)
|
||||
const [lightboxIndex, setLightboxIndex] = useState(0)
|
||||
|
||||
const t = useTranslations("pages.screenshots")
|
||||
const tCommon = useTranslations("common")
|
||||
const tToast = useTranslations("toast")
|
||||
|
||||
// Fetch screenshots
|
||||
const targetQuery = useTargetScreenshots(
|
||||
targetId || 0,
|
||||
{ page: pagination.pageIndex + 1, pageSize: pagination.pageSize, filter: filterQuery || undefined },
|
||||
{ enabled: !!targetId }
|
||||
)
|
||||
|
||||
const scanQuery = useScanScreenshots(
|
||||
scanId || 0,
|
||||
{ page: pagination.pageIndex + 1, pageSize: pagination.pageSize, filter: filterQuery || undefined },
|
||||
{ enabled: !!scanId }
|
||||
)
|
||||
|
||||
const activeQuery = targetId ? targetQuery : scanQuery
|
||||
const { data, isLoading, error, refetch } = activeQuery
|
||||
|
||||
const screenshots: Screenshot[] = useMemo(() => data?.results || [], [data])
|
||||
const totalPages = data?.totalPages || 0
|
||||
|
||||
// Selection handlers
|
||||
const toggleSelect = useCallback((id: number) => {
|
||||
setSelectedIds(prev => {
|
||||
const next = new Set(prev)
|
||||
if (next.has(id)) {
|
||||
next.delete(id)
|
||||
} else {
|
||||
next.add(id)
|
||||
}
|
||||
return next
|
||||
})
|
||||
}, [])
|
||||
|
||||
const selectAll = useCallback(() => {
|
||||
if (selectedIds.size === screenshots.length) {
|
||||
setSelectedIds(new Set())
|
||||
} else {
|
||||
setSelectedIds(new Set(screenshots.map(s => s.id)))
|
||||
}
|
||||
}, [screenshots, selectedIds.size])
|
||||
|
||||
// Delete handler
|
||||
const handleBulkDelete = async () => {
|
||||
if (selectedIds.size === 0) return
|
||||
setIsDeleting(true)
|
||||
try {
|
||||
const result = await ScreenshotService.bulkDelete(Array.from(selectedIds))
|
||||
toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
|
||||
setSelectedIds(new Set())
|
||||
setDeleteDialogOpen(false)
|
||||
refetch()
|
||||
} catch (error) {
|
||||
console.error("Failed to delete screenshots", error)
|
||||
toast.error(tToast("deleteFailed"))
|
||||
} finally {
|
||||
setIsDeleting(false)
|
||||
}
|
||||
}
|
||||
|
||||
// Filter handler - 手动触发搜索
|
||||
const handleSearch = () => {
|
||||
setFilterQuery(searchInput)
|
||||
setPagination(prev => ({ ...prev, pageIndex: 0 }))
|
||||
}
|
||||
|
||||
// 回车触发搜索
|
||||
const handleKeyDown = (e: React.KeyboardEvent<HTMLInputElement>) => {
|
||||
if (e.key === 'Enter') {
|
||||
handleSearch()
|
||||
}
|
||||
}
|
||||
|
||||
// Handle page size change
|
||||
const handlePageSizeChange = (value: string) => {
|
||||
const newPageSize = parseInt(value, 10)
|
||||
setPagination({ pageIndex: 0, pageSize: newPageSize })
|
||||
}
|
||||
|
||||
// Lightbox handlers
|
||||
const openLightbox = (index: number) => {
|
||||
setLightboxIndex(index)
|
||||
setLightboxOpen(true)
|
||||
}
|
||||
|
||||
const nextImage = () => {
|
||||
setLightboxIndex(prev => (prev + 1) % screenshots.length)
|
||||
}
|
||||
|
||||
const prevImage = () => {
|
||||
setLightboxIndex(prev => (prev - 1 + screenshots.length) % screenshots.length)
|
||||
}
|
||||
|
||||
// Get image URL
|
||||
const getImageUrl = (screenshot: Screenshot) => {
|
||||
if (scanId) {
|
||||
return ScreenshotService.getSnapshotImageUrl(scanId, screenshot.id)
|
||||
}
|
||||
return ScreenshotService.getImageUrl(screenshot.id)
|
||||
}
|
||||
|
||||
// Error state
|
||||
if (error) {
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center py-12">
|
||||
<div className="rounded-full bg-destructive/10 p-3 mb-4">
|
||||
<AlertTriangle className="h-10 w-10 text-destructive" />
|
||||
</div>
|
||||
<h3 className="text-lg font-semibold mb-2">{tCommon("status.error")}</h3>
|
||||
<p className="text-muted-foreground text-center mb-4">
|
||||
{t("loadError")}
|
||||
</p>
|
||||
<Button onClick={() => refetch()}>{tCommon("actions.retry")}</Button>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
// Loading state
|
||||
if (isLoading && !data) {
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
<div className="flex items-center gap-4">
|
||||
<Skeleton className="h-10 w-64" />
|
||||
</div>
|
||||
<div className="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4">
|
||||
{Array.from({ length: 8 }).map((_, i) => (
|
||||
<Skeleton key={i} className="aspect-video rounded-lg" />
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
// Empty state
|
||||
if (screenshots.length === 0 && !filterQuery) {
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center py-12">
|
||||
<div className="rounded-full bg-muted p-3 mb-4">
|
||||
<ImageIcon className="h-10 w-10 text-muted-foreground" />
|
||||
</div>
|
||||
<h3 className="text-lg font-semibold mb-2">{t("empty.title")}</h3>
|
||||
<p className="text-muted-foreground text-center">
|
||||
{t("empty.description")}
|
||||
</p>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
{/* Toolbar */}
|
||||
<div className="flex items-center justify-between gap-4">
|
||||
<div className="flex items-center gap-2">
|
||||
<Input
|
||||
placeholder={t("filterPlaceholder")}
|
||||
value={searchInput}
|
||||
onChange={(e) => setSearchInput(e.target.value)}
|
||||
onKeyDown={handleKeyDown}
|
||||
className="w-64"
|
||||
/>
|
||||
<Button variant="outline" size="icon" onClick={handleSearch}>
|
||||
<Search className="h-4 w-4" />
|
||||
</Button>
|
||||
</div>
|
||||
<div className="flex items-center gap-2">
|
||||
{targetId && selectedIds.size > 0 && (
|
||||
<Button
|
||||
variant="destructive"
|
||||
size="sm"
|
||||
onClick={() => setDeleteDialogOpen(true)}
|
||||
>
|
||||
<Trash2 className="h-4 w-4 mr-1" />
|
||||
{tCommon("actions.delete")} ({selectedIds.size})
|
||||
</Button>
|
||||
)}
|
||||
{screenshots.length > 0 && targetId && (
|
||||
<Button variant="outline" size="sm" onClick={selectAll}>
|
||||
{selectedIds.size === screenshots.length ? tCommon("actions.deselectAll") : tCommon("actions.selectAll")}
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Gallery grid */}
|
||||
<div className="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4">
|
||||
{screenshots.map((screenshot, index) => (
|
||||
<div
|
||||
key={screenshot.id}
|
||||
className={cn(
|
||||
"group relative aspect-video rounded-lg overflow-hidden border bg-muted cursor-pointer transition-all",
|
||||
selectedIds.has(screenshot.id) && "ring-2 ring-primary"
|
||||
)}
|
||||
>
|
||||
{/* Checkbox */}
|
||||
{targetId && (
|
||||
<div
|
||||
className="absolute top-2 left-2 z-10"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation()
|
||||
toggleSelect(screenshot.id)
|
||||
}}
|
||||
>
|
||||
<Checkbox
|
||||
checked={selectedIds.has(screenshot.id)}
|
||||
className="bg-background/80 backdrop-blur-sm"
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Image */}
|
||||
<img
|
||||
src={getImageUrl(screenshot)}
|
||||
alt={screenshot.url}
|
||||
className="w-full h-full object-cover transition-transform group-hover:scale-105"
|
||||
onClick={() => openLightbox(index)}
|
||||
loading="lazy"
|
||||
/>
|
||||
|
||||
{/* Overlay with URL and status code */}
|
||||
<div className="absolute inset-x-0 bottom-0 bg-gradient-to-t from-black/80 to-transparent p-2">
|
||||
<div className="flex items-center gap-2">
|
||||
{screenshot.statusCode && (
|
||||
<span className={cn(
|
||||
"text-xs px-1.5 py-0.5 rounded font-medium shrink-0",
|
||||
screenshot.statusCode >= 200 && screenshot.statusCode < 300 && "bg-green-500/80 text-white",
|
||||
screenshot.statusCode >= 300 && screenshot.statusCode < 400 && "bg-blue-500/80 text-white",
|
||||
screenshot.statusCode >= 400 && screenshot.statusCode < 500 && "bg-yellow-500/80 text-black",
|
||||
screenshot.statusCode >= 500 && "bg-red-500/80 text-white"
|
||||
)}>
|
||||
{screenshot.statusCode}
|
||||
</span>
|
||||
)}
|
||||
<p className="text-white text-xs truncate" title={screenshot.url}>
|
||||
{screenshot.url}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Hover actions */}
|
||||
<div className="absolute top-2 right-2 opacity-0 group-hover:opacity-100 transition-opacity">
|
||||
<a
|
||||
href={screenshot.url}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
className="inline-flex items-center justify-center h-8 w-8 rounded-md bg-background/80 backdrop-blur-sm hover:bg-background"
|
||||
>
|
||||
<ExternalLink className="h-4 w-4" />
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
|
||||
{/* Empty search results */}
|
||||
{screenshots.length === 0 && filterQuery && (
|
||||
<div className="flex flex-col items-center justify-center py-12">
|
||||
<p className="text-muted-foreground">{t("noResults")}</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Pagination */}
|
||||
{(totalPages > 1 || (data?.total ?? 0) > 12) && (
|
||||
<div className="flex justify-center items-center gap-4">
|
||||
<div className="flex items-center gap-2">
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={() => setPagination(prev => ({ ...prev, pageIndex: Math.max(0, prev.pageIndex - 1) }))}
|
||||
disabled={pagination.pageIndex === 0}
|
||||
>
|
||||
<ChevronLeft className="h-4 w-4" />
|
||||
</Button>
|
||||
<span className="text-sm text-muted-foreground">
|
||||
{pagination.pageIndex + 1} / {totalPages || 1}
|
||||
</span>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={() => setPagination(prev => ({ ...prev, pageIndex: Math.min(totalPages - 1, prev.pageIndex + 1) }))}
|
||||
disabled={pagination.pageIndex >= totalPages - 1}
|
||||
>
|
||||
<ChevronRight className="h-4 w-4" />
|
||||
</Button>
|
||||
</div>
|
||||
<Select value={String(pagination.pageSize)} onValueChange={handlePageSizeChange}>
|
||||
<SelectTrigger className="w-20 h-8">
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{PAGE_SIZE_OPTIONS.map(size => (
|
||||
<SelectItem key={size} value={String(size)}>{size}</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Lightbox */}
|
||||
<Dialog open={lightboxOpen} onOpenChange={setLightboxOpen}>
|
||||
<DialogContent className="max-w-[90vw] max-h-[90vh] p-0 bg-black/95 border-none">
|
||||
<VisuallyHidden>
|
||||
<DialogTitle>{t("lightboxTitle")}</DialogTitle>
|
||||
</VisuallyHidden>
|
||||
<div className="relative w-full h-full flex items-center justify-center">
|
||||
{/* Close button */}
|
||||
<button
|
||||
onClick={() => setLightboxOpen(false)}
|
||||
className="absolute top-4 right-4 z-50 p-2 rounded-full bg-white/10 hover:bg-white/20 transition-colors"
|
||||
>
|
||||
<X className="h-6 w-6 text-white" />
|
||||
</button>
|
||||
|
||||
{/* Navigation */}
|
||||
{screenshots.length > 1 && (
|
||||
<>
|
||||
<button
|
||||
onClick={prevImage}
|
||||
className="absolute left-4 z-50 p-2 rounded-full bg-white/10 hover:bg-white/20 transition-colors"
|
||||
>
|
||||
<ChevronLeft className="h-8 w-8 text-white" />
|
||||
</button>
|
||||
<button
|
||||
onClick={nextImage}
|
||||
className="absolute right-4 z-50 p-2 rounded-full bg-white/10 hover:bg-white/20 transition-colors"
|
||||
>
|
||||
<ChevronRight className="h-8 w-8 text-white" />
|
||||
</button>
|
||||
</>
|
||||
)}
|
||||
|
||||
{/* Image */}
|
||||
{screenshots[lightboxIndex] && (
|
||||
<div className="flex flex-col items-center gap-4 p-8">
|
||||
<img
|
||||
src={getImageUrl(screenshots[lightboxIndex])}
|
||||
alt={screenshots[lightboxIndex].url}
|
||||
className="max-w-full max-h-[70vh] object-contain"
|
||||
/>
|
||||
<div className="text-white text-center">
|
||||
<p className="text-sm opacity-80">{lightboxIndex + 1} / {screenshots.length}</p>
|
||||
<div className="flex items-center gap-2 justify-center mt-1">
|
||||
{screenshots[lightboxIndex].statusCode && (
|
||||
<span className={cn(
|
||||
"text-sm px-2 py-0.5 rounded font-medium",
|
||||
screenshots[lightboxIndex].statusCode >= 200 && screenshots[lightboxIndex].statusCode < 300 && "bg-green-500 text-white",
|
||||
screenshots[lightboxIndex].statusCode >= 300 && screenshots[lightboxIndex].statusCode < 400 && "bg-blue-500 text-white",
|
||||
screenshots[lightboxIndex].statusCode >= 400 && screenshots[lightboxIndex].statusCode < 500 && "bg-yellow-500 text-black",
|
||||
screenshots[lightboxIndex].statusCode >= 500 && "bg-red-500 text-white"
|
||||
)}>
|
||||
{screenshots[lightboxIndex].statusCode}
|
||||
</span>
|
||||
)}
|
||||
<a
|
||||
href={screenshots[lightboxIndex].url}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="text-blue-400 hover:underline flex items-center gap-1"
|
||||
>
|
||||
{screenshots[lightboxIndex].url}
|
||||
<ExternalLink className="h-3 w-3" />
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
|
||||
{/* Delete confirmation */}
|
||||
<ConfirmDialog
|
||||
open={deleteDialogOpen}
|
||||
onOpenChange={setDeleteDialogOpen}
|
||||
title={tCommon("actions.confirmDelete")}
|
||||
description={tCommon("actions.deleteConfirmMessage", { count: selectedIds.size })}
|
||||
onConfirm={handleBulkDelete}
|
||||
loading={isDeleting}
|
||||
variant="destructive"
|
||||
/>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -57,7 +57,7 @@ const QUICK_SEARCH_TAGS = [
|
||||
]
|
||||
|
||||
// 最近搜索本地存储 key
|
||||
const RECENT_SEARCHES_KEY = 'xingrin_recent_searches'
|
||||
const RECENT_SEARCHES_KEY = 'star_patrol_recent_searches'
|
||||
const MAX_RECENT_SEARCHES = 5
|
||||
|
||||
// 获取最近搜索记录
|
||||
|
||||
@@ -37,13 +37,15 @@ interface SearchResultCardProps {
|
||||
onViewVulnerability?: (vuln: Vulnerability) => void
|
||||
}
|
||||
|
||||
import { SEVERITY_STYLES } from "@/lib/severity-config"
|
||||
|
||||
// 漏洞严重程度颜色配置
|
||||
const severityColors: Record<string, string> = {
|
||||
critical: "bg-[#da3633]/10 text-[#da3633] border border-[#da3633]/20 dark:text-[#f85149]",
|
||||
high: "bg-[#d29922]/10 text-[#d29922] border border-[#d29922]/20",
|
||||
medium: "bg-[#d4a72c]/10 text-[#d4a72c] border border-[#d4a72c]/20",
|
||||
low: "bg-[#238636]/10 text-[#238636] border border-[#238636]/20 dark:text-[#3fb950]",
|
||||
info: "bg-[#848d97]/10 text-[#848d97] border border-[#848d97]/20",
|
||||
critical: SEVERITY_STYLES.critical.className,
|
||||
high: SEVERITY_STYLES.high.className,
|
||||
medium: SEVERITY_STYLES.medium.className,
|
||||
low: SEVERITY_STYLES.low.className,
|
||||
info: SEVERITY_STYLES.info.className,
|
||||
}
|
||||
|
||||
// 状态码 Badge variant
|
||||
|
||||
@@ -2,10 +2,13 @@
|
||||
|
||||
import { useMemo, useRef, useEffect } from "react"
|
||||
import AnsiToHtml from "ansi-to-html"
|
||||
import type { LogLevel } from "./log-toolbar"
|
||||
|
||||
interface AnsiLogViewerProps {
|
||||
content: string
|
||||
className?: string
|
||||
searchQuery?: string
|
||||
logLevel?: LogLevel
|
||||
}
|
||||
|
||||
// 日志级别颜色配置
|
||||
@@ -52,9 +55,10 @@ function hasAnsiCodes(text: string): boolean {
|
||||
|
||||
// 解析纯文本日志内容,为日志级别添加颜色
|
||||
function colorizeLogContent(content: string): string {
|
||||
// 匹配日志格式: [时间] [级别] [模块:行号] 消息
|
||||
// 例如: [2025-01-05 10:30:00] [INFO] [apps.scan:123] 消息内容
|
||||
const logLineRegex = /^(\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\]) (\[(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\]) (.*)$/
|
||||
// 匹配日志格式:
|
||||
// 1) 系统日志: [2026-01-10 09:51:52] [INFO] [apps.scan.xxx:123] ...
|
||||
// 2) 扫描日志: [09:50:37] [INFO] [subdomain_discovery] ...
|
||||
const logLineRegex = /^(\[(?:\d{4}-\d{2}-\d{2} )?\d{2}:\d{2}:\d{2}\]) (\[(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\]) (.*)$/i
|
||||
|
||||
return content
|
||||
.split("\n")
|
||||
@@ -63,14 +67,15 @@ function colorizeLogContent(content: string): string {
|
||||
|
||||
if (match) {
|
||||
const [, timestamp, levelBracket, level, rest] = match
|
||||
const color = LOG_LEVEL_COLORS[level] || "#d4d4d4"
|
||||
const levelUpper = level.toUpperCase()
|
||||
const color = LOG_LEVEL_COLORS[levelUpper] || "#d4d4d4"
|
||||
// ansiConverter.toHtml 已经处理了 HTML 转义
|
||||
const escapedTimestamp = ansiConverter.toHtml(timestamp)
|
||||
const escapedLevelBracket = ansiConverter.toHtml(levelBracket)
|
||||
const escapedRest = ansiConverter.toHtml(rest)
|
||||
|
||||
// 时间戳灰色,日志级别带颜色,其余默认色
|
||||
return `<span style="color:#808080">${escapedTimestamp}</span> <span style="color:${color};font-weight:${level === "CRITICAL" ? "bold" : "normal"}">${escapedLevelBracket}</span> ${escapedRest}`
|
||||
return `<span style="color:#808080">${escapedTimestamp}</span> <span style="color:${color};font-weight:${levelUpper === "CRITICAL" ? "bold" : "normal"}">${escapedLevelBracket}</span> ${escapedRest}`
|
||||
}
|
||||
|
||||
// 非标准格式的行,也进行 HTML 转义
|
||||
@@ -79,7 +84,112 @@ function colorizeLogContent(content: string): string {
|
||||
.join("\n")
|
||||
}
|
||||
|
||||
export function AnsiLogViewer({ content, className }: AnsiLogViewerProps) {
|
||||
// 高亮搜索关键词
|
||||
function highlightSearch(html: string, query: string): string {
|
||||
if (!query.trim()) return html
|
||||
|
||||
// `ansi-to-html` 在 `escapeXML: true` 时,会把非 ASCII 字符(如中文)转成实体:
|
||||
// 例如 "中文" => "中文"。
|
||||
// 因此这里需要用同样的转义规则来生成可匹配的搜索串。
|
||||
const escapedQueryForHtml = ansiConverter.toHtml(query)
|
||||
|
||||
// 转义正则特殊字符
|
||||
const escapedQuery = escapedQueryForHtml.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")
|
||||
const regex = new RegExp(`(${escapedQuery})`, "giu")
|
||||
|
||||
// 在标签外的文本中高亮关键词
|
||||
return html.replace(/(<[^>]+>)|([^<]+)/g, (match, tag, text) => {
|
||||
if (tag) return tag
|
||||
if (text) {
|
||||
return text.replace(
|
||||
regex,
|
||||
'<mark style="background:#fbbf24;color:#1e1e1e;border-radius:2px;padding:0 2px">$1</mark>'
|
||||
)
|
||||
}
|
||||
return match
|
||||
})
|
||||
}
|
||||
|
||||
// 多种日志格式的级别提取正则
|
||||
const LOG_LEVEL_PATTERNS = [
|
||||
// 标准格式: [2026-01-07 12:00:00] [INFO]
|
||||
/^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\] \[(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\]/i,
|
||||
// 扫描日志格式: [09:50:37] [INFO] [stage]
|
||||
/^\[\d{2}:\d{2}:\d{2}\] \[(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\]/i,
|
||||
// Prefect 格式: 12:01:50.419 | WARNING | prefect
|
||||
/^[\d:.]+\s+\|\s+(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\s+\|/i,
|
||||
// 简单格式: [INFO] message 或 INFO: message
|
||||
/^(?:\[)?(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)(?:\])?[:\s]/i,
|
||||
// Python logging 格式: INFO - message
|
||||
/^(DEBUG|INFO|WARNING|WARN|ERROR|CRITICAL)\s+-\s+/i,
|
||||
]
|
||||
|
||||
// 新日志条目起始模式(无级别但表示新条目开始)
|
||||
const NEW_ENTRY_PATTERNS = [
|
||||
/^\[\d+\/\d+\]/, // [1/4], [2/4] 等步骤标记
|
||||
/^\[CONFIG\]/i, // [CONFIG] 配置信息
|
||||
/^\[诊断\]/, // [诊断] 诊断信息
|
||||
/^={10,}$/, // ============ 分隔线
|
||||
/^\[\d{4}-\d{2}-\d{2}/, // 时间戳开头 [2026-01-07...
|
||||
/^\d{2}:\d{2}:\d{2}/, // 时间开头 12:01:50...
|
||||
/^\/[\w/]+\.py:\d+:/, // Python 文件路径 /path/file.py:123:
|
||||
]
|
||||
|
||||
// 从行中提取日志级别
|
||||
function extractLogLevel(line: string): string | null {
|
||||
for (const pattern of LOG_LEVEL_PATTERNS) {
|
||||
const match = line.match(pattern)
|
||||
if (match) {
|
||||
return match[1].toUpperCase()
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
// 检测是否是新日志条目的起始(无级别)
|
||||
function isNewEntryStart(line: string): boolean {
|
||||
return NEW_ENTRY_PATTERNS.some((pattern) => pattern.test(line))
|
||||
}
|
||||
|
||||
// 级别标准化
|
||||
function normalizeLevel(l: string): string {
|
||||
const upper = l.toUpperCase()
|
||||
if (upper === "WARNING") return "WARN"
|
||||
if (upper === "CRITICAL") return "ERROR"
|
||||
return upper
|
||||
}
|
||||
|
||||
// 根据级别筛选日志行
|
||||
// 支持多行日志:非标准格式的行会跟随前一个标准日志行的级别
|
||||
function filterByLevel(content: string, level: LogLevel): string {
|
||||
if (level === "all") return content
|
||||
|
||||
const targetLevel = normalizeLevel(level)
|
||||
const lines = content.split("\n")
|
||||
const result: string[] = []
|
||||
// 默认隐藏,直到遇到第一个匹配目标级别的日志行
|
||||
let currentBlockVisible = false
|
||||
|
||||
for (const line of lines) {
|
||||
const extractedLevel = extractLogLevel(line)
|
||||
if (extractedLevel) {
|
||||
// 这是一个新的日志条目,精确匹配级别
|
||||
const lineLevel = normalizeLevel(extractedLevel)
|
||||
currentBlockVisible = lineLevel === targetLevel
|
||||
} else if (isNewEntryStart(line)) {
|
||||
// 无级别但是新条目开始,隐藏
|
||||
currentBlockVisible = false
|
||||
}
|
||||
// 非标准行跟随前一个日志条目的可见性
|
||||
if (currentBlockVisible) {
|
||||
result.push(line)
|
||||
}
|
||||
}
|
||||
|
||||
return result.join("\n")
|
||||
}
|
||||
|
||||
export function AnsiLogViewer({ content, className, searchQuery = "", logLevel = "all" }: AnsiLogViewerProps) {
|
||||
const containerRef = useRef<HTMLPreElement>(null)
|
||||
const isAtBottomRef = useRef(true) // 跟踪用户是否在底部
|
||||
|
||||
@@ -88,14 +198,21 @@ export function AnsiLogViewer({ content, className }: AnsiLogViewerProps) {
|
||||
const htmlContent = useMemo(() => {
|
||||
if (!content) return ""
|
||||
|
||||
// 先按级别筛选
|
||||
const filteredContent = filterByLevel(content, logLevel)
|
||||
|
||||
let result: string
|
||||
// 如果包含 ANSI 颜色码,直接转换
|
||||
if (hasAnsiCodes(content)) {
|
||||
return ansiConverter.toHtml(content)
|
||||
if (hasAnsiCodes(filteredContent)) {
|
||||
result = ansiConverter.toHtml(filteredContent)
|
||||
} else {
|
||||
// 否则解析日志级别添加颜色
|
||||
result = colorizeLogContent(filteredContent)
|
||||
}
|
||||
|
||||
// 否则解析日志级别添加颜色
|
||||
return colorizeLogContent(content)
|
||||
}, [content])
|
||||
// 应用搜索高亮
|
||||
return highlightSearch(result, searchQuery)
|
||||
}, [content, searchQuery, logLevel])
|
||||
|
||||
// 监听滚动事件,检测用户是否在底部
|
||||
useEffect(() => {
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
import { useMemo } from "react"
|
||||
import { useTranslations } from "next-intl"
|
||||
import { FileText, Search } from "lucide-react"
|
||||
|
||||
import {
|
||||
Select,
|
||||
@@ -12,30 +13,36 @@ import {
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/ui/select"
|
||||
import { Switch } from "@/components/ui/switch"
|
||||
import { Label } from "@/components/ui/label"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import type { LogFile } from "@/types/system-log.types"
|
||||
|
||||
const LINE_OPTIONS = [100, 200, 500, 1000, 5000] as const
|
||||
const LINE_OPTIONS = [100, 200, 500, 1000, 5000, 10000] as const
|
||||
|
||||
export type LogLevel = "all" | "DEBUG" | "INFO" | "WARN" | "ERROR"
|
||||
export const LOG_LEVELS: LogLevel[] = ["all", "DEBUG", "INFO", "WARN", "ERROR"]
|
||||
|
||||
interface LogToolbarProps {
|
||||
files: LogFile[]
|
||||
selectedFile: string
|
||||
lines: number
|
||||
autoRefresh: boolean
|
||||
searchQuery: string
|
||||
logLevel: LogLevel
|
||||
onFileChange: (filename: string) => void
|
||||
onLinesChange: (lines: number) => void
|
||||
onAutoRefreshChange: (enabled: boolean) => void
|
||||
onSearchChange: (query: string) => void
|
||||
onLogLevelChange: (level: LogLevel) => void
|
||||
}
|
||||
|
||||
export function LogToolbar({
|
||||
files,
|
||||
selectedFile,
|
||||
lines,
|
||||
autoRefresh,
|
||||
searchQuery,
|
||||
logLevel,
|
||||
onFileChange,
|
||||
onLinesChange,
|
||||
onAutoRefreshChange,
|
||||
onSearchChange,
|
||||
onLogLevelChange,
|
||||
}: LogToolbarProps) {
|
||||
const t = useTranslations("settings.systemLogs")
|
||||
|
||||
@@ -49,76 +56,75 @@ export function LogToolbar({
|
||||
}, [files])
|
||||
|
||||
return (
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<div className="flex items-center gap-3 flex-wrap">
|
||||
{/* 日志文件选择 */}
|
||||
<div className="flex items-center gap-2">
|
||||
<Label className="text-sm text-muted-foreground whitespace-nowrap">
|
||||
{t("toolbar.logFile")}
|
||||
</Label>
|
||||
<Select value={selectedFile} onValueChange={onFileChange}>
|
||||
<SelectTrigger className="w-[200px]">
|
||||
<SelectValue placeholder={t("toolbar.selectFile")} />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{groupedFiles.systemLogs.length > 0 && (
|
||||
<SelectGroup>
|
||||
<SelectLabel>{t("toolbar.systemLogsGroup")}</SelectLabel>
|
||||
{groupedFiles.systemLogs.map((file) => (
|
||||
<SelectItem key={file.filename} value={file.filename}>
|
||||
{file.filename}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectGroup>
|
||||
)}
|
||||
{groupedFiles.containerLogs.length > 0 && (
|
||||
<SelectGroup>
|
||||
<SelectLabel>{t("toolbar.containerLogsGroup")}</SelectLabel>
|
||||
{groupedFiles.containerLogs.map((file) => (
|
||||
<SelectItem key={file.filename} value={file.filename}>
|
||||
{file.filename}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectGroup>
|
||||
)}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
<Select value={selectedFile} onValueChange={onFileChange}>
|
||||
<SelectTrigger className="w-[200px]">
|
||||
<FileText className="h-4 w-4 text-muted-foreground" />
|
||||
<SelectValue placeholder={t("toolbar.selectFile")} />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{groupedFiles.systemLogs.length > 0 && (
|
||||
<SelectGroup>
|
||||
<SelectLabel>{t("toolbar.systemLogsGroup")}</SelectLabel>
|
||||
{groupedFiles.systemLogs.map((file) => (
|
||||
<SelectItem key={file.filename} value={file.filename}>
|
||||
{file.filename}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectGroup>
|
||||
)}
|
||||
{groupedFiles.containerLogs.length > 0 && (
|
||||
<SelectGroup>
|
||||
<SelectLabel>{t("toolbar.containerLogsGroup")}</SelectLabel>
|
||||
{groupedFiles.containerLogs.map((file) => (
|
||||
<SelectItem key={file.filename} value={file.filename}>
|
||||
{file.filename}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectGroup>
|
||||
)}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
|
||||
{/* 行数选择 */}
|
||||
<div className="flex items-center gap-2">
|
||||
<Label className="text-sm text-muted-foreground whitespace-nowrap">
|
||||
{t("toolbar.lines")}
|
||||
</Label>
|
||||
<Select value={String(lines)} onValueChange={(v) => onLinesChange(Number(v))}>
|
||||
<SelectTrigger className="w-[100px]">
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{LINE_OPTIONS.map((option) => (
|
||||
<SelectItem key={option} value={String(option)}>
|
||||
{option}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
<Select value={String(lines)} onValueChange={(v) => onLinesChange(Number(v))}>
|
||||
<SelectTrigger className="w-[110px]">
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{LINE_OPTIONS.map((option) => (
|
||||
<SelectItem key={option} value={String(option)}>
|
||||
{option} {t("toolbar.linesUnit")}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
|
||||
{/* 自动刷新开关 */}
|
||||
<div className="flex items-center gap-2">
|
||||
<Switch
|
||||
id="auto-refresh"
|
||||
checked={autoRefresh}
|
||||
onCheckedChange={onAutoRefreshChange}
|
||||
{/* 日志级别筛选 */}
|
||||
<Select value={logLevel} onValueChange={(v) => onLogLevelChange(v as LogLevel)}>
|
||||
<SelectTrigger className="w-[130px]">
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{LOG_LEVELS.map((level) => (
|
||||
<SelectItem key={level} value={level}>
|
||||
{level === "all" ? t("toolbar.levelAll") : level}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
|
||||
{/* 搜索框 - 居中并扩展 */}
|
||||
<div className="relative flex-1 min-w-[280px] max-w-[500px]">
|
||||
<Search className="absolute left-3 top-1/2 h-4 w-4 -translate-y-1/2 text-muted-foreground" />
|
||||
<Input
|
||||
type="text"
|
||||
placeholder={t("toolbar.searchPlaceholder")}
|
||||
value={searchQuery}
|
||||
onChange={(e) => onSearchChange(e.target.value)}
|
||||
className="w-full pl-9 h-9"
|
||||
/>
|
||||
<Label
|
||||
htmlFor="auto-refresh"
|
||||
className="text-sm text-muted-foreground cursor-pointer flex items-center gap-1.5"
|
||||
>
|
||||
{t("toolbar.autoRefresh")}
|
||||
{autoRefresh && (
|
||||
<span className="size-2 rounded-full bg-green-500 animate-pulse" />
|
||||
)}
|
||||
</Label>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -1,14 +1,18 @@
|
||||
"use client"
|
||||
|
||||
import { useEffect, useMemo, useState } from "react"
|
||||
import { useCallback, useEffect, useMemo, useState } from "react"
|
||||
import { useTranslations } from "next-intl"
|
||||
import { Download } from "lucide-react"
|
||||
|
||||
import { Card, CardContent } from "@/components/ui/card"
|
||||
import { Separator } from "@/components/ui/separator"
|
||||
import { Switch } from "@/components/ui/switch"
|
||||
import { Label } from "@/components/ui/label"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { useSystemLogs, useLogFiles } from "@/hooks/use-system-logs"
|
||||
import { LogToolbar } from "./log-toolbar"
|
||||
import { LogToolbar, type LogLevel } from "./log-toolbar"
|
||||
import { AnsiLogViewer } from "./ansi-log-viewer"
|
||||
|
||||
const DEFAULT_FILE = "xingrin.log"
|
||||
const DEFAULT_FILE = "orbit.log"
|
||||
const DEFAULT_LINES = 500
|
||||
|
||||
export function SystemLogsView() {
|
||||
@@ -18,6 +22,8 @@ export function SystemLogsView() {
|
||||
const [selectedFile, setSelectedFile] = useState(DEFAULT_FILE)
|
||||
const [lines, setLines] = useState(DEFAULT_LINES)
|
||||
const [autoRefresh, setAutoRefresh] = useState(true)
|
||||
const [searchQuery, setSearchQuery] = useState("")
|
||||
const [logLevel, setLogLevel] = useState<LogLevel>("all")
|
||||
|
||||
// 获取日志文件列表
|
||||
const { data: filesData } = useLogFiles()
|
||||
@@ -45,28 +51,94 @@ export function SystemLogsView() {
|
||||
return result
|
||||
}, [logsData])
|
||||
|
||||
// 下载日志
|
||||
const handleDownload = useCallback(() => {
|
||||
if (!content) return
|
||||
|
||||
const blob = new Blob([content], { type: "text/plain;charset=utf-8" })
|
||||
const url = URL.createObjectURL(blob)
|
||||
const a = document.createElement("a")
|
||||
a.href = url
|
||||
a.download = selectedFile
|
||||
document.body.appendChild(a)
|
||||
a.click()
|
||||
document.body.removeChild(a)
|
||||
URL.revokeObjectURL(url)
|
||||
}, [content, selectedFile])
|
||||
|
||||
return (
|
||||
<Card>
|
||||
<CardContent className="space-y-4">
|
||||
<div className="flex flex-col gap-3 flex-1 min-h-0">
|
||||
{/* 紧凑单行工具栏 - 标题融入 */}
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<h1 className="text-lg font-semibold whitespace-nowrap">{t("title")}</h1>
|
||||
<Separator orientation="vertical" className="h-5" />
|
||||
<LogToolbar
|
||||
files={files}
|
||||
selectedFile={selectedFile}
|
||||
lines={lines}
|
||||
autoRefresh={autoRefresh}
|
||||
searchQuery={searchQuery}
|
||||
logLevel={logLevel}
|
||||
onFileChange={setSelectedFile}
|
||||
onLinesChange={setLines}
|
||||
onAutoRefreshChange={setAutoRefresh}
|
||||
onSearchChange={setSearchQuery}
|
||||
onLogLevelChange={setLogLevel}
|
||||
/>
|
||||
<div className="h-[calc(100vh-300px)] min-h-[360px] rounded-lg border overflow-hidden bg-[#1e1e1e]">
|
||||
{/* 下载按钮 */}
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
className="h-9"
|
||||
onClick={handleDownload}
|
||||
disabled={!content}
|
||||
>
|
||||
<Download className="h-4 w-4 mr-1.5" />
|
||||
{t("toolbar.download")}
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{/* 日志查看器 */}
|
||||
<div className="flex-1 flex flex-col rounded-lg overflow-hidden border">
|
||||
<div className="flex-1 min-h-[400px] bg-[#1e1e1e]">
|
||||
{content ? (
|
||||
<AnsiLogViewer content={content} />
|
||||
<AnsiLogViewer content={content} searchQuery={searchQuery} logLevel={logLevel} />
|
||||
) : (
|
||||
<div className="flex items-center justify-center h-full text-muted-foreground">
|
||||
{t("noContent")}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
|
||||
{/* 底部状态栏 */}
|
||||
<div className="flex items-center justify-between px-4 py-2 bg-muted/50 border-t text-xs text-muted-foreground">
|
||||
<div className="flex items-center gap-4">
|
||||
<span>{lines} {t("toolbar.linesUnit")}</span>
|
||||
<Separator orientation="vertical" className="h-3" />
|
||||
<span>{selectedFile}</span>
|
||||
<Separator orientation="vertical" className="h-3" />
|
||||
<span className="flex items-center gap-1.5">
|
||||
{autoRefresh && (
|
||||
<span className="size-1.5 rounded-full bg-green-500 animate-pulse" />
|
||||
)}
|
||||
{t("description")}
|
||||
</span>
|
||||
</div>
|
||||
{/* 自动刷新开关 */}
|
||||
<div className="flex items-center gap-2">
|
||||
<Switch
|
||||
id="auto-refresh"
|
||||
checked={autoRefresh}
|
||||
onCheckedChange={setAutoRefresh}
|
||||
className="scale-75"
|
||||
/>
|
||||
<Label
|
||||
htmlFor="auto-refresh"
|
||||
className="text-xs cursor-pointer"
|
||||
>
|
||||
{t("toolbar.autoRefresh")}
|
||||
</Label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -103,18 +103,23 @@ export function DeployTerminalDialog({
|
||||
|
||||
// Show connection prompt
|
||||
terminal.writeln(`\x1b[90m${tTerminal("connecting")}\x1b[0m`)
|
||||
|
||||
// Listen for window resize
|
||||
const handleResize = () => fitAddon.fit()
|
||||
window.addEventListener('resize', handleResize)
|
||||
|
||||
|
||||
// Auto-connect WebSocket
|
||||
connectWs()
|
||||
|
||||
}, [worker])
|
||||
|
||||
// Manage window resize listener separately for proper cleanup
|
||||
useEffect(() => {
|
||||
const fitAddon = fitAddonRef.current
|
||||
if (!fitAddon) return
|
||||
|
||||
const handleResize = () => fitAddon.fit()
|
||||
window.addEventListener('resize', handleResize)
|
||||
|
||||
return () => {
|
||||
window.removeEventListener('resize', handleResize)
|
||||
}
|
||||
}, [worker])
|
||||
}, [fitAddonRef.current])
|
||||
|
||||
// Connect WebSocket
|
||||
const connectWs = useCallback(() => {
|
||||
|
||||
@@ -17,7 +17,7 @@ const SUBDOMAIN_FILTER_FIELDS: FilterField[] = [
|
||||
// Subdomain page filter examples
|
||||
const SUBDOMAIN_FILTER_EXAMPLES = [
|
||||
'name="api.example.com"',
|
||||
'name="*.test.com"',
|
||||
'name=".test.com"',
|
||||
]
|
||||
|
||||
// Component props type definition
|
||||
@@ -125,7 +125,7 @@ export function SubdomainsDataTable({
|
||||
onSelectionChange={onSelectionChange}
|
||||
// Bulk operations
|
||||
onBulkDelete={onBulkDelete}
|
||||
bulkDeleteLabel="Delete"
|
||||
bulkDeleteLabel={tActions("delete")}
|
||||
// Add button
|
||||
onAddNew={onAddNew}
|
||||
addButtonLabel={addButtonText}
|
||||
|
||||
@@ -14,8 +14,10 @@ import { createSubdomainColumns } from "./subdomains-columns"
|
||||
import { DataTableSkeleton } from "@/components/ui/data-table-skeleton"
|
||||
import { SubdomainService } from "@/services/subdomain.service"
|
||||
import { BulkAddSubdomainsDialog } from "./bulk-add-subdomains-dialog"
|
||||
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
|
||||
import { getDateLocale } from "@/lib/date-utils"
|
||||
import type { Subdomain } from "@/types/subdomain.types"
|
||||
import { toast } from "sonner"
|
||||
|
||||
/**
|
||||
* Subdomain detail view component
|
||||
@@ -31,11 +33,14 @@ export function SubdomainsDetailView({
|
||||
scanId?: number
|
||||
}) {
|
||||
const [selectedSubdomains, setSelectedSubdomains] = useState<Subdomain[]>([])
|
||||
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
|
||||
const [isDeleting, setIsDeleting] = useState(false)
|
||||
|
||||
// Internationalization
|
||||
const tColumns = useTranslations("columns")
|
||||
const tCommon = useTranslations("common")
|
||||
const tSubdomains = useTranslations("subdomains")
|
||||
const tToast = useTranslations("toast")
|
||||
const locale = useLocale()
|
||||
|
||||
// Build translation object
|
||||
@@ -215,6 +220,26 @@ export function SubdomainsDetailView({
|
||||
URL.revokeObjectURL(url)
|
||||
}
|
||||
|
||||
// Handle bulk delete
|
||||
const handleBulkDelete = async () => {
|
||||
if (selectedSubdomains.length === 0) return
|
||||
|
||||
setIsDeleting(true)
|
||||
try {
|
||||
const ids = selectedSubdomains.map(s => s.id)
|
||||
const result = await SubdomainService.bulkDeleteSubdomains(ids)
|
||||
toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
|
||||
setSelectedSubdomains([])
|
||||
setDeleteDialogOpen(false)
|
||||
refetch()
|
||||
} catch (error) {
|
||||
console.error("Failed to delete subdomains", error)
|
||||
toast.error(tToast("deleteFailed"))
|
||||
} finally {
|
||||
setIsDeleting(false)
|
||||
}
|
||||
}
|
||||
|
||||
// Create column definitions
|
||||
const subdomainColumns = useMemo(
|
||||
() =>
|
||||
@@ -279,6 +304,7 @@ export function SubdomainsDetailView({
|
||||
isSearching={isSearching}
|
||||
onDownloadAll={handleDownloadAll}
|
||||
onDownloadSelected={handleDownloadSelected}
|
||||
onBulkDelete={targetId ? () => setDeleteDialogOpen(true) : undefined}
|
||||
pagination={pagination}
|
||||
setPagination={setPagination}
|
||||
paginationInfo={{
|
||||
@@ -301,6 +327,17 @@ export function SubdomainsDetailView({
|
||||
onSuccess={() => refetch()}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Delete confirmation dialog */}
|
||||
<ConfirmDialog
|
||||
open={deleteDialogOpen}
|
||||
onOpenChange={setDeleteDialogOpen}
|
||||
title={tCommon("actions.confirmDelete")}
|
||||
description={tCommon("actions.deleteConfirmMessage", { count: selectedSubdomains.length })}
|
||||
onConfirm={handleBulkDelete}
|
||||
loading={isDeleting}
|
||||
variant="destructive"
|
||||
/>
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user