Mirror of https://github.com/yyhuni/xingrin.git (synced 2026-01-31 19:53:11 +08:00)

Compare commits: v1.3.2-dev...001-websoc (177 commits)
Commit SHAs (177): b859fc9062, 49b5fbef28, 11112a68f6, 9049b096ba, ca6c0eb082, 64bcd9a6f5, 443e2172e4, c6dcfb0a5b, 25ae325c69, cab83d89cf, 0f8fff2dc4, 6e48b97dc2, ed757d6e14, 2aa1afbabf, 35ac64db57, b4bfab92e3, 72210c42d0, 91aaf7997f, 32e3179d58, 487f7c84b5, b2cc83f569, f854cf09be, 7e1c2c187a, 4abb259ca0, bbef6af000, ba0864ed16, f54827829a, 170021130c, b540f69152, d7f1e04855, 68ad18e6da, a7542d4a34, 6f02d9f3c5, 794846ca7a, 5eea7b2621, 069527a7f1, e542633ad3, e8a9606d3b, dc2e1e027d, b1847faa3a, e699842492, 08a4807bef, 191ff9837b, 679dff9037, ce4330b628, 4ce6b148f8, a89f775ee9, e3003f33f9, 3760684b64, bfd7e11d09, f758feb0d0, 8798eed337, bd1e25cfd5, d775055572, 00dfad60b8, a5c48fe4d4, 85c880731c, c6b6507412, af457dc44c, 9e01a6aa5e, ed80772e6f, a22af21dcb, 8de950a7a5, 9db84221e9, 0728f3c01d, 4aa7b3d68a, 3946a53337, c94fe1ec4b, 6dea525527, 5b0416972a, 5345a34cbd, 3ca56abc3e, 9703add22d, f5a489e2d6, d75a3f6882, 59e48e5b15, 2d2ec93626, ced9f811f4, aa99b26f50, 8342f196db, 1bd2a6ed88, 033ff89aee, 4284a0cd9a, 943a4cb960, eb2d853b76, 1184c18b74, 8a6f1b6f24, 255d505aba, d06a9bab1f, 6d5c776bf7, bf058dd67b, 0532d7c8b8, 2ee9b5ffa2, 648a1888d4, 2508268a45, c60383940c, 47298c294a, eba394e14e, 592a1958c4, 38e2856c08, f5ad8e68e9, d5f91a236c, 24ae8b5aeb, 86f43f94a0, 53ba03d1e5, 89c44ebd05, e0e3419edb, 52ee4684a7, ce8cebf11d, ec006d8f54, 48976a570f, 5da7229873, 8bb737a9fa, 2d018d33f3, 0c07cc8497, 225b039985, d1624627bc, 7bb15e4ae4, 8e8cc29669, d6d5338acb, c521bdb511, abf2d95f6f, ab58cf0d85, fb0111adf2, 161ee9a2b1, 0cf75585d5, 1d8d5f51d9, 3f8de07c8c, cd5c2b9f11, 54786c22dd, d468f975ab, a85a12b8ad, a8b0d97b7b, b8504921c2, ecfc1822fb, 81633642e6, d1ec9b7f27, 2a3d9b4446, 9b63203b5a, 6ff86e14ec, 4c1282e9bb, ba3a9b709d, 283b28b46a, 1269e5a314, 802e967906, e446326416, e0abb3ce7b, d418baaf79, f8da408580, 7cd4354d8f, 6bf35a760f, be9ecadffb, adb53c9f85, 7b7bbed634, 8dd3f0536e, 8a8062a12d, 55908a2da5, 22a7d4f091, f287f18134, de27230b7a, 15a6295189, 674acdac66, c59152bedf, b4037202dc, 4b4f9862bf, 1c42e4978f, 57bab63997, b1f0f18ac0, ccee5471b8, 0ccd362535, 7f2af7f7e2, 4bd0f9e8c1, 68cc996e3b, f1e79d638e, d484133e4c, fc977ae029, 08372588a4
.github/workflows/check-generated-files.yml (vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
```yaml
name: Check Generated Files

on:
  workflow_call:  # Runs only when called by another workflow

permissions:
  contents: read

jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.21

      - name: Generate files for all workflows
        working-directory: worker
        run: make generate

      - name: Check for differences
        run: |
          if ! git diff --exit-code; then
            echo "❌ Generated files are out of date!"
            echo "Please run: cd worker && make generate"
            echo ""
            echo "Changed files:"
            git status --porcelain
            echo ""
            echo "Diff:"
            git diff
            exit 1
          fi
          echo "✅ Generated files are up to date"

      - name: Run metadata consistency tests
        working-directory: worker
        run: make test-metadata

      - name: Run all tests
        working-directory: worker
        run: make test
```
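The same staleness check can be reproduced locally before pushing; here is a minimal Python sketch, assuming the same `worker/` layout and `make generate` target the workflow uses:

```python
import subprocess
import sys

def check_generated_files(repo_root: str = ".") -> int:
    """Regenerate files and fail if the working tree changes (mirrors the CI check)."""
    subprocess.run(["make", "generate"], cwd=f"{repo_root}/worker", check=True)
    diff = subprocess.run(["git", "diff", "--exit-code"], cwd=repo_root)
    if diff.returncode != 0:
        print("Generated files are out of date; run: cd worker && make generate")
    return diff.returncode

if __name__ == "__main__":
    sys.exit(check_generated_files())
```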
.github/workflows/docker-build.yml (vendored, deleted, 170 lines)
@@ -1,170 +0,0 @@
```yaml
name: Build and Push Docker Images

on:
  push:
    tags:
      - 'v*'  # Trigger only on tags starting with v (e.g. v1.0.0)
  workflow_dispatch:  # Manual trigger

# Concurrency control: keep only the newest build per branch, cancel in-progress runs
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  REGISTRY: docker.io
  IMAGE_PREFIX: yyhuni

permissions:
  contents: write

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - image: xingrin-server
            dockerfile: docker/server/Dockerfile
            context: .
            platforms: linux/amd64,linux/arm64
          - image: xingrin-frontend
            dockerfile: docker/frontend/Dockerfile
            context: .
            platforms: linux/amd64  # Next.js crashes under QEMU when building for ARM64
          - image: xingrin-worker
            dockerfile: docker/worker/Dockerfile
            context: .
            platforms: linux/amd64,linux/arm64
          - image: xingrin-nginx
            dockerfile: docker/nginx/Dockerfile
            context: .
            platforms: linux/amd64,linux/arm64
          - image: xingrin-agent
            dockerfile: docker/agent/Dockerfile
            context: .
            platforms: linux/amd64,linux/arm64

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Free disk space (for large builds like worker)
        run: |
          echo "=== Before cleanup ==="
          df -h
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /usr/local/lib/android
          sudo rm -rf /opt/ghc
          sudo rm -rf /opt/hostedtoolcache/CodeQL
          sudo docker image prune -af
          echo "=== After cleanup ==="
          df -h

      - name: Generate SSL certificates for nginx build
        if: matrix.image == 'xingrin-nginx'
        run: |
          mkdir -p docker/nginx/ssl
          openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
            -keyout docker/nginx/ssl/privkey.pem \
            -out docker/nginx/ssl/fullchain.pem \
            -subj "/CN=localhost"
          echo "SSL certificates generated for CI build"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Get version from git tag
        id: version
        run: |
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
            echo "IS_RELEASE=true" >> $GITHUB_OUTPUT
          else
            echo "VERSION=dev-$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
            echo "IS_RELEASE=false" >> $GITHUB_OUTPUT
          fi

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: ${{ matrix.context }}
          file: ${{ matrix.dockerfile }}
          platforms: ${{ matrix.platforms }}
          push: true
          tags: |
            ${{ env.IMAGE_PREFIX }}/${{ matrix.image }}:${{ steps.version.outputs.VERSION }}
            ${{ steps.version.outputs.IS_RELEASE == 'true' && format('{0}/{1}:latest', env.IMAGE_PREFIX, matrix.image) || '' }}
          build-args: |
            IMAGE_TAG=${{ steps.version.outputs.VERSION }}
          cache-from: type=gha,scope=${{ matrix.image }}
          cache-to: type=gha,mode=max,scope=${{ matrix.image }}
          provenance: false
          sbom: false

  # After all images build successfully, update the VERSION file,
  # on whichever branch the tag came from
  update-version:
    runs-on: ubuntu-latest
    needs: build
    if: startsWith(github.ref, 'refs/tags/v')
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Full history, needed to determine the tag's branch
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Determine source branch and version
        id: branch
        run: |
          VERSION="${GITHUB_REF#refs/tags/}"

          # Find branches containing this tag
          BRANCHES=$(git branch -r --contains ${{ github.ref_name }})
          echo "Branches containing tag: $BRANCHES"

          # Decide which branch the tag came from
          if echo "$BRANCHES" | grep -q "origin/main"; then
            TARGET_BRANCH="main"
            UPDATE_LATEST="true"
          elif echo "$BRANCHES" | grep -q "origin/dev"; then
            TARGET_BRANCH="dev"
            UPDATE_LATEST="false"
          else
            echo "Warning: Tag not found in main or dev branch, defaulting to main"
            TARGET_BRANCH="main"
            UPDATE_LATEST="false"
          fi

          echo "BRANCH=$TARGET_BRANCH" >> $GITHUB_OUTPUT
          echo "VERSION=$VERSION" >> $GITHUB_OUTPUT
          echo "UPDATE_LATEST=$UPDATE_LATEST" >> $GITHUB_OUTPUT
          echo "Will update VERSION on branch: $TARGET_BRANCH"

      - name: Checkout target branch
        run: |
          git checkout ${{ steps.branch.outputs.BRANCH }}

      - name: Update VERSION file
        run: |
          VERSION="${{ steps.branch.outputs.VERSION }}"
          echo "$VERSION" > VERSION
          echo "Updated VERSION to $VERSION on branch ${{ steps.branch.outputs.BRANCH }}"

      - name: Commit and push
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add VERSION
          git diff --staged --quiet || git commit -m "chore: bump version to ${{ steps.branch.outputs.VERSION }}"
          git push origin ${{ steps.branch.outputs.BRANCH }}
```
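The tag-to-branch detection step can be reproduced locally; a minimal Python sketch, assuming a clone with `origin/main` and `origin/dev` fetched:

```python
import subprocess

def branch_for_tag(tag: str) -> str:
    """Return 'main' or 'dev' depending on which remote branch contains the tag."""
    out = subprocess.run(
        ["git", "branch", "-r", "--contains", tag],
        capture_output=True, text=True, check=True,
    ).stdout
    if "origin/main" in out:
        return "main"
    if "origin/dev" in out:
        return "dev"
    return "main"  # same fallback as the workflow

print(branch_for_tag("v1.3.2"))  # hypothetical tag name
```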
.gitignore (vendored, 159 changes)
@@ -1,136 +1,51 @@
```gitignore
# ============================
# OS-related files
# ============================
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# Go
*.exe
*.exe~
*.dll
*.so
*.dylib
*.test
*.out
vendor/
go.work

# ============================
# Frontend (Next.js/Node.js)
# ============================
# Dependency directories
front-back/node_modules/
front-back/.pnpm-store/
# Build artifacts
dist/
build/
bin/

# Next.js build output
front-back/.next/
front-back/out/
front-back/dist/

# Environment variable files
front-back/.env
front-back/.env.local
front-back/.env.development.local
front-back/.env.test.local
front-back/.env.production.local

# Runtime and cache
front-back/.turbo/
front-back/.swc/
front-back/.eslintcache
front-back/.tsbuildinfo

# ============================
# Backend (Python/Django)
# ============================
# Python virtual environments
.venv/
venv/
env/
ENV/

# Python bytecode
*.pyc
*.pyo
*.pyd
__pycache__/
*.py[cod]
*$py.class

# Django
backend/db.sqlite3
backend/db.sqlite3-journal
backend/media/
backend/staticfiles/
backend/.env
backend/.env.local

# Python testing and coverage
.pytest_cache/
.coverage
htmlcov/
*.cover

# ============================
# Backend (Go)
# ============================
# Build artifacts
backend/bin/
backend/dist/
backend/*.exe
backend/*.exe~
backend/*.dll
backend/*.so
backend/*.dylib

# Test artifacts
backend/*.test
backend/*.out
backend/*.prof

# Go workspace files
backend/go.work
backend/go.work.sum

# Go dependency management
backend/vendor/

# ============================
# IDEs and editors
# ============================
# IDE
.vscode/
.idea/
.cursor/
.claude/
.kiro/
.playwright-mcp/
*.swp
*.swo
*~
.DS_Store

# ============================
# Docker
# ============================
docker/.env
docker/.env.local

# SSL certificates and private keys (must not be committed)
docker/nginx/ssl/*.pem
docker/nginx/ssl/*.key
docker/nginx/ssl/*.crt

# ============================
# Log files and scan results
# ============================
# Environment
.env
.env.local
.env.*.local
*.log
logs/
results/
.venv/

# Dev-script runtime files (PIDs and startup logs)
backend/scripts/dev/.pids/
# Testing
coverage.txt
*.coverprofile
.hypothesis/

# ============================
# Temporary files
# ============================
# Temporary files
*.tmp
tmp/
temp/
.cache/

HGETALL
KEYS
vuln_scan/input_endpoints.txt
open-in-v0
.kiro/
.claude/
.specify/

# AI Assistant directories
codex/
openspec/
specs/
AGENTS.md
WARP.md
```
.vscode/settings.json (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
```json
{
  "typescript.autoClosingTags": false,
  "kiroAgent.configureMCP": "Enabled"
}
```
README.md (deleted, 272 lines)
@@ -1,272 +0,0 @@

<h1 align="center">XingRin - 星环</h1>

<p align="center">
  <b>🛡️ Attack Surface Management (ASM) platform | Automated asset discovery and vulnerability scanning</b>
</p>

<p align="center">
  <a href="https://github.com/yyhuni/xingrin/stargazers"><img src="https://img.shields.io/github/stars/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub stars"></a>
  <a href="https://github.com/yyhuni/xingrin/network/members"><img src="https://img.shields.io/github/forks/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub forks"></a>
  <a href="https://github.com/yyhuni/xingrin/issues"><img src="https://img.shields.io/github/issues/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub issues"></a>
  <a href="https://github.com/yyhuni/xingrin/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-PolyForm%20NC-blue?style=flat-square" alt="License"></a>
</p>

<p align="center">
  <a href="#-功能特性">Features</a> •
  <a href="#-快速开始">Quick Start</a> •
  <a href="#-文档">Docs</a> •
  <a href="#-技术栈">Tech Stack</a> •
  <a href="#-反馈与贡献">Feedback & Contributing</a>
</p>

<p align="center">
  <sub>🔍 Keywords: ASM | Attack Surface Management | Vulnerability Scanning | Asset Discovery | Bug Bounty | Penetration Testing | Nuclei | Subdomain Enumeration | EASM</sub>
</p>

---

<p align="center">
  <b>🎨 Modern UI</b>
</p>

<p align="center">
  <img src="docs/screenshots/light.png" alt="Light Mode" width="24%">
  <img src="docs/screenshots/bubblegum.png" alt="Bubblegum" width="24%">
  <img src="docs/screenshots/cosmic-night.png" alt="Cosmic Night" width="24%">
  <img src="docs/screenshots/quantum-rose.png" alt="Quantum Rose" width="24%">
</p>

## 📚 Documentation

- [📖 Technical Docs](./docs/README.md) - documentation index (🚧 work in progress)
- [🚀 Quick Start](./docs/quick-start.md) - one-command install and deployment guide
- [🔄 Version Management](./docs/version-management.md) - git-tag-driven automated version management
- [📦 Nuclei Template Architecture](./docs/nuclei-template-architecture.md) - template repository storage and sync
- [📖 Wordlist Architecture](./docs/wordlist-architecture.md) - wordlist storage and sync
- [🔍 Scan Flow Architecture](./docs/scan-flow-architecture.md) - full scan pipeline and tool orchestration

---

## ✨ Features

### 🎯 Target & Asset Management
- **Organization management** - multi-level target organizations with flexible grouping
- **Target management** - domain and IP target types
- **Asset discovery** - automatic discovery of subdomains, websites, endpoints, and directories
- **Asset snapshots** - snapshot diffs of scan results to track asset changes

### 🔍 Vulnerability Scanning
- **Multiple engines** - integrates mainstream scanners such as Nuclei
- **Custom pipelines** - YAML-configured scan flows with flexible orchestration
- **Scheduled scans** - cron-based automated recurring scans

### 🔖 Fingerprinting
- **Multi-source fingerprint library** - 27,000+ built-in rules from EHole, Goby, Wappalyzer, Fingers, FingerPrintHub, ARL, and more
- **Automatic detection** - runs as part of the scan flow to identify web tech stacks
- **Fingerprint management** - query, import, and export fingerprint rules

#### Scan Flow Architecture

The full scan pipeline covers subdomain discovery, port scanning, site discovery, fingerprinting, URL collection, directory scanning, and vulnerability scanning.

```mermaid
flowchart LR
    START["Start scan"]

    subgraph STAGE1["Stage 1: Asset discovery"]
        direction TB
        SUB["Subdomain discovery<br/>subfinder, amass, puredns"]
        PORT["Port scan<br/>naabu"]
        SITE["Site detection<br/>httpx"]
        FINGER["Fingerprinting<br/>xingfinger"]
        SUB --> PORT --> SITE --> FINGER
    end

    subgraph STAGE2["Stage 2: Deep analysis"]
        direction TB
        URL["URL collection<br/>waymore, katana"]
        DIR["Directory scan<br/>ffuf"]
    end

    subgraph STAGE3["Stage 3: Vulnerability detection"]
        VULN["Vulnerability scan<br/>nuclei, dalfox"]
    end

    FINISH["Scan complete"]

    START --> STAGE1
    FINGER --> STAGE2
    STAGE2 --> STAGE3
    STAGE3 --> FINISH

    style START fill:#34495e,stroke:#2c3e50,stroke-width:2px,color:#fff
    style FINISH fill:#27ae60,stroke:#229954,stroke-width:2px,color:#fff
    style STAGE1 fill:#3498db,stroke:#2980b9,stroke-width:2px,color:#fff
    style STAGE2 fill:#9b59b6,stroke:#8e44ad,stroke-width:2px,color:#fff
    style STAGE3 fill:#e67e22,stroke:#d35400,stroke-width:2px,color:#fff
    style SUB fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
    style PORT fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
    style SITE fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
    style FINGER fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
    style URL fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
    style DIR fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
    style VULN fill:#f0b27a,stroke:#e67e22,stroke-width:1px,color:#fff
```

See the [scan flow architecture docs](./docs/scan-flow-architecture.md) for details.

### 🖥️ Distributed Architecture
- **Multi-node scanning** - deploy multiple worker nodes to scale scanning horizontally
- **Local node** - zero configuration; installation auto-registers a local Docker worker
- **Remote nodes** - one-command SSH deployment of remote VPSes as scan nodes
- **Load-aware scheduling** - tracks node load in real time and dispatches tasks to the best node (see the sketch after the diagram below)
- **Node monitoring** - live heartbeat checks with CPU/memory/disk status
- **Reconnection** - offline nodes are detected automatically and rejoin on recovery

```mermaid
flowchart TB
    subgraph MASTER["Master Server"]
        direction TB

        REDIS["Redis load cache"]

        subgraph SCHEDULER["Task Distributor"]
            direction TB
            SUBMIT["Receive scan tasks"]
            SELECT["Load-aware selection"]
            DISPATCH["Smart dispatch"]

            SUBMIT --> SELECT
            SELECT --> DISPATCH
        end

        REDIS -.load data.-> SELECT
    end

    subgraph WORKERS["Worker node cluster"]
        direction TB

        W1["Worker 1 (local)<br/>CPU: 45% | MEM: 60%"]
        W2["Worker 2 (remote)<br/>CPU: 30% | MEM: 40%"]
        W3["Worker N (remote)<br/>CPU: 90% | MEM: 85%"]
    end

    DISPATCH -->|dispatch| W1
    DISPATCH -->|dispatch| W2
    DISPATCH -->|skipped: high load| W3

    W1 -.heartbeat.-> REDIS
    W2 -.heartbeat.-> REDIS
    W3 -.heartbeat.-> REDIS
```
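To make the load-aware selection concrete, here is a minimal sketch of the idea (not the project's actual scheduler; the Redis key layout, node names, and the 80% CPU threshold are illustrative assumptions):

```python
import json
import redis  # redis-py client

r = redis.Redis()

def pick_worker(node_ids, cpu_limit=80.0):
    """Pick the least-loaded worker whose heartbeat-reported CPU is under the limit."""
    best, best_cpu = None, float("inf")
    for node_id in node_ids:
        raw = r.get(f"worker:load:{node_id}")  # hypothetical key written by heartbeats
        if raw is None:
            continue  # no recent heartbeat; treat the node as offline
        cpu = json.loads(raw)["cpu"]
        if cpu < cpu_limit and cpu < best_cpu:
            best, best_cpu = node_id, cpu
    return best  # None means every node is overloaded or offline

# Worker N in the diagram (CPU 90%) would be skipped by the threshold check.
print(pick_worker(["worker-1", "worker-2", "worker-n"]))
```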
### 📊 Visual Interface
- **Dashboards** - asset and vulnerability statistics
- **Real-time notifications** - WebSocket message push

---

## 📦 Quick Start

### Requirements

- **OS**: Ubuntu 20.04+ / Debian 11+ (recommended)
- **Hardware**: 2 cores / 4 GB RAM minimum, 20 GB+ disk space

### One-Command Install

```bash
# Clone the project
git clone https://github.com/yyhuni/xingrin.git
cd xingrin

# Install and start (production mode)
sudo ./install.sh

# 🇨🇳 Users in mainland China: mirror acceleration recommended (third-party mirrors may break; long-term availability not guaranteed)
sudo ./install.sh --mirror
```

> **💡 About the --mirror flag**
> - Automatically configures Docker registry mirrors (Chinese mirror sources)
> - Accelerates git clones (Nuclei templates, etc.)
> - Greatly speeds up installation and avoids network timeouts

### Access

- **Web UI**: `https://ip:8083`

### Common Commands

```bash
# Start services
sudo ./start.sh

# Stop services
sudo ./stop.sh

# Restart services
sudo ./restart.sh

# Uninstall
sudo ./uninstall.sh
```

## 🤝 Feedback & Contributing

- 🐛 **Found a bug?** File an [Issue](https://github.com/yyhuni/xingrin/issues)
- 💡 **Have ideas** for UI or feature design? Suggestions are welcome via [Issue](https://github.com/yyhuni/xingrin/issues)

## 📧 Contact
- The current version is mostly used by me alone, so there may be many edge-case issues
- For questions, suggestions, or anything else, please file an [Issue](https://github.com/yyhuni/xingrin/issues) first; you can also message my WeChat official account directly and I will reply

- WeChat official account: **塔罗安全学苑**

<img src="docs/wechat-qrcode.png" alt="WeChat official account" width="200">


## ⚠️ Disclaimer

**Important: read carefully before use**

1. This tool is for **authorized security testing** and **security research** only
2. Users must ensure they have **legal authorization** for the target systems
3. Using this tool for unauthorized penetration testing or attacks is **strictly prohibited**
4. Scanning other people's systems without authorization is **illegal** and may carry legal liability
5. The developers are **not responsible for any misuse**

By using this tool you agree to:
- Use it only within the scope of legal authorization
- Comply with the laws and regulations of your jurisdiction
- Bear all consequences arising from misuse

## 🌟 Star History

If this project helps you, please give it a ⭐ Star!

[](https://star-history.com/#yyhuni/xingrin&Date)

## 📄 License

This project is licensed under the [GNU General Public License v3.0](LICENSE).

### Permitted Uses

- ✅ Personal learning and research
- ✅ Commercial and non-commercial use
- ✅ Modification and distribution
- ✅ Patent use
- ✅ Private use

### Obligations and Restrictions

- 📋 **Source disclosure**: source code must be provided when distributing
- 📋 **Same license**: derivative works must use the same license
- 📋 **Copyright notice**: original copyright and license notices must be retained
- ❌ **No warranty**: provided without any warranty
- ❌ Unauthorized penetration testing
- ❌ Any illegal activity
agent/go.mod (new file, 32 lines)
@@ -0,0 +1,32 @@
```go
module github.com/yyhuni/orbit/agent

go 1.24.5

require (
	github.com/docker/docker v28.5.2+incompatible
	github.com/gorilla/websocket v1.5.3
	github.com/shirou/gopsutil/v3 v3.24.5
)

require (
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/containerd/errdefs v1.0.0 // indirect
	github.com/containerd/errdefs/pkg v0.3.0 // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/docker/go-connections v0.6.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.1 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect
	go.opentelemetry.io/otel v1.39.0 // indirect
	go.opentelemetry.io/otel/metric v1.39.0 // indirect
	go.opentelemetry.io/otel/trace v1.39.0 // indirect
	golang.org/x/sys v0.39.0 // indirect
)
```
agent/go.sum (new file, 78 lines)
@@ -0,0 +1,78 @@
```
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
```
@@ -1,106 +0,0 @@
```python
import logging
import sys

from django.apps import AppConfig

logger = logging.getLogger(__name__)


class AssetConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'apps.asset'

    def ready(self):
        # Import all models so Django discovers and registers them
        from . import models

        # Enable the pg_trgm extension (used by fuzzy-text-search indexes);
        # needed when upgrading an existing database
        self._ensure_pg_trgm_extension()

        # Verify the pg_ivm extension is available (used for IMMV incremental maintenance)
        self._verify_pg_ivm_extension()

    def _ensure_pg_trgm_extension(self):
        """
        Ensure the pg_trgm extension is enabled.

        The extension backs the GIN indexes on the response_body and
        response_headers fields, enabling efficient fuzzy text search.
        """
        from django.db import connection

        # Only applies to PostgreSQL
        if connection.vendor != 'postgresql':
            logger.debug("Skipping pg_trgm extension: current database is not PostgreSQL")
            return

        try:
            with connection.cursor() as cursor:
                cursor.execute("CREATE EXTENSION IF NOT EXISTS pg_trgm;")
            logger.debug("pg_trgm extension enabled")
        except Exception as e:
            # Log the error but do not block application startup.
            # Common cause: insufficient privileges (superuser required).
            logger.warning(
                "Could not create the pg_trgm extension: %s. "
                "The GIN indexes on response_body and response_headers may not work. "
                "Please run manually: CREATE EXTENSION IF NOT EXISTS pg_trgm;",
                str(e)
            )

    def _verify_pg_ivm_extension(self):
        """
        Verify that the pg_ivm extension is available.

        pg_ivm powers IMMVs (incrementally maintained materialized views) and
        is required by the system. If unavailable, log an error and exit.
        """
        from django.db import connection

        # Only applies to PostgreSQL
        if connection.vendor != 'postgresql':
            logger.debug("Skipping pg_ivm check: current database is not PostgreSQL")
            return

        # Skip for certain management commands (migrate, makemigrations, ...)
        if len(sys.argv) > 1 and sys.argv[1] in ('migrate', 'makemigrations', 'collectstatic', 'check'):
            logger.debug("Skipping pg_ivm check: running a management command")
            return

        try:
            with connection.cursor() as cursor:
                # Check whether the pg_ivm extension is installed
                cursor.execute("""
                    SELECT COUNT(*) FROM pg_extension WHERE extname = 'pg_ivm'
                """)
                count = cursor.fetchone()[0]

                if count > 0:
                    logger.info("✓ pg_ivm extension enabled")
                else:
                    # Try to create the extension
                    try:
                        cursor.execute("CREATE EXTENSION IF NOT EXISTS pg_ivm;")
                        logger.info("✓ pg_ivm extension created and enabled")
                    except Exception as create_error:
                        # Note: explicit "+" before each "=" * 60 is required here;
                        # implicit literal concatenation would otherwise repeat the message text 60 times
                        logger.error(
                            "=" * 60 + "\n"
                            "Error: the pg_ivm extension is not installed\n"
                            + "=" * 60 + "\n"
                            + "pg_ivm is required by the system for incrementally maintained materialized views.\n\n"
                            "Install pg_ivm on the PostgreSQL server:\n"
                            "  curl -sSL https://raw.githubusercontent.com/yyhuni/xingrin/main/docker/scripts/install-pg-ivm.sh | sudo bash\n\n"
                            "Or install manually:\n"
                            "  1. apt install build-essential postgresql-server-dev-15 git\n"
                            "  2. git clone https://github.com/sraoss/pg_ivm.git && cd pg_ivm && make && make install\n"
                            "  3. Add to postgresql.conf: shared_preload_libraries = 'pg_ivm'\n"
                            "  4. Restart PostgreSQL\n"
                            + "=" * 60
                        )
                        # Exit in production; warn only in development
                        from django.conf import settings
                        if not settings.DEBUG:
                            sys.exit(1)

        except Exception as e:
            logger.error(f"pg_ivm extension check failed: {e}")
```
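As context for the indexes this extension enables: a trigram GIN index lets `LIKE '%...%'`-style patterns use an index scan instead of a sequential scan. A hedged sketch of such a query through Django's raw cursor (the view, columns, and index come from the migration below; the query itself is illustrative):

```python
from django.db import connection

def search_titles(keyword: str, limit: int = 20):
    """Fuzzy title search that can use the gin_trgm_ops index on asset_search_view."""
    with connection.cursor() as cursor:
        cursor.execute(
            """
            SELECT id, url, title
            FROM asset_search_view
            WHERE title ILIKE %s
            ORDER BY created_at DESC
            LIMIT %s
            """,
            [f"%{keyword}%", limit],
        )
        return cursor.fetchall()
```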
@@ -1,101 +0,0 @@
```python
"""
Create the asset-search IMMV (incrementally maintained materialized view).

Uses the pg_ivm extension to create an IMMV that updates incrementally on
data changes, with no manual refresh required.
"""

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('asset', '0001_initial'),
    ]

    operations = [
        # 1. Ensure the pg_ivm extension is enabled
        migrations.RunSQL(
            sql="CREATE EXTENSION IF NOT EXISTS pg_ivm;",
            reverse_sql="-- pg_ivm extension kept for other uses"
        ),

        # 2. Create the IMMV via pg_ivm
        migrations.RunSQL(
            sql="""
            SELECT pgivm.create_immv('asset_search_view', $$
                SELECT
                    w.id,
                    w.url,
                    w.host,
                    w.title,
                    w.tech,
                    w.status_code,
                    w.response_headers,
                    w.response_body,
                    w.created_at,
                    w.target_id
                FROM website w
            $$);
            """,
            reverse_sql="SELECT pgivm.drop_immv('asset_search_view');"
        ),

        # 3. Create a unique index (row identity)
        migrations.RunSQL(
            sql="""
            CREATE UNIQUE INDEX IF NOT EXISTS asset_search_view_id_idx
            ON asset_search_view (id);
            """,
            reverse_sql="""
            DROP INDEX IF EXISTS asset_search_view_id_idx;
            """
        ),

        # 4. Create search-optimized indexes
        migrations.RunSQL(
            sql="""
            -- Fuzzy-search index on host
            CREATE INDEX IF NOT EXISTS asset_search_view_host_trgm_idx
            ON asset_search_view USING gin (host gin_trgm_ops);

            -- Fuzzy-search index on title
            CREATE INDEX IF NOT EXISTS asset_search_view_title_trgm_idx
            ON asset_search_view USING gin (title gin_trgm_ops);

            -- Fuzzy-search index on url
            CREATE INDEX IF NOT EXISTS asset_search_view_url_trgm_idx
            ON asset_search_view USING gin (url gin_trgm_ops);

            -- Fuzzy-search index on response_headers
            CREATE INDEX IF NOT EXISTS asset_search_view_headers_trgm_idx
            ON asset_search_view USING gin (response_headers gin_trgm_ops);

            -- Fuzzy-search index on response_body
            CREATE INDEX IF NOT EXISTS asset_search_view_body_trgm_idx
            ON asset_search_view USING gin (response_body gin_trgm_ops);

            -- Array index on tech
            CREATE INDEX IF NOT EXISTS asset_search_view_tech_idx
            ON asset_search_view USING gin (tech);

            -- Index on status_code
            CREATE INDEX IF NOT EXISTS asset_search_view_status_idx
            ON asset_search_view (status_code);

            -- Sort index on created_at
            CREATE INDEX IF NOT EXISTS asset_search_view_created_idx
            ON asset_search_view (created_at DESC);
            """,
            reverse_sql="""
            DROP INDEX IF EXISTS asset_search_view_host_trgm_idx;
            DROP INDEX IF EXISTS asset_search_view_title_trgm_idx;
            DROP INDEX IF EXISTS asset_search_view_url_trgm_idx;
            DROP INDEX IF EXISTS asset_search_view_headers_trgm_idx;
            DROP INDEX IF EXISTS asset_search_view_body_trgm_idx;
            DROP INDEX IF EXISTS asset_search_view_tech_idx;
            DROP INDEX IF EXISTS asset_search_view_status_idx;
            DROP INDEX IF EXISTS asset_search_view_created_idx;
            """
        ),
    ]
```
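Unlike a plain materialized view, an IMMV needs no `REFRESH MATERIALIZED VIEW`; a small illustrative check (assumes the `website` base table and the view created by the migration above):

```python
from django.db import connection

def count_view_rows() -> int:
    with connection.cursor() as cursor:
        cursor.execute("SELECT COUNT(*) FROM asset_search_view")
        return cursor.fetchone()[0]

# After an INSERT into the base "website" table commits, pg_ivm updates
# asset_search_view incrementally, so this count rises without any manual refresh.
```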
@@ -1,7 +0,0 @@
```python
"""
Task module for the asset app.

Note: materialized-view refreshing has moved to an APScheduler job
(apps.engine.scheduler).
"""

__all__ = []
```
@@ -1,26 +0,0 @@
```python
"""
URL configuration for the common module.

Routes:
- /api/health/   Health check (no auth required)
- /api/auth/*    Authentication (login, logout, user info)
- /api/system/*  System administration (log viewing, etc.)
"""

from django.urls import path
from .views import LoginView, LogoutView, MeView, ChangePasswordView, SystemLogsView, SystemLogFilesView, HealthCheckView

urlpatterns = [
    # Health check (no auth required)
    path('health/', HealthCheckView.as_view(), name='health-check'),

    # Authentication
    path('auth/login/', LoginView.as_view(), name='auth-login'),
    path('auth/logout/', LogoutView.as_view(), name='auth-logout'),
    path('auth/me/', MeView.as_view(), name='auth-me'),
    path('auth/change-password/', ChangePasswordView.as_view(), name='auth-change-password'),

    # System administration
    path('system/logs/', SystemLogsView.as_view(), name='system-logs'),
    path('system/logs/files/', SystemLogFilesView.as_view(), name='system-log-files'),
]
```
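For a quick smoke test of the unauthenticated route, a request like the following works once the stack is up (host and port follow the README's `https://ip:8083`; the response body shape is not specified here):

```python
import requests

# Self-signed certificate in the default install, hence verify=False
resp = requests.get("https://127.0.0.1:8083/api/health/", verify=False, timeout=5)
print(resp.status_code)  # expect 200 when the service is healthy
```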
@@ -1,116 +0,0 @@
```python
"""CSV export utilities.

Streaming CSV generation with support for:
- UTF-8 BOM (Excel compatibility)
- RFC 4180 escaping
- Streamed output (memory friendly)
"""

import csv
import io
from datetime import datetime
from typing import Iterator, Dict, Any, List, Callable, Optional

# UTF-8 BOM so Excel detects the encoding correctly
UTF8_BOM = '\ufeff'


def generate_csv_rows(
    data_iterator: Iterator[Dict[str, Any]],
    headers: List[str],
    field_formatters: Optional[Dict[str, Callable]] = None
) -> Iterator[str]:
    """
    Generate CSV rows as a stream.

    Args:
        data_iterator: iterator of row dicts
        headers: list of CSV column headers
        field_formatters: per-field formatter functions, keyed by field name

    Yields:
        CSV row strings (including the line terminator)

    Example:
        >>> data = [{'ip': '192.168.1.1', 'hosts': ['a.com', 'b.com']}]
        >>> headers = ['ip', 'hosts']
        >>> formatters = {'hosts': format_list_field}
        >>> for row in generate_csv_rows(iter(data), headers, formatters):
        ...     print(row, end='')
    """
    # Emit BOM + header row
    output = io.StringIO()
    writer = csv.writer(output, quoting=csv.QUOTE_MINIMAL)
    writer.writerow(headers)
    yield UTF8_BOM + output.getvalue()

    # Emit data rows
    for row_data in data_iterator:
        output = io.StringIO()
        writer = csv.writer(output, quoting=csv.QUOTE_MINIMAL)

        row = []
        for header in headers:
            value = row_data.get(header, '')
            if field_formatters and header in field_formatters:
                value = field_formatters[header](value)
            row.append(value if value is not None else '')

        writer.writerow(row)
        yield output.getvalue()


def format_list_field(values: List, separator: str = ';') -> str:
    """
    Format a list field as a separator-joined string.

    Args:
        values: list of values
        separator: separator, ';' by default

    Returns:
        The values joined by the separator.

    Example:
        >>> format_list_field(['a.com', 'b.com'])
        'a.com;b.com'
        >>> format_list_field([80, 443])
        '80;443'
        >>> format_list_field([])
        ''
        >>> format_list_field(None)
        ''
    """
    if not values:
        return ''
    return separator.join(str(v) for v in values)


def format_datetime(dt: Optional[datetime]) -> str:
    """
    Format a datetime as a string (converted to the local timezone).

    Args:
        dt: a datetime object or None

    Returns:
        Formatted string, YYYY-MM-DD HH:MM:SS (local timezone).

    Example:
        >>> from datetime import datetime
        >>> format_datetime(datetime(2024, 1, 15, 10, 30, 0))
        '2024-01-15 10:30:00'
        >>> format_datetime(None)
        ''
    """
    if dt is None:
        return ''
    if isinstance(dt, str):
        return dt

    # Convert to the local timezone (from Django settings)
    from django.utils import timezone
    if timezone.is_aware(dt):
        dt = timezone.localtime(dt)

    return dt.strftime('%Y-%m-%d %H:%M:%S')
```
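A typical way to wire these helpers into a download endpoint is Django's `StreamingHttpResponse`; a minimal sketch (the data iterator is a stand-in, and the view is assumed to live alongside the helpers above):

```python
from django.http import StreamingHttpResponse

def export_hosts_csv(request):
    # In practice the iterator would come from queryset.values(...).iterator()
    data = iter([{'ip': '192.168.1.1', 'hosts': ['a.com', 'b.com']}])
    rows = generate_csv_rows(data, headers=['ip', 'hosts'],
                             field_formatters={'hosts': format_list_field})
    response = StreamingHttpResponse(rows, content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename="hosts.csv"'
    return response
```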
@@ -1,684 +0,0 @@
|
||||
"""
|
||||
目录扫描 Flow
|
||||
|
||||
负责编排目录扫描的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 支持并发执行扫描工具(使用 ThreadPoolTaskRunner)
|
||||
- 每个 Task 可独立重试
|
||||
- 配置由 YAML 解析
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
from prefect import flow
|
||||
from prefect.task_runners import ThreadPoolTaskRunner
|
||||
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import List, Tuple
|
||||
|
||||
from apps.scan.tasks.directory_scan import (
|
||||
export_sites_task,
|
||||
run_and_stream_save_directories_task
|
||||
)
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.utils import config_parser, build_scan_command, ensure_wordlist_local
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# 默认最大并发数
|
||||
DEFAULT_MAX_WORKERS = 5
|
||||
|
||||
|
||||
def calculate_directory_scan_timeout(
|
||||
tool_config: dict,
|
||||
base_per_word: float = 1.0,
|
||||
min_timeout: int = 60,
|
||||
max_timeout: int = 7200
|
||||
) -> int:
|
||||
"""
|
||||
根据字典行数计算目录扫描超时时间
|
||||
|
||||
计算公式:超时时间 = 字典行数 × 每个单词基础时间
|
||||
超时范围:60秒 ~ 2小时(7200秒)
|
||||
|
||||
Args:
|
||||
tool_config: 工具配置字典,包含 wordlist 路径
|
||||
base_per_word: 每个单词的基础时间(秒),默认 1.0秒
|
||||
min_timeout: 最小超时时间(秒),默认 60秒
|
||||
max_timeout: 最大超时时间(秒),默认 7200秒(2小时)
|
||||
|
||||
Returns:
|
||||
int: 计算出的超时时间(秒),范围:60 ~ 7200
|
||||
|
||||
Example:
|
||||
# 1000行字典 × 1.0秒 = 1000秒 → 限制为7200秒中的 1000秒
|
||||
# 10000行字典 × 1.0秒 = 10000秒 → 限制为7200秒(最大值)
|
||||
timeout = calculate_directory_scan_timeout(
|
||||
tool_config={'wordlist': '/path/to/wordlist.txt'}
|
||||
)
|
||||
"""
|
||||
try:
|
||||
# 从 tool_config 中获取 wordlist 路径
|
||||
wordlist_path = tool_config.get('wordlist')
|
||||
if not wordlist_path:
|
||||
logger.warning("工具配置中未指定 wordlist,使用默认超时: %d秒", min_timeout)
|
||||
return min_timeout
|
||||
|
||||
# 展开用户目录(~)
|
||||
wordlist_path = os.path.expanduser(wordlist_path)
|
||||
|
||||
# 检查文件是否存在
|
||||
if not os.path.exists(wordlist_path):
|
||||
logger.warning("字典文件不存在: %s,使用默认超时: %d秒", wordlist_path, min_timeout)
|
||||
return min_timeout
|
||||
|
||||
# 使用 wc -l 快速统计字典行数
|
||||
result = subprocess.run(
|
||||
['wc', '-l', wordlist_path],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True
|
||||
)
|
||||
# wc -l 输出格式:行数 + 空格 + 文件名
|
||||
line_count = int(result.stdout.strip().split()[0])
|
||||
|
||||
# 计算超时时间
|
||||
timeout = int(line_count * base_per_word)
|
||||
|
||||
# 设置合理的下限(不再设置上限)
|
||||
timeout = max(min_timeout, timeout)
|
||||
|
||||
logger.info(
|
||||
"目录扫描超时计算 - 字典: %s, 行数: %d, 基础时间: %.3f秒/词, 计算超时: %d秒",
|
||||
wordlist_path, line_count, base_per_word, timeout
|
||||
)
|
||||
|
||||
return timeout
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
logger.error("统计字典行数失败: %s", e)
|
||||
# 失败时返回默认超时
|
||||
return min_timeout
|
||||
except (ValueError, IndexError) as e:
|
||||
logger.error("解析字典行数失败: %s", e)
|
||||
return min_timeout
|
||||
except Exception as e:
|
||||
logger.error("计算超时时间异常: %s", e)
|
||||
return min_timeout
|
||||
|
||||
|
||||
def _get_max_workers(tool_config: dict, default: int = DEFAULT_MAX_WORKERS) -> int:
|
||||
"""
|
||||
从单个工具配置中获取 max_workers 参数
|
||||
|
||||
Args:
|
||||
tool_config: 单个工具的配置字典,如 {'max_workers': 10, 'threads': 5, ...}
|
||||
default: 默认值,默认为 5
|
||||
|
||||
Returns:
|
||||
int: max_workers 值
|
||||
"""
|
||||
if not isinstance(tool_config, dict):
|
||||
return default
|
||||
|
||||
# 支持 max_workers 和 max-workers(YAML 中划线会被转换)
|
||||
max_workers = tool_config.get('max_workers') or tool_config.get('max-workers')
|
||||
if max_workers is not None and isinstance(max_workers, int) and max_workers > 0:
|
||||
return max_workers
|
||||
return default
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def _export_site_urls(target_id: int, target_name: str, directory_scan_dir: Path) -> tuple[str, int]:
|
||||
"""
|
||||
导出目标下的所有站点 URL 到文件(支持懒加载)
|
||||
|
||||
Args:
|
||||
target_id: 目标 ID
|
||||
target_name: 目标名称(用于懒加载创建默认站点)
|
||||
directory_scan_dir: 目录扫描目录
|
||||
|
||||
Returns:
|
||||
tuple: (sites_file, site_count)
|
||||
|
||||
Raises:
|
||||
ValueError: 站点数量为 0
|
||||
"""
|
||||
logger.info("Step 1: 导出目标的所有站点 URL")
|
||||
|
||||
sites_file = str(directory_scan_dir / 'sites.txt')
|
||||
export_result = export_sites_task(
|
||||
target_id=target_id,
|
||||
output_file=sites_file,
|
||||
batch_size=1000 # 每次读取 1000 条,优化内存占用
|
||||
)
|
||||
|
||||
site_count = export_result['total_count']
|
||||
|
||||
logger.info(
|
||||
"✓ 站点 URL 导出完成 - 文件: %s, 数量: %d",
|
||||
export_result['output_file'],
|
||||
site_count
|
||||
)
|
||||
|
||||
if site_count == 0:
|
||||
logger.warning("目标下没有站点,无法执行目录扫描")
|
||||
# 不抛出异常,由上层决定如何处理
|
||||
# raise ValueError("目标下没有站点,无法执行目录扫描")
|
||||
|
||||
return export_result['output_file'], site_count
|
||||
|
||||
|
||||
def _run_scans_sequentially(
|
||||
enabled_tools: dict,
|
||||
sites_file: str,
|
||||
directory_scan_dir: Path,
|
||||
scan_id: int,
|
||||
target_id: int,
|
||||
site_count: int,
|
||||
target_name: str
|
||||
) -> tuple[int, int, list]:
|
||||
"""
|
||||
串行执行目录扫描任务(支持多工具)- 已废弃,保留用于兼容
|
||||
|
||||
Args:
|
||||
enabled_tools: 启用的工具配置字典
|
||||
sites_file: 站点文件路径
|
||||
directory_scan_dir: 目录扫描目录
|
||||
scan_id: 扫描任务 ID
|
||||
target_id: 目标 ID
|
||||
site_count: 站点数量
|
||||
target_name: 目标名称(用于错误日志)
|
||||
|
||||
Returns:
|
||||
tuple: (total_directories, processed_sites, failed_sites)
|
||||
"""
|
||||
# 读取站点列表
|
||||
sites = []
|
||||
with open(sites_file, 'r', encoding='utf-8') as f:
|
||||
for line in f:
|
||||
site_url = line.strip()
|
||||
if site_url:
|
||||
sites.append(site_url)
|
||||
|
||||
logger.info("准备扫描 %d 个站点,使用工具: %s", len(sites), ', '.join(enabled_tools.keys()))
|
||||
|
||||
total_directories = 0
|
||||
processed_sites_set = set() # 使用 set 避免重复计数
|
||||
failed_sites = []
|
||||
|
||||
# 遍历每个工具
|
||||
for tool_name, tool_config in enabled_tools.items():
|
||||
logger.info("="*60)
|
||||
logger.info("使用工具: %s", tool_name)
|
||||
logger.info("="*60)
|
||||
|
||||
# 如果配置了 wordlist_name,则先确保本地存在对应的字典文件(含 hash 校验)
|
||||
wordlist_name = tool_config.get('wordlist_name')
|
||||
if wordlist_name:
|
||||
try:
|
||||
local_wordlist_path = ensure_wordlist_local(wordlist_name)
|
||||
tool_config['wordlist'] = local_wordlist_path
|
||||
except Exception as exc:
|
||||
logger.error("为工具 %s 准备字典失败: %s", tool_name, exc)
|
||||
# 当前工具无法执行,将所有站点视为失败,继续下一个工具
|
||||
failed_sites.extend(sites)
|
||||
continue
|
||||
|
||||
# 逐个站点执行扫描
|
||||
for idx, site_url in enumerate(sites, 1):
|
||||
logger.info(
|
||||
"[%d/%d] 开始扫描站点: %s (工具: %s)",
|
||||
idx, len(sites), site_url, tool_name
|
||||
)
|
||||
|
||||
# 使用统一的命令构建器
|
||||
try:
|
||||
command = build_scan_command(
|
||||
tool_name=tool_name,
|
||||
scan_type='directory_scan',
|
||||
command_params={
|
||||
'url': site_url
|
||||
},
|
||||
tool_config=tool_config
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"✗ [%d/%d] 构建 %s 命令失败: %s - 站点: %s",
|
||||
idx, len(sites), tool_name, e, site_url
|
||||
)
|
||||
failed_sites.append(site_url)
|
||||
continue
|
||||
|
||||
# 单个站点超时:从配置中获取(支持 'auto' 动态计算)
|
||||
# ffuf 逐个站点扫描,timeout 就是单个站点的超时时间
|
||||
site_timeout = tool_config.get('timeout', 300)
|
||||
if site_timeout == 'auto':
|
||||
# 动态计算超时时间(基于字典行数)
|
||||
site_timeout = calculate_directory_scan_timeout(tool_config)
|
||||
logger.info(f"✓ 工具 {tool_name} 动态计算 timeout: {site_timeout}秒")
|
||||
|
||||
# 生成日志文件路径
|
||||
from datetime import datetime
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
||||
log_file = directory_scan_dir / f"{tool_name}_{timestamp}_{idx}.log"
|
||||
|
||||
try:
|
||||
# 直接调用 task(串行执行)
|
||||
result = run_and_stream_save_directories_task(
|
||||
cmd=command,
|
||||
tool_name=tool_name, # 新增:工具名称
|
||||
scan_id=scan_id,
|
||||
target_id=target_id,
|
||||
site_url=site_url,
|
||||
cwd=str(directory_scan_dir),
|
||||
shell=True,
|
||||
batch_size=1000,
|
||||
timeout=site_timeout,
|
||||
log_file=str(log_file) # 新增:日志文件路径
|
||||
)
|
||||
|
||||
total_directories += result.get('created_directories', 0)
|
||||
processed_sites_set.add(site_url) # 使用 set 记录成功的站点
|
||||
|
||||
logger.info(
|
||||
"✓ [%d/%d] 站点扫描完成: %s - 发现 %d 个目录",
|
||||
idx, len(sites), site_url,
|
||||
result.get('created_directories', 0)
|
||||
)
|
||||
|
||||
except subprocess.TimeoutExpired as exc:
|
||||
# 超时异常单独处理
|
||||
failed_sites.append(site_url)
|
||||
logger.warning(
|
||||
"⚠️ [%d/%d] 站点扫描超时: %s - 超时配置: %d秒\n"
|
||||
"注意:超时前已解析的目录数据已保存到数据库,但扫描未完全完成。",
|
||||
idx, len(sites), site_url, site_timeout
|
||||
)
|
||||
except Exception as exc:
|
||||
# 其他异常
|
||||
failed_sites.append(site_url)
|
||||
logger.error(
|
||||
"✗ [%d/%d] 站点扫描失败: %s - 错误: %s",
|
||||
idx, len(sites), site_url, exc
|
||||
)
|
||||
|
||||
# 每 10 个站点输出进度
|
||||
if idx % 10 == 0:
|
||||
logger.info(
|
||||
"进度: %d/%d (%.1f%%) - 已发现 %d 个目录",
|
||||
idx, len(sites), idx/len(sites)*100, total_directories
|
||||
)
|
||||
|
||||
# 计算成功和失败的站点数
|
||||
processed_count = len(processed_sites_set)
|
||||
|
||||
if failed_sites:
|
||||
logger.warning(
|
||||
"部分站点扫描失败: %d/%d",
|
||||
len(failed_sites), len(sites)
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"✓ 串行目录扫描执行完成 - 成功: %d/%d, 失败: %d, 总目录数: %d",
|
||||
processed_count, len(sites), len(failed_sites), total_directories
|
||||
)
|
||||
|
||||
return total_directories, processed_count, failed_sites
|
||||
|
||||
|
||||
def _generate_log_filename(tool_name: str, site_url: str, directory_scan_dir: Path) -> Path:
|
||||
"""
|
||||
生成唯一的日志文件名
|
||||
|
||||
使用 URL 的 hash 确保并发时不会冲突
|
||||
|
||||
Args:
|
||||
tool_name: 工具名称
|
||||
site_url: 站点 URL
|
||||
directory_scan_dir: 目录扫描目录
|
||||
|
||||
Returns:
|
||||
Path: 日志文件路径
|
||||
"""
|
||||
url_hash = hashlib.md5(site_url.encode()).hexdigest()[:8]
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
|
||||
return directory_scan_dir / f"{tool_name}_{url_hash}_{timestamp}.log"
|
||||
|
||||
|
||||
def _run_scans_concurrently(
|
||||
enabled_tools: dict,
|
||||
sites_file: str,
|
||||
directory_scan_dir: Path,
|
||||
scan_id: int,
|
||||
target_id: int,
|
||||
site_count: int,
|
||||
target_name: str
|
||||
) -> Tuple[int, int, List[str]]:
|
||||
"""
|
||||
并发执行目录扫描任务(使用 ThreadPoolTaskRunner)
|
||||
|
||||
Args:
|
||||
enabled_tools: 启用的工具配置字典
|
||||
sites_file: 站点文件路径
|
||||
directory_scan_dir: 目录扫描目录
|
||||
scan_id: 扫描任务 ID
|
||||
target_id: 目标 ID
|
||||
site_count: 站点数量
|
||||
target_name: 目标名称(用于错误日志)
|
||||
|
||||
Returns:
|
||||
tuple: (total_directories, processed_sites, failed_sites)
|
||||
"""
|
||||
# 读取站点列表
|
||||
sites: List[str] = []
|
||||
with open(sites_file, 'r', encoding='utf-8') as f:
|
||||
for line in f:
|
||||
site_url = line.strip()
|
||||
if site_url:
|
||||
sites.append(site_url)
|
||||
|
||||
if not sites:
|
||||
logger.warning("站点列表为空")
|
||||
return 0, 0, []
|
||||
|
||||
    logger.info(
        "Preparing concurrent scan of %d sites, using tools: %s",
        len(sites), ', '.join(enabled_tools.keys())
    )

    total_directories = 0
    processed_sites_count = 0
    failed_sites: List[str] = []

    # Iterate over each tool
    for tool_name, tool_config in enabled_tools.items():
        # Each tool reads its own max_workers setting
        max_workers = _get_max_workers(tool_config)

        logger.info("="*60)
        logger.info("Using tool: %s (concurrent mode, max_workers=%d)", tool_name, max_workers)
        logger.info("="*60)

        # If wordlist_name is configured, make sure the wordlist exists locally first (with hash verification)
        wordlist_name = tool_config.get('wordlist_name')
        if wordlist_name:
            try:
                local_wordlist_path = ensure_wordlist_local(wordlist_name)
                tool_config['wordlist'] = local_wordlist_path
            except Exception as exc:
                logger.error("Failed to prepare wordlist for tool %s: %s", tool_name, exc)
                # This tool cannot run; treat every site as failed and move on to the next tool
                failed_sites.extend(sites)
                continue

        # Compute the timeout (shared by all sites)
        site_timeout = tool_config.get('timeout', 300)
        if site_timeout == 'auto':
            site_timeout = calculate_directory_scan_timeout(tool_config)
            logger.info(f"✓ Tool {tool_name} dynamically calculated timeout: {site_timeout}s")

        # Prepare the scan parameters for every site
        scan_params_list = []
        for idx, site_url in enumerate(sites, 1):
            try:
                command = build_scan_command(
                    tool_name=tool_name,
                    scan_type='directory_scan',
                    command_params={'url': site_url},
                    tool_config=tool_config
                )
                log_file = _generate_log_filename(tool_name, site_url, directory_scan_dir)
                scan_params_list.append({
                    'idx': idx,
                    'site_url': site_url,
                    'command': command,
                    'log_file': str(log_file),
                    'timeout': site_timeout
                })
            except Exception as e:
                logger.error(
                    "✗ [%d/%d] Failed to build %s command: %s - site: %s",
                    idx, len(sites), tool_name, e, site_url
                )
                failed_sites.append(site_url)

        if not scan_params_list:
            logger.warning("No valid scan tasks")
            continue

        # ============================================================
        # Batched execution strategy: cap the number of ffuf processes actually running concurrently
        # ============================================================
        total_tasks = len(scan_params_list)
        logger.info("Starting batched execution of %d scan tasks (%d per batch)...", total_tasks, max_workers)

        batch_num = 0
        for batch_start in range(0, total_tasks, max_workers):
            batch_end = min(batch_start + max_workers, total_tasks)
            batch_params = scan_params_list[batch_start:batch_end]
            batch_num += 1

            logger.info("Running batch %d (%d-%d/%d)...", batch_num, batch_start + 1, batch_end, total_tasks)

            # Submit the current batch (non-blocking; futures are returned immediately)
            futures = []
            for params in batch_params:
                future = run_and_stream_save_directories_task.submit(
                    cmd=params['command'],
                    tool_name=tool_name,
                    scan_id=scan_id,
                    target_id=target_id,
                    site_url=params['site_url'],
                    cwd=str(directory_scan_dir),
                    shell=True,
                    batch_size=1000,
                    timeout=params['timeout'],
                    log_file=params['log_file']
                )
                futures.append((params['idx'], params['site_url'], future))

            # Wait for every task in this batch (blocking; the next batch starts only after this one finishes)
            for idx, site_url, future in futures:
                try:
                    result = future.result()  # block until this single task completes
                    directories_found = result.get('created_directories', 0)
                    total_directories += directories_found
                    processed_sites_count += 1

                    logger.info(
                        "✓ [%d/%d] Site scan finished: %s - found %d directories",
                        idx, len(sites), site_url, directories_found
                    )

                except Exception as exc:
                    failed_sites.append(site_url)
                    if 'timeout' in str(exc).lower() or isinstance(exc, subprocess.TimeoutExpired):
                        logger.warning(
                            "⚠️ [%d/%d] Site scan timed out: %s - error: %s",
                            idx, len(sites), site_url, exc
                        )
                    else:
                        logger.error(
                            "✗ [%d/%d] Site scan failed: %s - error: %s",
                            idx, len(sites), site_url, exc
                        )

    # Print a summary
    if failed_sites:
        logger.warning(
            "Some sites failed to scan: %d/%d",
            len(failed_sites), len(sites)
        )

    logger.info(
        "✓ Concurrent directory scan finished - succeeded: %d/%d, failed: %d, total directories: %d",
        processed_sites_count, len(sites), len(failed_sites), total_directories
    )

    return total_directories, processed_sites_count, failed_sites
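
# --- Illustrative sketch (not from the repository) ---
# The batching above gates concurrency by submitting at most max_workers Prefect
# task futures and blocking on the whole batch before starting the next one.
# A minimal stand-alone version of the same batch-submit-then-wait pattern with
# plain concurrent.futures; `jobs` and `worker` are hypothetical names.
from concurrent.futures import ThreadPoolExecutor

def run_in_batches(jobs: list, worker, max_workers: int) -> list:
    """Run worker(job) over jobs in groups of max_workers; each group must finish before the next starts."""
    results = []
    for start in range(0, len(jobs), max_workers):
        batch = jobs[start:start + max_workers]
        with ThreadPoolExecutor(max_workers=max_workers) as pool:
            futures = [pool.submit(worker, job) for job in batch]
            results.extend(f.result() for f in futures)  # blocks until the whole batch completes
    return results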


@flow(
    name="directory_scan",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def directory_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: dict
) -> dict:
    """
    Directory scan Flow

    Main responsibilities:
    1. Fetch all site URLs from the target
    2. Run a directory scan against each site URL (supports ffuf and similar tools)
    3. Stream the scan results into the Directory table in the database

    Workflow:
    Step 0: create the working directory
    Step 1: export the site URL list to a file (consumed by the scan tools)
    Step 2: validate the tool configuration
    Step 3: run the scan tools concurrently and save results in real time (using ThreadPoolTaskRunner)

    ffuf output fields:
    - url: discovered directory/file URL
    - length: response content length
    - status: HTTP status code
    - words: word count of the response body
    - lines: line count of the response body
    - content_type: content type
    - duration: request duration

    Args:
        scan_id: scan task ID
        target_name: target name
        target_id: target ID
        scan_workspace_dir: scan workspace directory
        enabled_tools: dict of enabled tool configurations

    Returns:
        dict: {
            'success': bool,
            'scan_id': int,
            'target': str,
            'scan_workspace_dir': str,
            'sites_file': str,
            'site_count': int,
            'total_directories': int,   # total number of directories found
            'processed_sites': int,     # number of sites processed successfully
            'failed_sites_count': int,  # number of failed sites
            'executed_tasks': list
        }

    Raises:
        ValueError: invalid arguments
        RuntimeError: execution failure
    """
    try:
        logger.info(
            "="*60 + "\n" +
            "Starting directory scan\n" +
            f"  Scan ID: {scan_id}\n" +
            f"  Target: {target_name}\n" +
            f"  Workspace: {scan_workspace_dir}\n" +
            "="*60
        )

        # Argument validation
        if scan_id is None:
            raise ValueError("scan_id must not be empty")
        if not target_name:
            raise ValueError("target_name must not be empty")
        if target_id is None:
            raise ValueError("target_id must not be empty")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir must not be empty")
        if not enabled_tools:
            raise ValueError("enabled_tools must not be empty")

        # Step 0: create the working directory
        from apps.scan.utils import setup_scan_directory
        directory_scan_dir = setup_scan_directory(scan_workspace_dir, 'directory_scan')

        # Step 1: export the site URLs (supports lazy loading)
        sites_file, site_count = _export_site_urls(target_id, target_name, directory_scan_dir)

        if site_count == 0:
            logger.warning("Target has no sites; skipping directory scan")
            return {
                'success': True,
                'scan_id': scan_id,
                'target': target_name,
                'scan_workspace_dir': scan_workspace_dir,
                'sites_file': sites_file,
                'site_count': 0,
                'total_directories': 0,
                'processed_sites': 0,
                'failed_sites_count': 0,
                'executed_tasks': ['export_sites']
            }

        # Step 2: tool configuration info
        logger.info("Step 2: tool configuration info")
        tool_info = []
        for tool_name, tool_config in enabled_tools.items():
            mw = _get_max_workers(tool_config)
            tool_info.append(f"{tool_name}(max_workers={mw})")
        logger.info("✓ Enabled tools: %s", ', '.join(tool_info))

        # Step 3: run the scan tools concurrently and save results in real time
        logger.info("Step 3: run the scan tools concurrently and save results in real time")
        total_directories, processed_sites, failed_sites = _run_scans_concurrently(
            enabled_tools=enabled_tools,
            sites_file=sites_file,
            directory_scan_dir=directory_scan_dir,
            scan_id=scan_id,
            target_id=target_id,
            site_count=site_count,
            target_name=target_name
        )

        # Check whether every site failed
        if processed_sites == 0 and site_count > 0:
            logger.warning("All site scans failed - total sites: %d, failures: %d", site_count, len(failed_sites))
            # Do not raise; let the scan continue

        logger.info("="*60 + "\n✓ Directory scan finished\n" + "="*60)

        return {
            'success': True,
            'scan_id': scan_id,
            'target': target_name,
            'scan_workspace_dir': scan_workspace_dir,
            'sites_file': sites_file,
            'site_count': site_count,
            'total_directories': total_directories,
            'processed_sites': processed_sites,
            'failed_sites_count': len(failed_sites),
            'executed_tasks': ['export_sites', 'run_and_stream_save_directories']
        }

    except Exception as e:
        logger.exception("Directory scan failed: %s", e)
        raise
@@ -1,380 +0,0 @@
"""
|
||||
指纹识别 Flow
|
||||
|
||||
负责编排指纹识别的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 在 site_scan 后串行执行
|
||||
- 使用 xingfinger 工具识别技术栈
|
||||
- 流式处理输出,批量更新数据库
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
import logging
|
||||
import os
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
from prefect import flow
|
||||
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.tasks.fingerprint_detect import (
|
||||
export_urls_for_fingerprint_task,
|
||||
run_xingfinger_and_stream_update_tech_task,
|
||||
)
|
||||
from apps.scan.utils import build_scan_command
|
||||
from apps.scan.utils.fingerprint_helpers import get_fingerprint_paths
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def calculate_fingerprint_detect_timeout(
|
||||
url_count: int,
|
||||
base_per_url: float = 10.0,
|
||||
min_timeout: int = 300
|
||||
) -> int:
|
||||
"""
|
||||
根据 URL 数量计算超时时间
|
||||
|
||||
公式:超时时间 = URL 数量 × 每 URL 基础时间
|
||||
最小值:300秒
|
||||
无上限
|
||||
|
||||
Args:
|
||||
url_count: URL 数量
|
||||
base_per_url: 每 URL 基础时间(秒),默认 10秒
|
||||
min_timeout: 最小超时时间(秒),默认 300秒
|
||||
|
||||
Returns:
|
||||
int: 计算出的超时时间(秒)
|
||||
|
||||
"""
|
||||
timeout = int(url_count * base_per_url)
|
||||
return max(min_timeout, timeout)
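
# Worked example (illustrative, not from the repository): with the defaults of
# 10 s per URL and a 300 s floor, 20 URLs give max(300, 200) = 300 s, while
# 500 URLs give max(300, 5000) = 5000 s.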


def _export_urls(
    target_id: int,
    fingerprint_dir: Path,
    source: str = 'website'
) -> tuple[str, int]:
    """
    Export URLs to a file

    Args:
        target_id: target ID
        fingerprint_dir: fingerprint detection directory
        source: data source type

    Returns:
        tuple: (urls_file, total_count)
    """
    logger.info("Step 1: export the URL list (source=%s)", source)

    urls_file = str(fingerprint_dir / 'urls.txt')
    export_result = export_urls_for_fingerprint_task(
        target_id=target_id,
        output_file=urls_file,
        source=source,
        batch_size=1000
    )

    total_count = export_result['total_count']

    logger.info(
        "✓ URL export finished - file: %s, count: %d",
        export_result['output_file'],
        total_count
    )

    return export_result['output_file'], total_count


def _run_fingerprint_detect(
    enabled_tools: dict,
    urls_file: str,
    url_count: int,
    fingerprint_dir: Path,
    scan_id: int,
    target_id: int,
    source: str
) -> tuple[dict, list]:
    """
    Run the fingerprint detection tasks

    Args:
        enabled_tools: dict of enabled tool configurations
        urls_file: path to the URL file
        url_count: total number of URLs
        fingerprint_dir: fingerprint detection directory
        scan_id: scan task ID
        target_id: target ID
        source: data source type

    Returns:
        tuple: (tool_stats, failed_tools)
    """
    tool_stats = {}
    failed_tools = []

    for tool_name, tool_config in enabled_tools.items():
        # 1. Resolve the fingerprint library paths
        lib_names = tool_config.get('fingerprint_libs', ['ehole'])
        fingerprint_paths = get_fingerprint_paths(lib_names)

        if not fingerprint_paths:
            reason = f"No usable fingerprint libraries: {lib_names}"
            logger.warning(reason)
            failed_tools.append({'tool': tool_name, 'reason': reason})
            continue

        # 2. Merge the fingerprint library paths into tool_config (used when building the command)
        tool_config_with_paths = {**tool_config, **fingerprint_paths}

        # 3. Build the command
        try:
            command = build_scan_command(
                tool_name=tool_name,
                scan_type='fingerprint_detect',
                command_params={
                    'urls_file': urls_file
                },
                tool_config=tool_config_with_paths
            )
        except Exception as e:
            reason = f"Command build failed: {str(e)}"
            logger.error("Failed to build %s command: %s", tool_name, e)
            failed_tools.append({'tool': tool_name, 'reason': reason})
            continue

        # 4. Compute the timeout
        timeout = calculate_fingerprint_detect_timeout(url_count)

        # 5. Generate the log file path
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        log_file = fingerprint_dir / f"{tool_name}_{timestamp}.log"

        logger.info(
            "Starting %s fingerprint detection - URLs: %d, timeout: %ds, fingerprint libs: %s",
            tool_name, url_count, timeout, list(fingerprint_paths.keys())
        )

        # 6. Run the scan task
        try:
            result = run_xingfinger_and_stream_update_tech_task(
                cmd=command,
                tool_name=tool_name,
                scan_id=scan_id,
                target_id=target_id,
                source=source,
                cwd=str(fingerprint_dir),
                timeout=timeout,
                log_file=str(log_file),
                batch_size=100
            )

            tool_stats[tool_name] = {
                'command': command,
                'result': result,
                'timeout': timeout,
                'fingerprint_libs': list(fingerprint_paths.keys())
            }

            logger.info(
                "✓ Tool %s finished - processed records: %d, updated: %d, not found: %d",
                tool_name,
                result.get('processed_records', 0),
                result.get('updated_count', 0),
                result.get('not_found_count', 0)
            )

        except Exception as exc:
            failed_tools.append({'tool': tool_name, 'reason': str(exc)})
            logger.error("Tool %s failed: %s", tool_name, exc, exc_info=True)

    if failed_tools:
        logger.warning(
            "The following fingerprint detection tools failed: %s",
            ', '.join([f['tool'] for f in failed_tools])
        )

    return tool_stats, failed_tools


@flow(
    name="fingerprint_detect",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def fingerprint_detect_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: dict
) -> dict:
    """
    Fingerprint detection Flow

    Main responsibilities:
    1. Export every WebSite URL under the target from the database to a file
    2. Identify the technology stack with xingfinger
    3. Parse the results and update the WebSite.tech field (merged and deduplicated)

    Workflow:
    Step 0: create the working directory
    Step 1: export the URL list
    Step 2: parse the configuration and collect the enabled tools
    Step 3: run xingfinger and parse the results

    Args:
        scan_id: scan task ID
        target_name: target name
        target_id: target ID
        scan_workspace_dir: scan workspace directory
        enabled_tools: enabled tool configuration (xingfinger)

    Returns:
        dict: {
            'success': bool,
            'scan_id': int,
            'target': str,
            'scan_workspace_dir': str,
            'urls_file': str,
            'url_count': int,
            'processed_records': int,
            'updated_count': int,
            'created_count': int,
            'snapshot_count': int,
            'executed_tasks': list,
            'tool_stats': dict
        }
    """
    try:
        logger.info(
            "="*60 + "\n" +
            "Starting fingerprint detection\n" +
            f"  Scan ID: {scan_id}\n" +
            f"  Target: {target_name}\n" +
            f"  Workspace: {scan_workspace_dir}\n" +
            "="*60
        )

        # Argument validation
        if scan_id is None:
            raise ValueError("scan_id must not be empty")
        if not target_name:
            raise ValueError("target_name must not be empty")
        if target_id is None:
            raise ValueError("target_id must not be empty")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir must not be empty")

        # Data source type (only 'website' is supported for now)
        source = 'website'

        # Step 0: create the working directory
        from apps.scan.utils import setup_scan_directory
        fingerprint_dir = setup_scan_directory(scan_workspace_dir, 'fingerprint_detect')

        # Step 1: export the URLs (supports lazy loading)
        urls_file, url_count = _export_urls(target_id, fingerprint_dir, source)

        if url_count == 0:
            logger.warning("Target has no usable URLs; skipping fingerprint detection")
            return {
                'success': True,
                'scan_id': scan_id,
                'target': target_name,
                'scan_workspace_dir': scan_workspace_dir,
                'urls_file': urls_file,
                'url_count': 0,
                'processed_records': 0,
                'updated_count': 0,
                'created_count': 0,
                'snapshot_count': 0,
                'executed_tasks': ['export_urls_for_fingerprint'],
                'tool_stats': {
                    'total': 0,
                    'successful': 0,
                    'failed': 0,
                    'successful_tools': [],
                    'failed_tools': [],
                    'details': {}
                }
            }

        # Step 2: tool configuration info
        logger.info("Step 2: tool configuration info")
        logger.info("✓ Enabled tools: %s", ', '.join(enabled_tools.keys()))

        # Step 3: run fingerprint detection
        logger.info("Step 3: run fingerprint detection")
        tool_stats, failed_tools = _run_fingerprint_detect(
            enabled_tools=enabled_tools,
            urls_file=urls_file,
            url_count=url_count,
            fingerprint_dir=fingerprint_dir,
            scan_id=scan_id,
            target_id=target_id,
            source=source
        )

        logger.info("="*60 + "\n✓ Fingerprint detection finished\n" + "="*60)

        # Build the list of executed tasks dynamically
        executed_tasks = ['export_urls_for_fingerprint']
        executed_tasks.extend([f'run_xingfinger ({tool})' for tool in tool_stats.keys()])

        # Aggregate the results of all tools
        total_processed = sum(stats['result'].get('processed_records', 0) for stats in tool_stats.values())
        total_updated = sum(stats['result'].get('updated_count', 0) for stats in tool_stats.values())
        total_created = sum(stats['result'].get('created_count', 0) for stats in tool_stats.values())
        total_snapshots = sum(stats['result'].get('snapshot_count', 0) for stats in tool_stats.values())

        successful_tools = [name for name in enabled_tools.keys()
                            if name not in [f['tool'] for f in failed_tools]]

        return {
            'success': True,
            'scan_id': scan_id,
            'target': target_name,
            'scan_workspace_dir': scan_workspace_dir,
            'urls_file': urls_file,
            'url_count': url_count,
            'processed_records': total_processed,
            'updated_count': total_updated,
            'created_count': total_created,
            'snapshot_count': total_snapshots,
            'executed_tasks': executed_tasks,
            'tool_stats': {
                'total': len(enabled_tools),
                'successful': len(successful_tools),
                'failed': len(failed_tools),
                'successful_tools': successful_tools,
                'failed_tools': failed_tools,
                'details': tool_stats
            }
        }

    except ValueError as e:
        logger.error("Configuration error: %s", e)
        raise
    except RuntimeError as e:
        logger.error("Runtime error: %s", e)
        raise
    except Exception as e:
        logger.exception("Fingerprint detection failed: %s", e)
        raise
@@ -1,282 +0,0 @@
"""
|
||||
扫描初始化 Flow
|
||||
|
||||
负责编排扫描任务的初始化流程
|
||||
|
||||
职责:
|
||||
- 使用 FlowOrchestrator 解析 YAML 配置
|
||||
- 在 Prefect Flow 中执行子 Flow(Subflow)
|
||||
- 按照 YAML 顺序编排工作流
|
||||
- 不包含具体业务逻辑(由 Tasks 和 FlowOrchestrator 实现)
|
||||
|
||||
架构:
|
||||
- Flow: Prefect 编排层(本文件)
|
||||
- FlowOrchestrator: 配置解析和执行计划(apps/scan/services/)
|
||||
- Tasks: 执行层(apps/scan/tasks/)
|
||||
- Handlers: 状态管理(apps/scan/handlers/)
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
# 注意:动态扫描容器应使用 run_initiate_scan.py 启动,以便在导入前设置环境变量
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
from prefect import flow, task
|
||||
from pathlib import Path
|
||||
import logging
|
||||
|
||||
from apps.scan.handlers import (
|
||||
on_initiate_scan_flow_running,
|
||||
on_initiate_scan_flow_completed,
|
||||
on_initiate_scan_flow_failed,
|
||||
)
|
||||
from prefect.futures import wait
|
||||
from apps.scan.utils import setup_scan_workspace
|
||||
from apps.scan.orchestrators import FlowOrchestrator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(name="run_subflow")
|
||||
def _run_subflow_task(scan_type: str, flow_func, flow_kwargs: dict):
|
||||
"""包装子 Flow 的 Task,用于在并行阶段并发执行子 Flow。"""
|
||||
logger.info("开始执行子 Flow: %s", scan_type)
|
||||
return flow_func(**flow_kwargs)
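
# --- Illustrative sketch (not from the repository) ---
# Why the task wrapper matters: calling a flow function directly blocks the
# caller, but a @task wrapper exposes .submit(), so several subflows can run
# concurrently and be awaited together, mirroring the _run_subflow_task pattern
# above. A minimal hedged example; `double_flow` and `fan_out` are hypothetical.
@flow
def double_flow(n: int) -> int:
    return n * 2

@task
def _run_double(n: int) -> int:
    return double_flow(n)

@flow
def fan_out(values: list[int]) -> list[int]:
    futures = [_run_double.submit(v) for v in values]
    wait(futures)  # block until every wrapped subflow finishes
    return [f.result() for f in futures]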


@flow(
    name='initiate_scan',
    description='Scan task initialization flow',
    log_prints=True,
    on_running=[on_initiate_scan_flow_running],
    on_completion=[on_initiate_scan_flow_completed],
    on_failure=[on_initiate_scan_flow_failed],
)
def initiate_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    engine_name: str,
    scheduled_scan_name: str | None = None,
) -> dict:
    """
    Initialize a scan task (dynamic workflow orchestration)

    Dynamically orchestrates the workflow from the YAML configuration:
    - Fetch engine_config (YAML) from the database
    - Detect the enabled scan types
    - Execute the defined stages in order:
      Stage 1: Discovery (sequential)
        - subdomain_discovery
        - port_scan
        - site_scan
      Stage 2: Analysis (parallel)
        - url_fetch
        - directory_scan

    Args:
        scan_id: scan task ID
        target_name: target name
        target_id: target ID
        scan_workspace_dir: scan workspace directory path
        engine_name: engine name (for display)
        scheduled_scan_name: scheduled scan name (optional, shown in notifications)

    Returns:
        dict: execution result summary

    Raises:
        ValueError: argument validation failed or the configuration is invalid
        RuntimeError: execution failure
    """
    try:
        # ==================== Argument validation ====================
        if not scan_id:
            raise ValueError("scan_id is required")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir is required")
        if not engine_name:
            raise ValueError("engine_name is required")

        logger.info(
            "="*60 + "\n" +
            "Initializing scan task\n" +
            f"  Scan ID: {scan_id}\n" +
            f"  Target: {target_name}\n" +
            f"  Engine: {engine_name}\n" +
            f"  Workspace: {scan_workspace_dir}\n" +
            "="*60
        )

        # ==================== Task 1: create the scan workspace ====================
        scan_workspace_path = setup_scan_workspace(scan_workspace_dir)

        # ==================== Task 2: fetch the engine configuration ====================
        from apps.scan.models import Scan
        scan = Scan.objects.get(id=scan_id)
        engine_config = scan.merged_configuration

        # Use engine_names for display
        display_engine_name = ', '.join(scan.engine_names) if scan.engine_names else engine_name

        # ==================== Task 3: parse the configuration and build the execution plan ====================
        orchestrator = FlowOrchestrator(engine_config)

        # FlowOrchestrator has already parsed every tool configuration
        enabled_tools_by_type = orchestrator.enabled_tools_by_type

        logger.info(
            f"Execution plan generated:\n"
            f"  Scan types: {' → '.join(orchestrator.scan_types)}\n"
            f"  {len(orchestrator.scan_types)} Flows in total"
        )

        # ==================== Initialize stage progress ====================
        # Initialize right after parsing the configuration, once the full scan_types list is known
        from apps.scan.services import ScanService
        scan_service = ScanService()
        scan_service.init_stage_progress(scan_id, orchestrator.scan_types)
        logger.info(f"✓ Stage progress initialized - Stages: {orchestrator.scan_types}")

        # ==================== Update the Target's last-scanned time ====================
        # Updated when the scan starts, i.e. "when the last scan began"
        from apps.targets.services import TargetService
        target_service = TargetService()
        target_service.update_last_scanned_at(target_id)
        logger.info(f"✓ Updated Target last-scanned time - Target ID: {target_id}")

        # ==================== Task 4: execute the Flows (dynamic stage execution) ====================
        # Note: per-stage status updates are handled automatically by scan_flow_handlers.py (running/completed/failed)
        executed_flows = []
        results = {}

        # Common execution arguments
        flow_kwargs = {
            'scan_id': scan_id,
            'target_name': target_name,
            'target_id': target_id,
            'scan_workspace_dir': str(scan_workspace_path)
        }

        def record_flow_result(scan_type, result=None, error=None):
            """
            Unified result recording

            Args:
                scan_type: scan type name
                result: execution result (on success)
                error: exception object (on failure)
            """
            if error:
                # Failure: log the error but do not raise, so later stages keep running
                error_msg = f"{scan_type} failed: {str(error)}"
                logger.warning(error_msg)
                executed_flows.append(f"{scan_type} (failed)")
                results[scan_type] = {'success': False, 'error': str(error)}
                # Do not re-raise; let the scan continue
            else:
                # Success
                executed_flows.append(scan_type)
                results[scan_type] = result
                logger.info(f"✓ {scan_type} succeeded")

        def get_valid_flows(flow_names):
            """
            Collect the valid Flow functions and prepare per-Flow arguments

            Args:
                flow_names: list of scan type names

            Returns:
                list: [(scan_type, flow_func, flow_specific_kwargs), ...]
            """
            valid_flows = []
            for scan_type in flow_names:
                flow_func = orchestrator.get_flow_function(scan_type)
                if flow_func:
                    # Prepare per-Flow arguments (including the matching enabled_tools)
                    flow_specific_kwargs = dict(flow_kwargs)
                    flow_specific_kwargs['enabled_tools'] = enabled_tools_by_type.get(scan_type, {})
                    valid_flows.append((scan_type, flow_func, flow_specific_kwargs))
                else:
                    logger.warning(f"Skipping unimplemented Flow: {scan_type}")
            return valid_flows

        # ---------------------------------------------------------
        # Dynamic stage execution (driven by the FlowOrchestrator definition)
        # ---------------------------------------------------------
        for mode, enabled_flows in orchestrator.get_execution_stages():
            if mode == 'sequential':
                # Sequential execution
                logger.info(f"\n{'='*60}\nSequential stage: {', '.join(enabled_flows)}\n{'='*60}")
                for scan_type, flow_func, flow_specific_kwargs in get_valid_flows(enabled_flows):
                    logger.info(f"\n{'='*60}\nRunning Flow: {scan_type}\n{'='*60}")
                    try:
                        result = flow_func(**flow_specific_kwargs)
                        record_flow_result(scan_type, result=result)
                    except Exception as e:
                        record_flow_result(scan_type, error=e)

            elif mode == 'parallel':
                # Parallel stage: wrap sub-Flows in Tasks and run them concurrently via the Prefect TaskRunner
                logger.info(f"\n{'='*60}\nParallel stage: {', '.join(enabled_flows)}\n{'='*60}")
                futures = []

                # Submit every parallel sub-Flow task
                for scan_type, flow_func, flow_specific_kwargs in get_valid_flows(enabled_flows):
                    logger.info(f"\n{'='*60}\nSubmitting parallel sub-Flow task: {scan_type}\n{'='*60}")
                    future = _run_subflow_task.submit(
                        scan_type=scan_type,
                        flow_func=flow_func,
                        flow_kwargs=flow_specific_kwargs,
                    )
                    futures.append((scan_type, future))

                # Wait for every parallel sub-Flow to finish
                if futures:
                    wait([f for _, f in futures])

                # Check the results (reusing the unified result handling)
                for scan_type, future in futures:
                    try:
                        result = future.result()
                        record_flow_result(scan_type, result=result)
                    except Exception as e:
                        record_flow_result(scan_type, error=e)

        # ==================== Done ====================
        logger.info(
            "="*60 + "\n" +
            "✓ Scan task initialization finished\n" +
            f"  Executed Flows: {', '.join(executed_flows)}\n" +
            "="*60
        )

        # ==================== Return the result ====================
        return {
            'success': True,
            'scan_id': scan_id,
            'target': target_name,
            'scan_workspace_dir': str(scan_workspace_path),
            'executed_flows': executed_flows,
            'results': results
        }

    except ValueError as e:
        # Argument error
        logger.error("Argument error: %s", e)
        raise
    except RuntimeError as e:
        # Execution failure
        logger.error("Runtime error: %s", e)
        raise
    except OSError as e:
        # File system error (workspace creation failed)
        logger.error("File system error: %s", e)
        raise
    except Exception as e:
        # Other unexpected errors
        logger.exception("Scan task initialization failed: %s", e)
        # Note: failure status updates are handled automatically by the Prefect state handlers
        raise
@@ -1,478 +0,0 @@
|
||||
站点扫描 Flow
|
||||
|
||||
负责编排站点扫描的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 支持串行执行扫描工具(流式处理)
|
||||
- 每个 Task 可独立重试
|
||||
- 配置由 YAML 解析
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Callable
|
||||
from prefect import flow
|
||||
from apps.scan.tasks.site_scan import export_site_urls_task, run_and_stream_save_websites_task
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.utils import config_parser, build_scan_command
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def calculate_timeout_by_line_count(
|
||||
tool_config: dict,
|
||||
file_path: str,
|
||||
base_per_time: int = 1,
|
||||
min_timeout: int = 60
|
||||
) -> int:
|
||||
"""
|
||||
根据文件行数计算 timeout
|
||||
|
||||
使用 wc -l 统计文件行数,根据行数和每行基础时间计算 timeout
|
||||
|
||||
Args:
|
||||
tool_config: 工具配置字典(此函数未使用,但保持接口一致性)
|
||||
file_path: 要统计行数的文件路径
|
||||
base_per_time: 每行的基础时间(秒),默认1秒
|
||||
min_timeout: 最小超时时间(秒),默认60秒
|
||||
|
||||
Returns:
|
||||
int: 计算出的超时时间(秒),不低于 min_timeout
|
||||
|
||||
Example:
|
||||
timeout = calculate_timeout_by_line_count(
|
||||
tool_config={},
|
||||
file_path='/path/to/urls.txt',
|
||||
base_per_time=2
|
||||
)
|
||||
"""
|
||||
try:
|
||||
# 使用 wc -l 快速统计行数
|
||||
result = subprocess.run(
|
||||
['wc', '-l', file_path],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True
|
||||
)
|
||||
# wc -l 输出格式:行数 + 空格 + 文件名
|
||||
line_count = int(result.stdout.strip().split()[0])
|
||||
|
||||
# 计算 timeout:行数 × 每行基础时间,不低于最小值
|
||||
timeout = max(line_count * base_per_time, min_timeout)
|
||||
|
||||
logger.info(
|
||||
f"timeout 自动计算: 文件={file_path}, "
|
||||
f"行数={line_count}, 每行时间={base_per_time}秒, 最小值={min_timeout}秒, timeout={timeout}秒"
|
||||
)
|
||||
|
||||
return timeout
|
||||
|
||||
except Exception as e:
|
||||
# 如果 wc -l 失败,使用默认值
|
||||
logger.warning(f"wc -l 计算行数失败: {e},使用默认 timeout: {min_timeout}秒")
|
||||
return min_timeout
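
# Worked example (illustrative, not from the repository): `wc -l urls.txt`
# prints "1234 urls.txt", so the split above yields line_count = 1234; with
# base_per_time=1 and min_timeout=60 the result is max(1234 * 1, 60) = 1234 s.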


def _export_site_urls(target_id: int, site_scan_dir: Path, target_name: str = None) -> tuple[str, int, int]:
    """
    Export the site URLs to a file

    Args:
        target_id: target ID
        site_scan_dir: site scan directory
        target_name: target name (used to write a default value when lazy loading)

    Returns:
        tuple: (urls_file, total_urls, association_count)

    Raises:
        ValueError: the URL count is 0
    """
    logger.info("Step 1: export the site URL list")

    urls_file = str(site_scan_dir / 'site_urls.txt')
    export_result = export_site_urls_task(
        target_id=target_id,
        output_file=urls_file,
        batch_size=1000  # process 1000 subdomains per batch
    )

    total_urls = export_result['total_urls']
    association_count = export_result['association_count']  # number of host-port associations

    logger.info(
        "✓ Site URL export finished - file: %s, URLs: %d, associations: %d",
        export_result['output_file'],
        total_urls,
        association_count
    )

    if total_urls == 0:
        logger.warning("Target has no usable site URLs; the site scan cannot run")
        # Do not raise; the caller decides how to handle this
        # raise ValueError("Target has no usable site URLs; the site scan cannot run")

    return export_result['output_file'], total_urls, association_count


def _run_scans_sequentially(
    enabled_tools: dict,
    urls_file: str,
    total_urls: int,
    site_scan_dir: Path,
    scan_id: int,
    target_id: int,
    target_name: str
) -> tuple[dict, int, list, list]:
    """
    Run the site scan tasks sequentially

    Args:
        enabled_tools: dict of enabled tool configurations
        urls_file: path to the URL file
        total_urls: total number of URLs
        site_scan_dir: site scan directory
        scan_id: scan task ID
        target_id: target ID
        target_name: target name (for error logs)

    Returns:
        tuple: (tool_stats, processed_records, successful_tool_names, failed_tools)

    Raises:
        RuntimeError: every tool failed
    """
    tool_stats = {}
    processed_records = 0
    failed_tools = []

    for tool_name, tool_config in enabled_tools.items():
        # 1. Build the full command (variable substitution)
        try:
            command = build_scan_command(
                tool_name=tool_name,
                scan_type='site_scan',
                command_params={
                    'url_file': urls_file
                },
                tool_config=tool_config
            )
        except Exception as e:
            reason = f"Command build failed: {str(e)}"
            logger.error(f"Failed to build {tool_name} command: {e}")
            failed_tools.append({'tool': tool_name, 'reason': reason})
            continue

        # 2. Resolve the timeout (supports dynamic calculation via 'auto')
        config_timeout = tool_config.get('timeout', 300)
        if config_timeout == 'auto':
            # Calculate the timeout dynamically
            timeout = calculate_timeout_by_line_count(tool_config, urls_file, base_per_time=1)
            logger.info(f"✓ Tool {tool_name} dynamically calculated timeout: {timeout}s")
        else:
            # Use the larger of the configured timeout and the dynamically calculated one
            dynamic_timeout = calculate_timeout_by_line_count(tool_config, urls_file, base_per_time=1)
            timeout = max(dynamic_timeout, config_timeout)

        # 2.1 Generate the log file path (similar to the port scan)
        from datetime import datetime
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        log_file = site_scan_dir / f"{tool_name}_{timestamp}.log"

        logger.info(
            "Starting %s site scan - URLs: %d, final timeout: %ds",
            tool_name, total_urls, timeout
        )

        # 3. Run the scan task
        try:
            # Stream the scan and save the results in real time
            result = run_and_stream_save_websites_task(
                cmd=command,
                tool_name=tool_name,  # new: tool name
                scan_id=scan_id,
                target_id=target_id,
                cwd=str(site_scan_dir),
                shell=True,
                batch_size=1000,
                timeout=timeout,
                log_file=str(log_file)  # new: log file path
            )

            tool_stats[tool_name] = {
                'command': command,
                'result': result,
                'timeout': timeout
            }
            processed_records += result.get('processed_records', 0)

            logger.info(
                "✓ Tool %s streaming finished - processed records: %d, created sites: %d, skipped: %d",
                tool_name,
                result.get('processed_records', 0),
                result.get('created_websites', 0),
                result.get('skipped_no_subdomain', 0) + result.get('skipped_failed', 0)
            )

        except subprocess.TimeoutExpired as exc:
            # Timeouts are handled separately
            reason = f"Timed out (configured: {timeout}s)"
            failed_tools.append({'tool': tool_name, 'reason': reason})
            logger.warning(
                "⚠️ Tool %s timed out - configured timeout: %ds\n"
                "Note: site data parsed before the timeout has been saved to the database, but the scan did not fully complete.",
                tool_name, timeout
            )
        except Exception as exc:
            # Other exceptions
            failed_tools.append({'tool': tool_name, 'reason': str(exc)})
            logger.error("Tool %s failed: %s", tool_name, exc, exc_info=True)

    if failed_tools:
        logger.warning(
            "The following scan tools failed: %s",
            ', '.join([f['tool'] for f in failed_tools])
        )

    if not tool_stats:
        error_details = "; ".join([f"{f['tool']}: {f['reason']}" for f in failed_tools])
        logger.warning("All site scan tools failed - target: %s, failed tools: %s", target_name, error_details)
        # Return an empty result instead of raising, so the scan can continue
        return {}, 0, [], failed_tools

    # Compute the list of successful tools
    successful_tool_names = [name for name in enabled_tools.keys()
                             if name not in [f['tool'] for f in failed_tools]]

    logger.info(
        "✓ Sequential site scan finished - succeeded: %d/%d (succeeded: %s, failed: %s)",
        len(tool_stats), len(enabled_tools),
        ', '.join(successful_tool_names) if successful_tool_names else 'none',
        ', '.join([f['tool'] for f in failed_tools]) if failed_tools else 'none'
    )

    return tool_stats, processed_records, successful_tool_names, failed_tools


def calculate_timeout(url_count: int, base: int = 600, per_url: int = 1) -> int:
    """
    Dynamically calculate the scan timeout from the URL count

    Rules:
    - Base time: 600 seconds by default (10 minutes)
    - Extra time per URL: 1 second by default

    Args:
        url_count: number of URLs, must be a positive integer
        base: base timeout in seconds, default 600
        per_url: extra seconds per URL, default 1

    Returns:
        int: the calculated timeout in seconds (no upper bound)

    Raises:
        ValueError: when url_count is negative or 0
    """
    if url_count < 0:
        raise ValueError(f"URL count must not be negative: {url_count}")
    if url_count == 0:
        raise ValueError("URL count must not be 0")

    timeout = base + int(url_count * per_url)

    # No upper bound; the caller imposes one if needed
    return timeout
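
# Worked example (illustrative, not from the repository): with the defaults
# (base=600, per_url=1), calculate_timeout(1200) returns 600 + 1200 = 1800 s,
# and calculate_timeout(1) returns 601 s.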


@flow(
    name="site_scan",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def site_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: dict
) -> dict:
    """
    Site scan Flow

    Main responsibilities:
    1. Fetch every subdomain and its ports from the target, join them into URLs, and write them to a file
    2. Batch-request the URLs with httpx and save the results to the database in real time (streaming)

    Workflow:
    Step 0: create the working directory
    Step 1: export the site URL list
    Step 2: parse the configuration and collect the enabled tools
    Step 3: run the scan tools sequentially and save results in real time

    Args:
        scan_id: scan task ID
        target_name: target name
        target_id: target ID
        scan_workspace_dir: scan workspace directory
        enabled_tools: dict of enabled tool configurations

    Returns:
        dict: {
            'success': bool,
            'scan_id': int,
            'target': str,
            'scan_workspace_dir': str,
            'urls_file': str,
            'total_urls': int,
            'association_count': int,
            'processed_records': int,
            'created_websites': int,
            'skipped_no_subdomain': int,
            'skipped_failed': int,
            'executed_tasks': list,
            'tool_stats': {
                'total': int,
                'successful': int,
                'failed': int,
                'successful_tools': list[str],
                'failed_tools': list[dict]
            }
        }

    Raises:
        ValueError: configuration error
        RuntimeError: execution failure
    """
    try:
        logger.info(
            "="*60 + "\n" +
            "Starting site scan\n" +
            f"  Scan ID: {scan_id}\n" +
            f"  Target: {target_name}\n" +
            f"  Workspace: {scan_workspace_dir}\n" +
            "="*60
        )

        # Argument validation
        if scan_id is None:
            raise ValueError("scan_id must not be empty")
        if not target_name:
            raise ValueError("target_name must not be empty")
        if target_id is None:
            raise ValueError("target_id must not be empty")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir must not be empty")

        # Step 0: create the working directory
        from apps.scan.utils import setup_scan_directory
        site_scan_dir = setup_scan_directory(scan_workspace_dir, 'site_scan')

        # Step 1: export the site URLs
        urls_file, total_urls, association_count = _export_site_urls(
            target_id, site_scan_dir, target_name
        )

        if total_urls == 0:
            logger.warning("Target has no usable site URLs; skipping site scan")
            return {
                'success': True,
                'scan_id': scan_id,
                'target': target_name,
                'scan_workspace_dir': scan_workspace_dir,
                'urls_file': urls_file,
                'total_urls': 0,
                'association_count': association_count,
                'processed_records': 0,
                'created_websites': 0,
                'skipped_no_subdomain': 0,
                'skipped_failed': 0,
                'executed_tasks': ['export_site_urls'],
                'tool_stats': {
                    'total': 0,
                    'successful': 0,
                    'failed': 0,
                    'successful_tools': [],
                    'failed_tools': [],
                    'details': {}
                }
            }

        # Step 2: tool configuration info
        logger.info("Step 2: tool configuration info")
        logger.info(
            "✓ Enabled tools: %s",
            ', '.join(enabled_tools.keys())
        )

        # Step 3: run the scan tools sequentially
        logger.info("Step 3: run the scan tools sequentially and save results in real time")
        tool_stats, processed_records, successful_tool_names, failed_tools = _run_scans_sequentially(
            enabled_tools=enabled_tools,
            urls_file=urls_file,
            total_urls=total_urls,
            site_scan_dir=site_scan_dir,
            scan_id=scan_id,
            target_id=target_id,
            target_name=target_name
        )

        logger.info("="*60 + "\n✓ Site scan finished\n" + "="*60)

        # Build the list of executed tasks dynamically
        executed_tasks = ['export_site_urls', 'parse_config']
        executed_tasks.extend([f'run_and_stream_save_websites ({tool})' for tool in tool_stats.keys()])

        # Aggregate the results of all tools
        total_created = sum(stats['result'].get('created_websites', 0) for stats in tool_stats.values())
        total_skipped_no_subdomain = sum(stats['result'].get('skipped_no_subdomain', 0) for stats in tool_stats.values())
        total_skipped_failed = sum(stats['result'].get('skipped_failed', 0) for stats in tool_stats.values())

        return {
            'success': True,
            'scan_id': scan_id,
            'target': target_name,
            'scan_workspace_dir': scan_workspace_dir,
            'urls_file': urls_file,
            'total_urls': total_urls,
            'association_count': association_count,
            'processed_records': processed_records,
            'created_websites': total_created,
            'skipped_no_subdomain': total_skipped_no_subdomain,
            'skipped_failed': total_skipped_failed,
            'executed_tasks': executed_tasks,
            'tool_stats': {
                'total': len(enabled_tools),
                'successful': len(successful_tool_names),
                'failed': len(failed_tools),
                'successful_tools': successful_tool_names,
                'failed_tools': failed_tools,
                'details': tool_stats
            }
        }

    except ValueError as e:
        logger.error("Configuration error: %s", e)
        raise
    except RuntimeError as e:
        logger.error("Runtime error: %s", e)
        raise
    except Exception as e:
        logger.exception("Site scan failed: %s", e)
        raise
@@ -1,744 +0,0 @@
"""
|
||||
子域名发现扫描 Flow
|
||||
|
||||
负责编排子域名发现扫描的完整流程
|
||||
|
||||
架构:
|
||||
- Flow 负责编排多个原子 Task
|
||||
- 支持并行执行扫描工具
|
||||
- 每个 Task 可独立重试
|
||||
- 配置由 YAML 解析
|
||||
|
||||
增强流程(4 阶段):
|
||||
Stage 1: 被动收集(并行) - 必选
|
||||
Stage 2: 字典爆破(可选) - 子域名字典爆破
|
||||
Stage 3: 变异生成 + 验证(可选) - dnsgen + 通用存活验证
|
||||
Stage 4: DNS 存活验证(可选) - 通用存活验证
|
||||
|
||||
各阶段可灵活开关,最终结果根据实际执行的阶段动态决定
|
||||
"""
|
||||
|
||||
# Django 环境初始化(导入即生效)
|
||||
from apps.common.prefect_django_setup import setup_django_for_prefect
|
||||
|
||||
from prefect import flow
|
||||
from pathlib import Path
|
||||
import logging
|
||||
import os
|
||||
from apps.scan.handlers.scan_flow_handlers import (
|
||||
on_scan_flow_running,
|
||||
on_scan_flow_completed,
|
||||
on_scan_flow_failed,
|
||||
)
|
||||
from apps.scan.utils import build_scan_command, ensure_wordlist_local
|
||||
from apps.engine.services.wordlist_service import WordlistService
|
||||
from apps.common.normalizer import normalize_domain
|
||||
from apps.common.validators import validate_domain
|
||||
from datetime import datetime
|
||||
import uuid
|
||||
import subprocess
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def _validate_and_normalize_target(target_name: str) -> str:
|
||||
"""
|
||||
验证并规范化目标域名
|
||||
|
||||
Args:
|
||||
target_name: 原始目标域名
|
||||
|
||||
Returns:
|
||||
str: 规范化后的域名
|
||||
|
||||
Raises:
|
||||
ValueError: 域名无效时抛出异常
|
||||
|
||||
Example:
|
||||
>>> _validate_and_normalize_target('EXAMPLE.COM')
|
||||
'example.com'
|
||||
>>> _validate_and_normalize_target('http://example.com')
|
||||
'example.com'
|
||||
"""
|
||||
try:
|
||||
normalized_target = normalize_domain(target_name)
|
||||
validate_domain(normalized_target)
|
||||
logger.debug("域名验证通过: %s -> %s", target_name, normalized_target)
|
||||
return normalized_target
|
||||
except ValueError as e:
|
||||
error_msg = f"无效的目标域名: {target_name} - {e}"
|
||||
logger.error(error_msg)
|
||||
raise ValueError(error_msg) from e
|
||||
|
||||
|
||||
def _run_scans_parallel(
|
||||
enabled_tools: dict,
|
||||
domain_name: str,
|
||||
result_dir: Path
|
||||
) -> tuple[list, list, list]:
|
||||
"""
|
||||
并行运行所有启用的子域名扫描工具
|
||||
|
||||
Args:
|
||||
enabled_tools: 启用的工具配置字典 {'tool_name': {'timeout': 600, ...}}
|
||||
domain_name: 目标域名
|
||||
result_dir: 结果输出目录
|
||||
|
||||
Returns:
|
||||
tuple: (result_files, failed_tools, successful_tool_names)
|
||||
|
||||
Raises:
|
||||
RuntimeError: 所有工具均失败
|
||||
"""
|
||||
# 导入任务函数
|
||||
from apps.scan.tasks.subdomain_discovery import run_subdomain_discovery_task
|
||||
|
||||
# 生成时间戳(所有工具共用)
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
||||
|
||||
failures = [] # 记录命令构建失败的工具
|
||||
futures = {}
|
||||
|
||||
# 1. 构建命令并提交并行任务
|
||||
for tool_name, tool_config in enabled_tools.items():
|
||||
# 1.1 生成唯一的输出文件路径(绝对路径)
|
||||
short_uuid = uuid.uuid4().hex[:4]
|
||||
output_file = str(result_dir / f"{tool_name}_{timestamp}_{short_uuid}.txt")
|
||||
|
||||
# 1.2 构建完整命令(变量替换)
|
||||
try:
|
||||
command = build_scan_command(
|
||||
tool_name=tool_name,
|
||||
scan_type='subdomain_discovery',
|
||||
command_params={
|
||||
'domain': domain_name, # 对应 {domain}
|
||||
'output_file': output_file # 对应 {output_file}
|
||||
},
|
||||
tool_config=tool_config
|
||||
)
|
||||
except Exception as e:
|
||||
failure_msg = f"{tool_name}: 命令构建失败 - {e}"
|
||||
failures.append(failure_msg)
|
||||
logger.error(f"构建 {tool_name} 命令失败: {e}")
|
||||
continue
|
||||
|
||||
# 1.3 获取超时时间(支持 'auto' 动态计算)
|
||||
timeout = tool_config['timeout']
|
||||
if timeout == 'auto':
|
||||
# 子域名发现工具通常运行时间较长,使用默认值 600 秒
|
||||
timeout = 600
|
||||
logger.info(f"✓ 工具 {tool_name} 使用默认 timeout: {timeout}秒")
|
||||
|
||||
# 1.4 提交任务
|
||||
logger.debug(
|
||||
f"提交任务 - 工具: {tool_name}, 超时: {timeout}s, 输出: {output_file}"
|
||||
)
|
||||
|
||||
future = run_subdomain_discovery_task.submit(
|
||||
tool=tool_name,
|
||||
command=command,
|
||||
timeout=timeout,
|
||||
output_file=output_file
|
||||
)
|
||||
futures[tool_name] = future
|
||||
|
||||
# 2. 检查是否有任何工具成功提交
|
||||
if not futures:
|
||||
logger.warning(
|
||||
"所有扫描工具均无法启动 - 目标: %s, 失败详情: %s",
|
||||
domain_name, "; ".join(failures)
|
||||
)
|
||||
# 返回空结果,不抛出异常,让扫描继续
|
||||
return [], [{'tool': 'all', 'reason': '所有工具均无法启动'}], []
|
||||
|
||||
# 3. 等待并行任务完成,获取结果
|
||||
result_files = []
|
||||
failed_tools = []
|
||||
|
||||
for tool_name, future in futures.items():
|
||||
try:
|
||||
result = future.result() # 返回文件路径(字符串)或 ""(失败)
|
||||
if result:
|
||||
result_files.append(result)
|
||||
logger.info("✓ 扫描工具 %s 执行成功: %s", tool_name, result)
|
||||
else:
|
||||
failure_msg = f"{tool_name}: 未生成结果文件"
|
||||
failures.append(failure_msg)
|
||||
failed_tools.append({'tool': tool_name, 'reason': '未生成结果文件'})
|
||||
logger.warning("⚠️ 扫描工具 %s 未生成结果文件", tool_name)
|
||||
except Exception as e:
|
||||
failure_msg = f"{tool_name}: {str(e)}"
|
||||
failures.append(failure_msg)
|
||||
failed_tools.append({'tool': tool_name, 'reason': str(e)})
|
||||
logger.warning("⚠️ 扫描工具 %s 执行失败: %s", tool_name, str(e))
|
||||
|
||||
# 4. 检查是否有成功的工具
|
||||
if not result_files:
|
||||
logger.warning(
|
||||
"所有扫描工具均失败 - 目标: %s, 失败详情: %s",
|
||||
domain_name, "; ".join(failures)
|
||||
)
|
||||
# 返回空结果,不抛出异常,让扫描继续
|
||||
return [], failed_tools, []
|
||||
|
||||
# 5. 动态计算成功的工具列表
|
||||
successful_tool_names = [name for name in futures.keys()
|
||||
if name not in [f['tool'] for f in failed_tools]]
|
||||
|
||||
logger.info(
|
||||
"✓ 扫描工具并行执行完成 - 成功: %d/%d (成功: %s, 失败: %s)",
|
||||
len(result_files), len(futures),
|
||||
', '.join(successful_tool_names) if successful_tool_names else '无',
|
||||
', '.join([f['tool'] for f in failed_tools]) if failed_tools else '无'
|
||||
)
|
||||
|
||||
return result_files, failed_tools, successful_tool_names
|
||||
|
||||
|
||||
def _run_single_tool(
|
||||
tool_name: str,
|
||||
tool_config: dict,
|
||||
command_params: dict,
|
||||
result_dir: Path,
|
||||
scan_type: str = 'subdomain_discovery'
|
||||
) -> str:
|
||||
"""
|
||||
运行单个扫描工具
|
||||
|
||||
Args:
|
||||
tool_name: 工具名称
|
||||
tool_config: 工具配置
|
||||
command_params: 命令参数
|
||||
result_dir: 结果目录
|
||||
scan_type: 扫描类型
|
||||
|
||||
Returns:
|
||||
str: 输出文件路径,失败返回空字符串
|
||||
"""
|
||||
from apps.scan.tasks.subdomain_discovery import run_subdomain_discovery_task
|
||||
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
||||
short_uuid = uuid.uuid4().hex[:4]
|
||||
output_file = str(result_dir / f"{tool_name}_{timestamp}_{short_uuid}.txt")
|
||||
|
||||
# 添加 output_file 到参数
|
||||
command_params['output_file'] = output_file
|
||||
|
||||
try:
|
||||
command = build_scan_command(
|
||||
tool_name=tool_name,
|
||||
scan_type=scan_type,
|
||||
command_params=command_params,
|
||||
tool_config=tool_config
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"构建 {tool_name} 命令失败: {e}")
|
||||
return ""
|
||||
|
||||
timeout = tool_config.get('timeout', 3600)
|
||||
if timeout == 'auto':
|
||||
timeout = 3600
|
||||
|
||||
logger.info(f"执行 {tool_name}: timeout={timeout}s")
|
||||
|
||||
try:
|
||||
result = run_subdomain_discovery_task(
|
||||
tool=tool_name,
|
||||
command=command,
|
||||
timeout=timeout,
|
||||
output_file=output_file
|
||||
)
|
||||
return result if result else ""
|
||||
except Exception as e:
|
||||
logger.warning(f"{tool_name} 执行失败: {e}")
|
||||
return ""
|
||||
|
||||
|
||||
def _count_lines(file_path: str) -> int:
|
||||
"""
|
||||
统计文件非空行数
|
||||
|
||||
Args:
|
||||
file_path: 文件路径
|
||||
|
||||
Returns:
|
||||
int: 非空行数量
|
||||
"""
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
||||
return sum(1 for line in f if line.strip())
|
||||
except Exception as e:
|
||||
logger.warning(f"统计文件行数失败: {file_path} - {e}")
|
||||
return 0
|
||||
|
||||
|
||||
def _merge_files(file_list: list, output_file: str) -> str:
|
||||
"""
|
||||
合并多个文件并去重
|
||||
|
||||
Args:
|
||||
file_list: 文件路径列表
|
||||
output_file: 输出文件路径
|
||||
|
||||
Returns:
|
||||
str: 输出文件路径
|
||||
"""
|
||||
domains = set()
|
||||
for f in file_list:
|
||||
if f and Path(f).exists():
|
||||
with open(f, 'r', encoding='utf-8', errors='ignore') as fp:
|
||||
for line in fp:
|
||||
line = line.strip()
|
||||
if line:
|
||||
domains.add(line)
|
||||
|
||||
with open(output_file, 'w', encoding='utf-8') as fp:
|
||||
for domain in sorted(domains):
|
||||
fp.write(domain + '\n')
|
||||
|
||||
logger.info(f"合并完成: {len(domains)} 个域名 -> {output_file}")
|
||||
return output_file
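
# --- Illustrative usage (not from the repository; file names are hypothetical) ---
# merged = _merge_files(
#     ['subs_subfinder_20240101_000000_ab12.txt', 'subs_amass_20240101_000000_cd34.txt'],
#     'subs_passive_20240101_000000.txt',
# )
# The output holds the union of all non-empty input lines, deduplicated and sorted.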


@flow(
    name="subdomain_discovery",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def subdomain_discovery_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: dict
) -> dict:
    """Subdomain discovery scan flow

    Workflow (4 stages):
    Stage 1: passive collection (parallel) - required
    Stage 2: wordlist bruteforce (optional) - subdomain dictionary bruteforce
    Stage 3: permutation + validation (optional) - dnsgen + generic liveness validation
    Stage 4: DNS liveness validation (optional) - generic liveness validation
    Final: save to the database

    Notes:
    - Subdomain discovery only makes sense for DOMAIN-type targets
    - IP and CIDR targets are skipped automatically

    Args:
        scan_id: scan task ID
        target_name: target name (domain)
        target_id: target ID
        scan_workspace_dir: scan workspace directory (created by the Service layer)
        enabled_tools: scan configuration dict:
            {
                'passive_tools': {...},
                'bruteforce': {...},
                'permutation': {...},
                'resolve': {...}
            }

    Returns:
        dict: scan result

    Raises:
        ValueError: configuration error
        RuntimeError: execution failure
    """
    try:
        # ==================== Argument validation ====================
        if scan_id is None:
            raise ValueError("scan_id must not be empty")
        if target_id is None:
            raise ValueError("target_id must not be empty")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir must not be empty")
        if enabled_tools is None:
            raise ValueError("enabled_tools must not be empty")

        scan_config = enabled_tools

        # Skip the scan if no target domain was provided
        if not target_name:
            logger.warning("No target domain provided; skipping subdomain discovery")
            return _empty_result(scan_id, '', scan_workspace_dir)

        # ==================== Check the Target type ====================
        # Subdomain discovery only makes sense for DOMAIN targets; IP and CIDR targets are skipped
        from apps.targets.services import TargetService
        from apps.targets.models import Target

        target_service = TargetService()
        target = target_service.get_target(target_id)

        if target and target.type != Target.TargetType.DOMAIN:
            logger.info(
                "Skipping subdomain discovery: Target type is %s (ID=%d, Name=%s); subdomain discovery only applies to domain targets",
                target.type, target_id, target_name
            )
            return _empty_result(scan_id, target_name, scan_workspace_dir)

        # Import the task functions
        from apps.scan.tasks.subdomain_discovery import (
            run_subdomain_discovery_task,
            merge_and_validate_task,
            save_domains_task
        )

        # Step 0: preparation
        from apps.scan.utils import setup_scan_directory
        result_dir = setup_scan_directory(scan_workspace_dir, 'subdomain_discovery')

        # Validate and normalize the target domain
        try:
            domain_name = _validate_and_normalize_target(target_name)
        except ValueError as e:
            logger.warning("Invalid target domain; skipping subdomain discovery: %s", e)
            return _empty_result(scan_id, target_name, scan_workspace_dir)

        # Log after successful validation
        logger.info(
            "="*60 + "\n" +
            "Starting subdomain discovery\n" +
            f"  Scan ID: {scan_id}\n" +
            f"  Domain: {domain_name}\n" +
            f"  Workspace: {scan_workspace_dir}\n" +
            "="*60
        )

        # Parse the configuration
        passive_tools = scan_config.get('passive_tools', {})
        bruteforce_config = scan_config.get('bruteforce', {})
        permutation_config = scan_config.get('permutation', {})
        resolve_config = scan_config.get('resolve', {})

        # Filter the enabled passive tools
        enabled_passive_tools = {
            k: v for k, v in passive_tools.items()
            if v.get('enabled', True)
        }

        executed_tasks = []
        all_result_files = []
        failed_tools = []
        successful_tool_names = []

        # ==================== Stage 1: passive collection (parallel) ====================
        logger.info("=" * 40)
        logger.info("Stage 1: passive collection (parallel)")
        logger.info("=" * 40)

        if enabled_passive_tools:
            logger.info("Enabled tools: %s", ', '.join(enabled_passive_tools.keys()))
            result_files, stage1_failed, stage1_success = _run_scans_parallel(
                enabled_tools=enabled_passive_tools,
                domain_name=domain_name,
                result_dir=result_dir
            )
            all_result_files.extend(result_files)
            failed_tools.extend(stage1_failed)
            successful_tool_names.extend(stage1_success)
            executed_tasks.extend([f'passive ({tool})' for tool in stage1_success])
        else:
            logger.warning("No passive collection tool enabled")

        # Merge the Stage 1 results
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        current_result = str(result_dir / f"subs_passive_{timestamp}.txt")
        if all_result_files:
            current_result = _merge_files(all_result_files, current_result)
            executed_tasks.append('merge_passive')
        else:
            # Create an empty file
            Path(current_result).touch()
            logger.warning("Stage 1 produced no results; creating an empty file")

        # ==================== Stage 2: wordlist bruteforce (optional) ====================
        bruteforce_enabled = bruteforce_config.get('enabled', False)
        if bruteforce_enabled:
            logger.info("=" * 40)
            logger.info("Stage 2: wordlist bruteforce")
            logger.info("=" * 40)

            bruteforce_tool_config = bruteforce_config.get('subdomain_bruteforce', {})
            wordlist_name = bruteforce_tool_config.get('wordlist_name', 'dns_wordlist.txt')

            try:
                # Make sure the wordlist exists locally (with hash verification)
                local_wordlist_path = ensure_wordlist_local(wordlist_name)

                # Fetch the wordlist record to calculate the timeout
                wordlist_service = WordlistService()
                wordlist = wordlist_service.get_wordlist_by_name(wordlist_name)

                timeout_value = bruteforce_tool_config.get('timeout', 3600)
                if timeout_value == 'auto' and wordlist:
                    line_count = getattr(wordlist, 'line_count', None)
                    if line_count is None:
                        try:
                            with open(local_wordlist_path, 'rb') as f:
                                line_count = sum(1 for _ in f)
                        except OSError:
                            line_count = 0

                    try:
                        line_count_int = int(line_count)
                    except (TypeError, ValueError):
                        line_count_int = 0

                    timeout_value = line_count_int * 3 if line_count_int > 0 else 3600
                    bruteforce_tool_config = {
                        **bruteforce_tool_config,
                        'timeout': timeout_value,
                    }
                    logger.info(
                        "subdomain_bruteforce using auto timeout: %s s (wordlist lines=%s, 3 s/line)",
                        timeout_value,
                        line_count_int,
                    )

                brute_output = str(result_dir / f"subs_brute_{timestamp}.txt")
                brute_result = _run_single_tool(
                    tool_name='subdomain_bruteforce',
                    tool_config=bruteforce_tool_config,
                    command_params={
                        'domain': domain_name,
                        'wordlist': local_wordlist_path,
                        'output_file': brute_output
                    },
                    result_dir=result_dir
                )

                if brute_result:
                    # Merge Stage 1 + Stage 2
                    current_result = _merge_files(
                        [current_result, brute_result],
                        str(result_dir / f"subs_merged_{timestamp}.txt")
                    )
                    successful_tool_names.append('subdomain_bruteforce')
                    executed_tasks.append('bruteforce')
                else:
                    failed_tools.append({'tool': 'subdomain_bruteforce', 'reason': 'execution failed'})
            except Exception as exc:
                logger.warning("Wordlist preparation failed; skipping bruteforce: %s", exc)
                failed_tools.append({'tool': 'subdomain_bruteforce', 'reason': str(exc)})

        # ==================== Stage 3: permutation + validation (optional) ====================
        permutation_enabled = permutation_config.get('enabled', False)
        if permutation_enabled:
            logger.info("=" * 40)
            logger.info("Stage 3: permutation + liveness validation (streaming pipeline)")
            logger.info("=" * 40)

            permutation_tool_config = permutation_config.get('subdomain_permutation_resolve', {})

            # === Step 3.1: wildcard-DNS sampling check ===
            # Generate a mutation sample 100x the input file and check whether the
            # resolved results exceed 50x the input
            before_count = _count_lines(current_result)

            # Tunables
            SAMPLE_MULTIPLIER = 100   # sample size = input count × 100
            EXPANSION_THRESHOLD = 50  # expansion threshold = input count × 50
            SAMPLE_TIMEOUT = 7200     # sampling timeout: 2 hours

            sample_size = before_count * SAMPLE_MULTIPLIER
            max_allowed = before_count * EXPANSION_THRESHOLD
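
            # Worked example (illustrative): with 20 candidates, 20 × 100 = 2000
            # mutated names are sampled; if more than 20 × 50 = 1000 of them
            # "resolve", the zone is almost certainly wildcarding and the full
            # permutation stage is skipped.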
|
||||

            sample_output = str(result_dir / f"subs_permuted_sample_{timestamp}.txt")
            sample_cmd = (
                f"cat {current_result} | dnsgen - | head -n {sample_size} | "
                f"puredns resolve -r /app/backend/resources/resolvers.txt "
                f"--write {sample_output} --wildcard-tests 50 --wildcard-batch 1000000 --quiet"
            )

            logger.info(
                f"Wildcard sampling check: {before_count} input domains, "
                f"sampling {sample_size}, threshold {max_allowed}"
            )

            try:
                subprocess.run(
                    sample_cmd,
                    shell=True,
                    timeout=SAMPLE_TIMEOUT,
                    check=False,
                    capture_output=True
                )
                sample_result_count = _count_lines(sample_output) if Path(sample_output).exists() else 0

                logger.info(
                    f"Sampling result: {sample_result_count} domains resolved "
                    f"(input: {before_count}, threshold: {max_allowed})"
                )

                if sample_result_count > max_allowed:
                    # Sample exceeded the threshold: wildcard DNS detected, skip the full permutation
                    ratio = sample_result_count / before_count if before_count > 0 else sample_result_count
                    logger.warning(
                        f"Skipping permutation: wildcard DNS detected in sample "
                        f"({sample_result_count} > {max_allowed}, expansion {ratio:.1f}x)"
                    )
                    failed_tools.append({
                        'tool': 'subdomain_permutation_resolve',
                        'reason': f"wildcard DNS detected in sample (expansion {ratio:.1f}x)"
                    })
                else:
                    # === Step 3.2: sample passed, run the full permutation ===
                    logger.info("Sampling check passed, running full permutation...")

                    permuted_output = str(result_dir / f"subs_permuted_{timestamp}.txt")

                    permuted_result = _run_single_tool(
                        tool_name='subdomain_permutation_resolve',
                        tool_config=permutation_tool_config,
                        command_params={
                            'input_file': current_result,
                            'output_file': permuted_output,
                        },
                        result_dir=result_dir
                    )

                    if permuted_result:
                        # Merge the original results with the resolved permutation results
                        current_result = _merge_files(
                            [current_result, permuted_result],
                            str(result_dir / f"subs_with_permuted_{timestamp}.txt")
                        )
                        successful_tool_names.append('subdomain_permutation_resolve')
                        executed_tasks.append('permutation')
                    else:
                        failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': 'execution failed'})

            except subprocess.TimeoutExpired:
                logger.warning(f"Sampling check timed out ({SAMPLE_TIMEOUT} s), skipping permutation")
                failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': 'sampling check timed out'})
            except Exception as e:
                logger.warning(f"Sampling check failed: {e}, skipping permutation")
                failed_tools.append({'tool': 'subdomain_permutation_resolve', 'reason': f'sampling check failed: {e}'})

        # ==================== Stage 4: DNS liveness resolution (optional) ====================
        # Runs whenever resolve.enabled is true, regardless of Stage 3, applying one
        # unified DNS check to all current candidate subdomains.
        resolve_enabled = resolve_config.get('enabled', False)
        if resolve_enabled:
            logger.info("=" * 40)
            logger.info("Stage 4: DNS liveness resolution")
            logger.info("=" * 40)

            resolve_tool_config = resolve_config.get('subdomain_resolve', {})

            # Compute the timeout dynamically from the candidate count (supports timeout: auto)
            timeout_value = resolve_tool_config.get('timeout', 3600)
            if timeout_value == 'auto':
                line_count = 0
                try:
                    with open(current_result, 'rb') as f:
                        line_count = sum(1 for _ in f)
                except OSError:
                    line_count = 0

                try:
                    line_count_int = int(line_count)
                except (TypeError, ValueError):
                    line_count_int = 0

                timeout_value = line_count_int * 3 if line_count_int > 0 else 3600
                resolve_tool_config = {
                    **resolve_tool_config,
                    'timeout': timeout_value,
                }
                logger.info(
                    "subdomain_resolve using auto timeout: %s s (candidates=%s, 3 s/domain)",
                    timeout_value,
                    line_count_int,
                )
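                # Worked example (hypothetical numbers): 12,000 candidates yield
                # timeout_value = 12,000 * 3 = 36,000 s (10 h); an unreadable or
                # empty input file falls back to the fixed 3,600 s default.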

            alive_output = str(result_dir / f"subs_alive_{timestamp}.txt")

            alive_result = _run_single_tool(
                tool_name='subdomain_resolve',
                tool_config=resolve_tool_config,
                command_params={
                    'input_file': current_result,
                    'output_file': alive_output,
                },
                result_dir=result_dir
            )

            if alive_result:
                current_result = alive_result
                successful_tool_names.append('subdomain_resolve')
                executed_tasks.append('resolve')
            else:
                failed_tools.append({'tool': 'subdomain_resolve', 'reason': 'execution failed'})

        # ==================== Final: save to database ====================
        logger.info("=" * 40)
        logger.info("Final: saving to database")
        logger.info("=" * 40)

        # Final validation and save
        final_file = merge_and_validate_task(
            result_files=[current_result],
            result_dir=str(result_dir)
        )

        save_result = save_domains_task(
            domains_file=final_file,
            scan_id=scan_id,
            target_id=target_id
        )
        processed_domains = save_result.get('processed_records', 0)
        executed_tasks.append('save_domains')

        logger.info("="*60 + "\n✓ Subdomain discovery scan finished\n" + "="*60)

        return {
            'success': True,
            'scan_id': scan_id,
            'target': domain_name,
            'scan_workspace_dir': scan_workspace_dir,
            'total': processed_domains,
            'executed_tasks': executed_tasks,
            'tool_stats': {
                'total': len(enabled_passive_tools) + (1 if bruteforce_enabled else 0) +
                         (1 if permutation_enabled else 0) + (1 if resolve_enabled else 0),
                'successful': len(successful_tool_names),
                'failed': len(failed_tools),
                'successful_tools': successful_tool_names,
                'failed_tools': failed_tools
            }
        }

    except ValueError as e:
        logger.error("Configuration error: %s", e)
        raise
    except RuntimeError as e:
        logger.error("Runtime error: %s", e)
        raise
    except Exception as e:
        logger.exception("Subdomain discovery scan failed: %s", e)
        raise


def _empty_result(scan_id: int, target: str, scan_workspace_dir: str) -> dict:
    """Return an empty result."""
    return {
        'success': True,
        'scan_id': scan_id,
        'target': target,
        'scan_workspace_dir': scan_workspace_dir,
        'total': 0,
        'executed_tasks': [],
        'tool_stats': {
            'total': 0,
            'successful': 0,
            'failed': 0,
            'successful_tools': [],
            'failed_tools': []
        }
    }
@@ -1,238 +0,0 @@
from apps.common.prefect_django_setup import setup_django_for_prefect

import logging
from datetime import datetime
from pathlib import Path
from typing import Dict

from prefect import flow

from apps.scan.handlers.scan_flow_handlers import (
    on_scan_flow_running,
    on_scan_flow_completed,
    on_scan_flow_failed,
)
from apps.scan.utils import build_scan_command, ensure_nuclei_templates_local
from apps.scan.tasks.vuln_scan import (
    export_endpoints_task,
    run_vuln_tool_task,
    run_and_stream_save_dalfox_vulns_task,
    run_and_stream_save_nuclei_vulns_task,
)
from .utils import calculate_timeout_by_line_count


logger = logging.getLogger(__name__)


@flow(
    name="endpoints_vuln_scan_flow",
    log_prints=True,
)
def endpoints_vuln_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: Dict[str, dict],
) -> dict:
    """Endpoint-based vulnerability scan flow (submits Dalfox, Nuclei, etc. as parallel tasks)."""
    try:
        if scan_id is None:
            raise ValueError("scan_id must not be empty")
        if not target_name:
            raise ValueError("target_name must not be empty")
        if target_id is None:
            raise ValueError("target_id must not be empty")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir must not be empty")
        if not enabled_tools:
            raise ValueError("enabled_tools must not be empty")

        from apps.scan.utils import setup_scan_directory
        vuln_scan_dir = setup_scan_directory(scan_workspace_dir, 'vuln_scan')
        endpoints_file = vuln_scan_dir / "input_endpoints.txt"

        # Step 1: export the endpoint URLs
        export_result = export_endpoints_task(
            target_id=target_id,
            output_file=str(endpoints_file),
        )
        total_endpoints = export_result.get("total_count", 0)

        if total_endpoints == 0 or not endpoints_file.exists() or endpoints_file.stat().st_size == 0:
            logger.warning("Target has no usable endpoints, skipping vulnerability scan")
            return {
                "success": True,
                "scan_id": scan_id,
                "target": target_name,
                "scan_workspace_dir": scan_workspace_dir,
                "endpoints_file": str(endpoints_file),
                "endpoint_count": 0,
                "executed_tools": [],
                "tool_results": {},
            }

        logger.info("Endpoint export finished, %d entries; starting vulnerability scan", total_endpoints)

        tool_results: Dict[str, dict] = {}

        # Step 2: run each vulnerability scan tool in parallel (currently mainly Dalfox)
        # 1) submit a Prefect task per tool so the worker can schedule them concurrently
        # 2) then collect each result and assemble tool_results
        tool_futures: Dict[str, dict] = {}

        for tool_name, tool_config in enabled_tools.items():
            # Nuclei first needs its local templates in place (multiple template repos supported)
            template_args = ""
            if tool_name == "nuclei":
                repo_names = tool_config.get("template_repo_names")
                if not repo_names or not isinstance(repo_names, (list, tuple)):
                    logger.error("Nuclei config is missing template_repo_names (array), skipping")
                    continue
                template_paths = []
                try:
                    for repo_name in repo_names:
                        path = ensure_nuclei_templates_local(repo_name)
                        template_paths.append(path)
                        logger.info("Nuclei template path [%s]: %s", repo_name, path)
                except Exception as e:
                    logger.error("Failed to fetch Nuclei templates: %s, skipping nuclei scan", e)
                    continue
                template_args = " ".join(f"-t {p}" for p in template_paths)
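                # e.g. two repos resolved to /opt/templates/a and /opt/templates/b
                # (illustrative paths) give:
                #   template_args == "-t /opt/templates/a -t /opt/templates/b"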

            # Build the command parameters
            command_params = {"endpoints_file": str(endpoints_file)}
            if template_args:
                command_params["template_args"] = template_args

            command = build_scan_command(
                tool_name=tool_name,
                scan_type="vuln_scan",
                command_params=command_params,
                tool_config=tool_config,
            )

            raw_timeout = tool_config.get("timeout", 600)

            if isinstance(raw_timeout, str) and raw_timeout == "auto":
                # timeout=auto: derive the timeout from the endpoints_file line count
                # Dalfox: 100 s per line, Nuclei: 30 s per line
                base_per_time = 30 if tool_name == "nuclei" else 100
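                # Worked example, assuming calculate_timeout_by_line_count multiplies
                # the line count by base_per_time: 500 endpoints (hypothetical) give
                # nuclei 500 * 30 = 15,000 s and dalfox 500 * 100 = 50,000 s.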
                timeout = calculate_timeout_by_line_count(
                    tool_config=tool_config,
                    file_path=str(endpoints_file),
                    base_per_time=base_per_time,
                )
            else:
                try:
                    timeout = int(raw_timeout)
                except (TypeError, ValueError) as e:
                    raise ValueError(
                        f"Invalid timeout configured for tool {tool_name}: {raw_timeout!r}"
                    ) from e

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            log_file = vuln_scan_dir / f"{tool_name}_{timestamp}.log"

            # Dalfox XSS uses a streaming task that parses and saves findings as they appear
            if tool_name == "dalfox_xss":
                logger.info("Submitted vulnerability scan tool %s (streaming result save)", tool_name)
                future = run_and_stream_save_dalfox_vulns_task.submit(
                    cmd=command,
                    tool_name=tool_name,
                    scan_id=scan_id,
                    target_id=target_id,
                    cwd=str(vuln_scan_dir),
                    shell=True,
                    batch_size=1,
                    timeout=timeout,
                    log_file=str(log_file),
                )

                tool_futures[tool_name] = {
                    "future": future,
                    "command": command,
                    "timeout": timeout,
                    "log_file": str(log_file),
                    "mode": "streaming",
                }
            elif tool_name == "nuclei":
                # Nuclei also uses a streaming task
                logger.info("Submitted vulnerability scan tool %s (streaming result save)", tool_name)
                future = run_and_stream_save_nuclei_vulns_task.submit(
                    cmd=command,
                    tool_name=tool_name,
                    scan_id=scan_id,
                    target_id=target_id,
                    cwd=str(vuln_scan_dir),
                    shell=True,
                    batch_size=1,
                    timeout=timeout,
                    log_file=str(log_file),
                )

                tool_futures[tool_name] = {
                    "future": future,
                    "command": command,
                    "timeout": timeout,
                    "log_file": str(log_file),
                    "mode": "streaming",
                }
            else:
                # Other tools keep the non-streaming execution path
                logger.info("Submitted vulnerability scan tool %s", tool_name)
                future = run_vuln_tool_task.submit(
                    tool_name=tool_name,
                    command=command,
                    timeout=timeout,
                    log_file=str(log_file),
                )

                tool_futures[tool_name] = {
                    "future": future,
                    "command": command,
                    "timeout": timeout,
                    "log_file": str(log_file),
                    "mode": "normal",
                }

        # Collect the results of all submitted tools
        for tool_name, meta in tool_futures.items():
            future = meta["future"]
            result = future.result()

            if meta["mode"] == "streaming":
                tool_results[tool_name] = {
                    "command": meta["command"],
                    "timeout": meta["timeout"],
                    "processed_records": result.get("processed_records"),
                    "created_vulns": result.get("created_vulns"),
                    "command_log_file": meta["log_file"],
                }
            else:
                tool_results[tool_name] = {
                    "command": meta["command"],
                    "timeout": meta["timeout"],
                    "duration": result.get("duration"),
                    "returncode": result.get("returncode"),
                    "command_log_file": result.get("command_log_file"),
                }

        return {
            "success": True,
            "scan_id": scan_id,
            "target": target_name,
            "scan_workspace_dir": scan_workspace_dir,
            "endpoints_file": str(endpoints_file),
            "endpoint_count": total_endpoints,
            "executed_tools": list(enabled_tools.keys()),
            "tool_results": tool_results,
        }

    except Exception as e:
        logger.exception("Endpoint vulnerability scan failed: %s", e)
        raise
@@ -1,107 +0,0 @@
from apps.common.prefect_django_setup import setup_django_for_prefect

import logging
from typing import Dict, Tuple

from prefect import flow

from apps.scan.handlers.scan_flow_handlers import (
    on_scan_flow_running,
    on_scan_flow_completed,
    on_scan_flow_failed,
)
from apps.scan.configs.command_templates import get_command_template
from .endpoints_vuln_scan_flow import endpoints_vuln_scan_flow


logger = logging.getLogger(__name__)


def _classify_vuln_tools(enabled_tools: Dict[str, dict]) -> Tuple[Dict[str, dict], Dict[str, dict]]:
    """Classify vulnerability scan tools by the input_type in their command template.

    Currently supported:
    - endpoints_file: takes an endpoint list file as input (e.g. Dalfox XSS)
    Reserved:
    - other input_type values go into other_tools and are not handled yet.
    """
    endpoints_tools: Dict[str, dict] = {}
    other_tools: Dict[str, dict] = {}

    for tool_name, tool_config in enabled_tools.items():
        template = get_command_template("vuln_scan", tool_name) or {}
        input_type = template.get("input_type", "endpoints_file")

        if input_type == "endpoints_file":
            endpoints_tools[tool_name] = tool_config
        else:
            other_tools[tool_name] = tool_config

    return endpoints_tools, other_tools

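# Classification sketch (hypothetical config): given
#   enabled_tools = {"dalfox_xss": {...}, "nuclei": {...}, "sqlmap": {...}}
# where sqlmap's template declares input_type="requests_file", the call
#   _classify_vuln_tools(enabled_tools)
# returns ({"dalfox_xss": ..., "nuclei": ...}, {"sqlmap": ...}).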

@flow(
    name="vuln_scan",
    log_prints=True,
    on_running=[on_scan_flow_running],
    on_completion=[on_scan_flow_completed],
    on_failure=[on_scan_flow_failed],
)
def vuln_scan_flow(
    scan_id: int,
    target_name: str,
    target_id: int,
    scan_workspace_dir: str,
    enabled_tools: Dict[str, dict],
) -> dict:
    """Main vulnerability scan flow: orchestrates the vuln scan sub-flows serially.

    Supported tools:
    - dalfox_xss: XSS scanning (streaming save)
    - nuclei: generic vulnerability scanning (streaming save, supports template commit-hash sync)
    """
    try:
        if scan_id is None:
            raise ValueError("scan_id must not be empty")
        if not target_name:
            raise ValueError("target_name must not be empty")
        if target_id is None:
            raise ValueError("target_id must not be empty")
        if not scan_workspace_dir:
            raise ValueError("scan_workspace_dir must not be empty")
        if not enabled_tools:
            raise ValueError("enabled_tools must not be empty")

        # Step 1: classify the tools
        endpoints_tools, other_tools = _classify_vuln_tools(enabled_tools)

        logger.info(
            "Vulnerability tool classification - endpoints_file: %s, other: %s",
            list(endpoints_tools.keys()) or "none",
            list(other_tools.keys()) or "none",
        )

        if other_tools:
            logger.warning(
                "Some vulnerability scan tools have unsupported input types and will be ignored: %s",
                list(other_tools.keys()),
            )

        if not endpoints_tools:
            raise ValueError(
                "Vulnerability scanning requires at least one enabled tool that takes "
                "endpoints_file as input (e.g. dalfox_xss, nuclei)."
            )

        # Step 2: run the endpoint vulnerability scan sub-flow (serially)
        endpoint_result = endpoints_vuln_scan_flow(
            scan_id=scan_id,
            target_name=target_name,
            target_id=target_id,
            scan_workspace_dir=scan_workspace_dir,
            enabled_tools=endpoints_tools,
        )

        # Only one sub-flow for now; return its result directly
        return endpoint_result

    except Exception as e:
        logger.exception("Main vulnerability scan flow failed: %s", e)
        raise
@@ -1,182 +0,0 @@
"""
|
||||
扫描流程处理器
|
||||
|
||||
负责处理扫描流程(端口扫描、子域名发现等)的状态变化和通知
|
||||
|
||||
职责:
|
||||
- 更新各阶段的进度状态(running/completed/failed)
|
||||
- 发送扫描阶段的通知
|
||||
- 记录 Flow 性能指标
|
||||
"""
|
||||
|
||||
import logging
|
||||
from prefect import Flow
|
||||
from prefect.client.schemas import FlowRun, State
|
||||
|
||||
from apps.scan.utils.performance import FlowPerformanceTracker
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# 存储每个 flow_run 的性能追踪器
|
||||
_flow_trackers: dict[str, FlowPerformanceTracker] = {}
|
||||
|
||||
|
||||
def _get_stage_from_flow_name(flow_name: str) -> str | None:
|
||||
"""
|
||||
从 Flow name 获取对应的 stage
|
||||
|
||||
Flow name 直接作为 stage(与 engine_config 的 key 一致)
|
||||
排除主 Flow(initiate_scan)
|
||||
"""
|
||||
# 排除主 Flow,它不是阶段 Flow
|
||||
if flow_name == 'initiate_scan':
|
||||
return None
|
||||
return flow_name
|
||||
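# e.g. _get_stage_from_flow_name('subdomain_discovery') == 'subdomain_discovery'
# (an illustrative stage key), while _get_stage_from_flow_name('initiate_scan')
# is None, so the orchestrating flow never touches stage progress.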


def on_scan_flow_running(flow: Flow, flow_run: FlowRun, state: State) -> None:
    """
    Callback fired when a scan flow starts running.

    Responsibilities:
    - Update the stage progress to running
    - Send the scan-started notification
    - Start performance tracking

    Args:
        flow: Prefect Flow object
        flow_run: flow run instance
        state: current flow state
    """
    logger.info("🚀 Scan flow started - Flow: %s, Run ID: %s", flow.name, flow_run.id)

    # Extract the flow parameters
    flow_params = flow_run.parameters or {}
    scan_id = flow_params.get('scan_id')
    target_name = flow_params.get('target_name', 'unknown')
    target_id = flow_params.get('target_id')

    # Start performance tracking
    if scan_id:
        tracker = FlowPerformanceTracker(flow.name, scan_id)
        tracker.start(target_id=target_id, target_name=target_name)
        _flow_trackers[str(flow_run.id)] = tracker

    # Update the stage progress
    stage = _get_stage_from_flow_name(flow.name)
    if scan_id and stage:
        try:
            from apps.scan.services import ScanService
            service = ScanService()
            service.start_stage(scan_id, stage)
            logger.info(f"✓ Stage progress set to running - Scan ID: {scan_id}, Stage: {stage}")
        except Exception as e:
            logger.error(f"Failed to update stage progress - Scan ID: {scan_id}, Stage: {stage}: {e}")


def on_scan_flow_completed(flow: Flow, flow_run: FlowRun, state: State) -> None:
    """
    Callback fired when a scan flow completes.

    Responsibilities:
    - Update the stage progress to completed
    - Send the scan-completed notification (optional)
    - Record performance metrics

    Args:
        flow: Prefect Flow object
        flow_run: flow run instance
        state: current flow state
    """
    logger.info("✅ Scan flow completed - Flow: %s, Run ID: %s", flow.name, flow_run.id)

    # Extract the flow parameters
    flow_params = flow_run.parameters or {}
    scan_id = flow_params.get('scan_id')

    # Fetch the flow result (the old truthiness guard on the bound method was a no-op)
    result = None
    try:
        result = state.result()
    except Exception:
        pass

    # Record performance metrics
    tracker = _flow_trackers.pop(str(flow_run.id), None)
    if tracker:
        tracker.finish(success=True)

    # Update the stage progress
    stage = _get_stage_from_flow_name(flow.name)
    if scan_id and stage:
        try:
            from apps.scan.services import ScanService
            service = ScanService()
            # Extract detail from the flow result (if any)
            detail = None
            if isinstance(result, dict):
                detail = result.get('detail')
            service.complete_stage(scan_id, stage, detail)
            logger.info(f"✓ Stage progress set to completed - Scan ID: {scan_id}, Stage: {stage}")
            # Refresh cached stats after each stage so the frontend sees increments in real time
            try:
                service.update_cached_stats(scan_id)
                logger.info("✓ Cached stats refreshed after stage completion - Scan ID: %s", scan_id)
            except Exception as e:
                logger.error("Failed to refresh cached stats after stage completion - Scan ID: %s, error: %s", scan_id, e)
        except Exception as e:
            logger.error(f"Failed to update stage progress - Scan ID: {scan_id}, Stage: {stage}: {e}")


def on_scan_flow_failed(flow: Flow, flow_run: FlowRun, state: State) -> None:
    """
    Callback fired when a scan flow fails.

    Responsibilities:
    - Update the stage progress to failed
    - Send the scan-failed notification
    - Record performance metrics (including the error message)

    Args:
        flow: Prefect Flow object
        flow_run: flow run instance
        state: current flow state
    """
    logger.info("❌ Scan flow failed - Flow: %s, Run ID: %s", flow.name, flow_run.id)

    # Extract the flow parameters
    flow_params = flow_run.parameters or {}
    scan_id = flow_params.get('scan_id')
    target_name = flow_params.get('target_name', 'unknown')

    # Extract the error message
    error_message = str(state.message) if state.message else "unknown error"

    # Record performance metrics (failure case)
    tracker = _flow_trackers.pop(str(flow_run.id), None)
    if tracker:
        tracker.finish(success=False, error_message=error_message)

    # Update the stage progress
    stage = _get_stage_from_flow_name(flow.name)
    if scan_id and stage:
        try:
            from apps.scan.services import ScanService
            service = ScanService()
            service.fail_stage(scan_id, stage, error_message)
            logger.info(f"✓ Stage progress set to failed - Scan ID: {scan_id}, Stage: {stage}")
        except Exception as e:
            logger.error(f"Failed to update stage progress - Scan ID: {scan_id}, Stage: {stage}: {e}")

    # Send the notification
    try:
        from apps.scan.notifications import create_notification, NotificationLevel
        message = f"Task: {flow.name}\nStatus: failed\nError: {error_message}"
        create_notification(
            title=target_name,
            message=message,
            level=NotificationLevel.HIGH
        )
        logger.error(f"✓ Scan failure notification sent - Target: {target_name}, Flow: {flow.name}, Error: {error_message}")
    except Exception as e:
        logger.error(f"Failed to send scan failure notification - Flow: {flow.name}: {e}")
@@ -1,195 +0,0 @@
from django.db import models
from django.contrib.postgres.fields import ArrayField

from ..common.definitions import ScanStatus


class SoftDeleteManager(models.Manager):
    """Soft-delete manager: returns only records that have not been deleted."""

    def get_queryset(self):
        return super().get_queryset().filter(deleted_at__isnull=True)
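
# Usage sketch: Scan.objects.filter(...) only sees rows where deleted_at IS NULL,
# while Scan.all_objects also returns soft-deleted rows (used for hard deletes).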

class Scan(models.Model):
    """Scan task model."""

    id = models.AutoField(primary_key=True)

    target = models.ForeignKey('targets.Target', on_delete=models.CASCADE, related_name='scans', help_text='Scan target')

    # Multi-engine support fields
    engine_ids = ArrayField(
        models.IntegerField(),
        default=list,
        help_text='Engine ID list'
    )
    engine_names = models.JSONField(
        default=list,
        help_text='Engine name list, e.g. ["Engine A", "Engine B"]'
    )
    merged_configuration = models.TextField(
        default='',
        help_text='Merged YAML configuration'
    )

    created_at = models.DateTimeField(auto_now_add=True, help_text='Task creation time')
    stopped_at = models.DateTimeField(null=True, blank=True, help_text='Scan end time')

    status = models.CharField(
        max_length=20,
        choices=ScanStatus.choices,
        default=ScanStatus.INITIATED,
        db_index=True,
        help_text='Task status'
    )

    results_dir = models.CharField(max_length=100, blank=True, default='', help_text='Results directory')

    container_ids = ArrayField(
        models.CharField(max_length=100),
        blank=True,
        default=list,
        help_text='Container ID list (Docker container IDs)'
    )

    worker = models.ForeignKey(
        'engine.WorkerNode',
        on_delete=models.SET_NULL,
        related_name='scans',
        null=True,
        blank=True,
        help_text='Worker node that runs the scan'
    )

    error_message = models.CharField(max_length=2000, blank=True, default='', help_text='Error message')

    # ==================== Soft-delete field ====================
    deleted_at = models.DateTimeField(null=True, blank=True, db_index=True, help_text='Deletion time (NULL = not deleted)')

    # ==================== Managers ====================
    objects = SoftDeleteManager()   # default manager: only non-deleted records
    all_objects = models.Manager()  # full manager: includes deleted records (for hard deletes)

    # ==================== Progress tracking fields ====================
    progress = models.IntegerField(default=0, help_text='Scan progress 0-100')
    current_stage = models.CharField(max_length=50, blank=True, default='', help_text='Current scan stage')
    stage_progress = models.JSONField(default=dict, help_text='Per-stage progress details')

    # ==================== Cached statistics fields ====================
    cached_subdomains_count = models.IntegerField(default=0, help_text='Cached subdomain count')
    cached_websites_count = models.IntegerField(default=0, help_text='Cached website count')
    cached_endpoints_count = models.IntegerField(default=0, help_text='Cached endpoint count')
    cached_ips_count = models.IntegerField(default=0, help_text='Cached IP address count')
    cached_directories_count = models.IntegerField(default=0, help_text='Cached directory count')
    cached_vulns_total = models.IntegerField(default=0, help_text='Cached total vulnerability count')
    cached_vulns_critical = models.IntegerField(default=0, help_text='Cached critical vulnerability count')
    cached_vulns_high = models.IntegerField(default=0, help_text='Cached high-severity vulnerability count')
    cached_vulns_medium = models.IntegerField(default=0, help_text='Cached medium-severity vulnerability count')
    cached_vulns_low = models.IntegerField(default=0, help_text='Cached low-severity vulnerability count')
    stats_updated_at = models.DateTimeField(null=True, blank=True, help_text='Last statistics update time')

    class Meta:
        db_table = 'scan'
        verbose_name = 'Scan task'
        verbose_name_plural = 'Scan tasks'
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['-created_at']),                # speeds up the default descending creation-time ordering (list queries)
            models.Index(fields=['target']),                     # speeds up per-target scan lookups
            models.Index(fields=['deleted_at', '-created_at']),  # soft delete + time index
        ]

    def __str__(self):
        return f"Scan #{self.id} - {self.target.name}"


class ScheduledScan(models.Model):
    """
    Scheduled scan task model.

    Scheduling:
    - APScheduler checks next_run_time every minute
    - Due tasks are dispatched to workers via task_distributor
    - Cron expressions allow flexible schedules

    Scan mode (one of two):
    - Organization scan: set organization; all targets under it are fetched at run time
    - Target scan: set target to scan a single target
    - organization takes precedence over target
    """

    id = models.AutoField(primary_key=True)

    # Basic info
    name = models.CharField(max_length=200, help_text='Task name')

    # Multi-engine support fields
    engine_ids = ArrayField(
        models.IntegerField(),
        default=list,
        help_text='Engine ID list'
    )
    engine_names = models.JSONField(
        default=list,
        help_text='Engine name list, e.g. ["Engine A", "Engine B"]'
    )
    merged_configuration = models.TextField(
        default='',
        help_text='Merged YAML configuration'
    )

    # Associated organization (organization mode: all of its targets are fetched at run time)
    organization = models.ForeignKey(
        'targets.Organization',
        on_delete=models.CASCADE,
        related_name='scheduled_scans',
        null=True,
        blank=True,
        help_text='Scan organization (when set, all of its targets are fetched at run time)'
    )

    # Associated target (target mode: scan a single target)
    target = models.ForeignKey(
        'targets.Target',
        on_delete=models.CASCADE,
        related_name='scheduled_scans',
        null=True,
        blank=True,
        help_text='Single scan target (mutually exclusive with organization)'
    )

    # Schedule configuration - plain cron expressions
    cron_expression = models.CharField(
        max_length=100,
        default='0 2 * * *',
        help_text='Cron expression, format: minute hour day month weekday'
    )
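    # e.g. the default '0 2 * * *' fires daily at 02:00; '30 1 * * 1' (an
    # illustrative expression) would fire on Mondays at 01:30.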

    # State
    is_enabled = models.BooleanField(default=True, db_index=True, help_text='Whether the task is enabled')

    # Execution statistics
    run_count = models.IntegerField(default=0, help_text='Number of executions')
    last_run_time = models.DateTimeField(null=True, blank=True, help_text='Last run time')
    next_run_time = models.DateTimeField(null=True, blank=True, help_text='Next run time')

    # Timestamps
    created_at = models.DateTimeField(auto_now_add=True, help_text='Creation time')
    updated_at = models.DateTimeField(auto_now=True, help_text='Update time')

    class Meta:
        db_table = 'scheduled_scan'
        verbose_name = 'Scheduled scan task'
        verbose_name_plural = 'Scheduled scan tasks'
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['-created_at']),
            models.Index(fields=['is_enabled', '-created_at']),
            models.Index(fields=['name']),  # speeds up name searches
        ]

    def __str__(self):
        return f"ScheduledScan #{self.id} - {self.name}"
@@ -1,189 +0,0 @@
#!/usr/bin/env python
"""
Scan task launcher.

Executed when a dynamic scan container starts.
Configuration must be fetched and environment variables set before Django is imported.
"""
import argparse
import sys
import os
import traceback


def diagnose_prefect_environment():
    """Diagnose the Prefect runtime environment and print details for troubleshooting."""
    print("\n" + "="*60)
    print("Prefect environment diagnostics")
    print("="*60)

    # 1. Check Prefect-related environment variables
    print("\n[diag] Prefect environment variables:")
    prefect_vars = [
        'PREFECT_HOME',
        'PREFECT_API_URL',
        'PREFECT_SERVER_EPHEMERAL_ENABLED',
        'PREFECT_SERVER_EPHEMERAL_STARTUP_TIMEOUT_SECONDS',
        'PREFECT_SERVER_DATABASE_CONNECTION_URL',
        'PREFECT_LOGGING_LEVEL',
        'PREFECT_DEBUG_MODE',
    ]
    for var in prefect_vars:
        value = os.environ.get(var, 'NOT SET')
        print(f"  {var}={value}")

    # 2. Check the PREFECT_HOME directory
    prefect_home = os.environ.get('PREFECT_HOME', os.path.expanduser('~/.prefect'))
    print(f"\n[diag] PREFECT_HOME directory: {prefect_home}")
    if os.path.exists(prefect_home):
        print("  ✓ directory exists")
        print(f"  writable: {os.access(prefect_home, os.W_OK)}")
        try:
            files = os.listdir(prefect_home)
            print(f"  files: {files[:10]}{'...' if len(files) > 10 else ''}")
        except Exception as e:
            print(f"  ✗ cannot list files: {e}")
    else:
        print("  directory missing, trying to create it...")
        try:
            os.makedirs(prefect_home, exist_ok=True)
            print("  ✓ created")
        except Exception as e:
            print(f"  ✗ creation failed: {e}")

    # 3. Check whether uvicorn is available
    print("\n[diag] uvicorn availability:")
    import shutil
    uvicorn_path = shutil.which('uvicorn')
    if uvicorn_path:
        print(f"  ✓ uvicorn path: {uvicorn_path}")
    else:
        print("  ✗ uvicorn not on PATH")
        print(f"  PATH: {os.environ.get('PATH', 'NOT SET')}")

    # 4. Check the Prefect version
    print("\n[diag] Prefect version:")
    try:
        import prefect
        print(f"  ✓ prefect=={prefect.__version__}")
    except Exception as e:
        print(f"  ✗ cannot import prefect: {e}")

    # 5. Check SQLite support
    print("\n[diag] SQLite support:")
    try:
        import sqlite3
        print(f"  ✓ sqlite3 version: {sqlite3.sqlite_version}")
        # Test creating a database
        test_db = os.path.join(prefect_home, 'test.db')
        conn = sqlite3.connect(test_db)
        conn.execute('CREATE TABLE IF NOT EXISTS test (id INTEGER)')
        conn.close()
        os.remove(test_db)
        print("  ✓ SQLite read/write test passed")
    except Exception as e:
        print(f"  ✗ SQLite test failed: {e}")

    # 6. Check port-binding capability
    print("\n[diag] Port-binding test:")
    try:
        import socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('127.0.0.1', 0))
        port = sock.getsockname()[1]
        sock.close()
        print(f"  ✓ can bind on 127.0.0.1 (test port: {port})")
    except Exception as e:
        print(f"  ✗ port binding failed: {e}")

    # 7. Check memory
    print("\n[diag] System resources:")
    try:
        import psutil
        mem = psutil.virtual_memory()
        print(f"  total memory: {mem.total / 1024 / 1024:.0f} MB")
        print(f"  available memory: {mem.available / 1024 / 1024:.0f} MB")
        print(f"  memory usage: {mem.percent}%")
    except ImportError:
        print("  psutil not installed, skipping memory check")
    except Exception as e:
        print(f"  ✗ resource check failed: {e}")

    print("\n" + "="*60)
    print("Diagnostics done")
    print("="*60 + "\n")


def main():
    print("="*60)
    print("run_initiate_scan.py starting")
    print(f"  Python: {sys.version}")
    print(f"  CWD: {os.getcwd()}")
    print(f"  SERVER_URL: {os.environ.get('SERVER_URL', 'NOT SET')}")
    print("="*60)

    # 1. Fetch configuration from the config center and initialize Django (must precede any Django import)
    print("[1/4] Fetching configuration from the config center...")
    try:
        from apps.common.container_bootstrap import fetch_config_and_setup_django
        fetch_config_and_setup_django()
        print("[1/4] ✓ configuration fetched")
    except Exception as e:
        print(f"[1/4] ✗ configuration fetch failed: {e}")
        traceback.print_exc()
        sys.exit(1)

    # 2. Parse command-line arguments
    print("[2/4] Parsing command-line arguments...")
    parser = argparse.ArgumentParser(description="Run the scan initiation flow")
    parser.add_argument("--scan_id", type=int, required=True, help="scan task ID")
    parser.add_argument("--target_name", type=str, required=True, help="target name")
    parser.add_argument("--target_id", type=int, required=True, help="target ID")
    parser.add_argument("--scan_workspace_dir", type=str, required=True, help="scan workspace directory")
    parser.add_argument("--engine_name", type=str, required=True, help="engine name")
    parser.add_argument("--scheduled_scan_name", type=str, default=None, help="scheduled scan name (optional)")

    args = parser.parse_args()
    print("[2/4] ✓ arguments parsed:")
    print(f"  scan_id: {args.scan_id}")
    print(f"  target_name: {args.target_name}")
    print(f"  target_id: {args.target_id}")
    print(f"  scan_workspace_dir: {args.scan_workspace_dir}")
    print(f"  engine_name: {args.engine_name}")
    print(f"  scheduled_scan_name: {args.scheduled_scan_name}")

    # 2.5. Run the Prefect environment diagnostics (DEBUG mode only)
    if os.environ.get('DEBUG', '').lower() == 'true':
        diagnose_prefect_environment()

    # 3. Django-dependent modules can be imported safely now
    print("[3/4] Importing initiate_scan_flow...")
    try:
        from apps.scan.flows.initiate_scan_flow import initiate_scan_flow
        print("[3/4] ✓ import succeeded")
    except Exception as e:
        print(f"[3/4] ✗ import failed: {e}")
        traceback.print_exc()
        sys.exit(1)

    # 4. Run the flow
    print("[4/4] Running initiate_scan_flow...")
    try:
        result = initiate_scan_flow(
            scan_id=args.scan_id,
            target_name=args.target_name,
            target_id=args.target_id,
            scan_workspace_dir=args.scan_workspace_dir,
            engine_name=args.engine_name,
            scheduled_scan_name=args.scheduled_scan_name,
        )
        print("[4/4] ✓ flow finished")
        print(f"Result: {result}")
    except Exception as e:
        print(f"[4/4] ✗ flow failed: {e}")
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()
@@ -1,266 +0,0 @@
from rest_framework import serializers

from .models import Scan, ScheduledScan


class ScanSerializer(serializers.ModelSerializer):
    """Scan task serializer."""
    target_name = serializers.SerializerMethodField()

    class Meta:
        model = Scan
        fields = [
            'id', 'target', 'target_name', 'engine_ids', 'engine_names',
            'created_at', 'stopped_at', 'status', 'results_dir',
            'container_ids', 'error_message'
        ]
        read_only_fields = [
            'id', 'created_at', 'stopped_at', 'results_dir',
            'container_ids', 'error_message', 'status'
        ]

    def get_target_name(self, obj):
        """Return the target name."""
        return obj.target.name if obj.target else None


class ScanHistorySerializer(serializers.ModelSerializer):
    """Serializer dedicated to the scan history list.

    Provides an optimized payload for the frontend scan history page, including:
    - scan summary statistics (subdomain, endpoint, and vulnerability counts)
    - progress percentage and current stage
    - worker node info
    """

    # Field mappings
    target_name = serializers.CharField(source='target.name', read_only=True)
    worker_name = serializers.CharField(source='worker.name', read_only=True, allow_null=True)

    # Computed fields
    summary = serializers.SerializerMethodField()

    # Progress tracking fields (read directly from the model)
    progress = serializers.IntegerField(read_only=True)
    current_stage = serializers.CharField(read_only=True)
    stage_progress = serializers.JSONField(read_only=True)

    class Meta:
        model = Scan
        fields = [
            'id', 'target', 'target_name', 'engine_ids', 'engine_names',
            'worker_name', 'created_at', 'status', 'error_message', 'summary',
            'progress', 'current_stage', 'stage_progress'
        ]

    def get_summary(self, obj):
        """Build the scan summary.

        Design principles:
        - subdomains/websites/endpoints/IPs/directories use cached fields (no live COUNT)
        - vulnerability stats use cached fields on Scan, aggregated when the scan ends
        """
        # 1. Base statistics from cached fields (subdomains, websites, endpoints, IPs, directories)
        summary = {
            'subdomains': obj.cached_subdomains_count or 0,
            'websites': obj.cached_websites_count or 0,
            'endpoints': obj.cached_endpoints_count or 0,
            'ips': obj.cached_ips_count or 0,
            'directories': obj.cached_directories_count or 0,
        }

        # 2. Cached vulnerability statistics on the Scan model (aggregated by severity)
        summary['vulnerabilities'] = {
            'total': obj.cached_vulns_total or 0,
            'critical': obj.cached_vulns_critical or 0,
            'high': obj.cached_vulns_high or 0,
            'medium': obj.cached_vulns_medium or 0,
            'low': obj.cached_vulns_low or 0,
        }

        return summary


class QuickScanSerializer(serializers.Serializer):
    """
    Quick scan serializer.

    Features:
    - accepts a target list and engine configuration
    - creates/fetches targets automatically
    - starts the scan immediately
    """

    # Maximum batch size
    MAX_BATCH_SIZE = 1000

    # Target list
    targets = serializers.ListField(
        child=serializers.DictField(),
        help_text='Target list; each target contains a name field'
    )

    # Scan engine ID list
    engine_ids = serializers.ListField(
        child=serializers.IntegerField(),
        required=True,
        help_text='Scan engine ID list (required)'
    )

    def validate_targets(self, value):
        """Validate the target list."""
        if not value:
            raise serializers.ValidationError("Target list must not be empty")

        # Enforce the batch size limit to protect the server
        if len(value) > self.MAX_BATCH_SIZE:
            raise serializers.ValidationError(
                f"Quick scan supports at most {self.MAX_BATCH_SIZE} targets; {len(value)} were submitted"
            )

        # Check required fields on each target
        for idx, target in enumerate(value):
            if 'name' not in target:
                raise serializers.ValidationError(f"Target #{idx + 1} is missing the name field")
            if not target['name']:
                raise serializers.ValidationError(f"Target #{idx + 1} has an empty name")

        return value

    def validate_engine_ids(self, value):
        """Validate the engine ID list."""
        if not value:
            raise serializers.ValidationError("engine_ids must not be empty")
        return value


# ==================== Scheduled scan serializers ====================


class ScheduledScanSerializer(serializers.ModelSerializer):
    """Scheduled scan task serializer (list and detail views)."""

    # Related fields
    organization_id = serializers.IntegerField(source='organization.id', read_only=True, allow_null=True)
    organization_name = serializers.CharField(source='organization.name', read_only=True, allow_null=True)
    target_id = serializers.IntegerField(source='target.id', read_only=True, allow_null=True)
    target_name = serializers.CharField(source='target.name', read_only=True, allow_null=True)
    scan_mode = serializers.SerializerMethodField()

    class Meta:
        model = ScheduledScan
        fields = [
            'id', 'name',
            'engine_ids', 'engine_names',
            'organization_id', 'organization_name',
            'target_id', 'target_name',
            'scan_mode',
            'cron_expression',
            'is_enabled',
            'run_count', 'last_run_time', 'next_run_time',
            'created_at', 'updated_at'
        ]
        read_only_fields = [
            'id', 'run_count',
            'last_run_time', 'next_run_time',
            'created_at', 'updated_at'
        ]

    def get_scan_mode(self, obj):
        """Return the scan mode: organization or target."""
        return 'organization' if obj.organization_id else 'target'


class CreateScheduledScanSerializer(serializers.Serializer):
    """Serializer for creating scheduled scan tasks.

    Scan mode (one of two):
    - organization scan: provide organization_id; all of its targets are fetched at run time
    - target scan: provide target_id to scan a single target
    """

    name = serializers.CharField(max_length=200, help_text='Task name')
    engine_ids = serializers.ListField(
        child=serializers.IntegerField(),
        help_text='Scan engine ID list'
    )

    # Organization scan mode
    organization_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='Organization ID (organization mode: all of its targets are fetched at run time)'
    )

    # Target scan mode
    target_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='Target ID (target mode: scan a single target)'
    )

    cron_expression = serializers.CharField(
        max_length=100,
        default='0 2 * * *',
        help_text='Cron expression, format: minute hour day month weekday'
    )
    is_enabled = serializers.BooleanField(default=True, help_text='Whether to enable immediately')

    def validate_engine_ids(self, value):
        """Validate the engine ID list."""
        if not value:
            raise serializers.ValidationError("engine_ids must not be empty")
        return value

    def validate(self, data):
        """organization_id and target_id are mutually exclusive."""
        organization_id = data.get('organization_id')
        target_id = data.get('target_id')

        if not organization_id and not target_id:
            raise serializers.ValidationError('Either organization_id or target_id must be provided')

        if organization_id and target_id:
            raise serializers.ValidationError('Only one of organization_id and target_id may be provided')

        return data


class UpdateScheduledScanSerializer(serializers.Serializer):
    """Serializer for updating scheduled scan tasks."""

    name = serializers.CharField(max_length=200, required=False, help_text='Task name')
    engine_ids = serializers.ListField(
        child=serializers.IntegerField(),
        required=False,
        help_text='Scan engine ID list'
    )

    # Organization scan mode
    organization_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='Organization ID (clears target_id when set)'
    )

    # Target scan mode
    target_id = serializers.IntegerField(
        required=False,
        allow_null=True,
        help_text='Target ID (clears organization_id when set)'
    )

    cron_expression = serializers.CharField(max_length=100, required=False, help_text='Cron expression')
    is_enabled = serializers.BooleanField(required=False, help_text='Whether the task is enabled')

    def validate_engine_ids(self, value):
        """Validate the engine ID list."""
        if value is not None and not value:
            raise serializers.ValidationError("engine_ids must not be empty")
        return value


class ToggleScheduledScanSerializer(serializers.Serializer):
    """Serializer for toggling a scheduled scan's enabled state."""

    is_enabled = serializers.BooleanField(help_text='Whether the task is enabled')
@@ -1,82 +0,0 @@
"""
|
||||
黑名单过滤服务
|
||||
|
||||
过滤敏感域名(如 .gov、.edu、.mil 等)
|
||||
|
||||
当前版本使用默认规则,后续将支持从前端配置加载。
|
||||
"""
|
||||
|
||||
from typing import List, Optional
|
||||
from django.db.models import QuerySet
|
||||
import re
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BlacklistService:
|
||||
"""
|
||||
黑名单过滤服务 - 过滤敏感域名
|
||||
|
||||
TODO: 后续版本支持从前端配置加载黑名单规则
|
||||
- 用户在开始扫描时配置黑名单 URL、域名、IP
|
||||
- 黑名单规则存储在数据库中,与 Scan 或 Engine 关联
|
||||
"""
|
||||
|
||||
# 默认黑名单正则规则
|
||||
DEFAULT_PATTERNS = [
|
||||
r'\.gov$', # .gov 结尾
|
||||
r'\.gov\.[a-z]{2}$', # .gov.cn, .gov.uk 等
|
||||
]
|
||||
|
||||
def __init__(self, patterns: Optional[List[str]] = None):
|
||||
"""
|
||||
初始化黑名单服务
|
||||
|
||||
Args:
|
||||
patterns: 正则表达式列表,None 使用默认规则
|
||||
"""
|
||||
self.patterns = patterns or self.DEFAULT_PATTERNS
|
||||
self._compiled_patterns = [re.compile(p) for p in self.patterns]
|
||||
|
||||
def filter_queryset(
|
||||
self,
|
||||
queryset: QuerySet,
|
||||
url_field: str = 'url'
|
||||
) -> QuerySet:
|
||||
"""
|
||||
数据库层面过滤 queryset
|
||||
|
||||
使用 PostgreSQL 正则表达式排除黑名单 URL
|
||||
|
||||
Args:
|
||||
queryset: 原始 queryset
|
||||
url_field: URL 字段名
|
||||
|
||||
Returns:
|
||||
QuerySet: 过滤后的 queryset
|
||||
"""
|
||||
for pattern in self.patterns:
|
||||
queryset = queryset.exclude(**{f'{url_field}__regex': pattern})
|
||||
return queryset
|
||||
|
||||
def filter_url(self, url: str) -> bool:
|
||||
"""
|
||||
检查单个 URL 是否通过黑名单过滤
|
||||
|
||||
Args:
|
||||
url: 要检查的 URL
|
||||
|
||||
Returns:
|
||||
bool: True 表示通过(不在黑名单),False 表示被过滤
|
||||
"""
|
||||
for pattern in self._compiled_patterns:
|
||||
if pattern.search(url):
|
||||
return False
|
||||
return True
|
||||
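
    # Usage sketch with the default rules (hostnames are illustrative only):
    #   BlacklistService().filter_url('tax.gov.cn')       -> False (matches r'\.gov\.[a-z]{2}$')
    #   BlacklistService().filter_url('shop.example.com') -> True  (kept)
    # Note the '$' anchors: only values that *end* in .gov / .gov.xx are filtered.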

    # TODO: planned for a later version
    # @classmethod
    # def from_scan(cls, scan_id: int) -> 'BlacklistService':
    #     """Load the blacklist rules configured for a scan from the database."""
    #     pass
@@ -1,295 +0,0 @@
"""
|
||||
快速扫描服务
|
||||
|
||||
负责解析用户输入(URL、域名、IP、CIDR)并创建对应的资产数据
|
||||
"""
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional, Literal, List, Dict, Any
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from django.db import transaction
|
||||
|
||||
from apps.common.validators import validate_url, detect_input_type, validate_domain, validate_ip, validate_cidr, is_valid_ip
|
||||
from apps.targets.services.target_service import TargetService
|
||||
from apps.targets.models import Target
|
||||
from apps.asset.dtos import WebSiteDTO
|
||||
from apps.asset.dtos.asset import EndpointDTO
|
||||
from apps.asset.repositories.asset.website_repository import DjangoWebSiteRepository
|
||||
from apps.asset.repositories.asset.endpoint_repository import DjangoEndpointRepository
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedInputDTO:
|
||||
"""
|
||||
解析输入 DTO
|
||||
|
||||
只在快速扫描流程中使用
|
||||
"""
|
||||
original_input: str
|
||||
input_type: Literal['url', 'domain', 'ip', 'cidr']
|
||||
target_name: str # host/domain/ip/cidr
|
||||
target_type: Literal['domain', 'ip', 'cidr']
|
||||
website_url: Optional[str] = None # 根 URL(scheme://host[:port])
|
||||
endpoint_url: Optional[str] = None # 完整 URL(含路径)
|
||||
is_valid: bool = True
|
||||
error: Optional[str] = None
|
||||
line_number: Optional[int] = None
|
||||
|
||||
|
||||
class QuickScanService:
|
||||
"""快速扫描服务 - 解析输入并创建资产"""
|
||||
|
||||
def __init__(self):
|
||||
self.target_service = TargetService()
|
||||
self.website_repo = DjangoWebSiteRepository()
|
||||
self.endpoint_repo = DjangoEndpointRepository()
|
||||
|
||||
def parse_inputs(self, inputs: List[str]) -> List[ParsedInputDTO]:
|
||||
"""
|
||||
解析多行输入
|
||||
|
||||
Args:
|
||||
inputs: 输入字符串列表(每行一个)
|
||||
|
||||
Returns:
|
||||
解析结果列表(跳过空行)
|
||||
"""
|
||||
results = []
|
||||
for line_number, input_str in enumerate(inputs, start=1):
|
||||
input_str = input_str.strip()
|
||||
|
||||
# 空行跳过
|
||||
if not input_str:
|
||||
continue
|
||||
|
||||
try:
|
||||
# 检测输入类型
|
||||
input_type = detect_input_type(input_str)
|
||||
|
||||
if input_type == 'url':
|
||||
dto = self._parse_url_input(input_str, line_number)
|
||||
else:
|
||||
dto = self._parse_target_input(input_str, input_type, line_number)
|
||||
|
||||
results.append(dto)
|
||||
except ValueError as e:
|
||||
# 解析失败,记录错误
|
||||
results.append(ParsedInputDTO(
|
||||
original_input=input_str,
|
||||
input_type='domain', # 默认类型
|
||||
target_name=input_str,
|
||||
target_type='domain',
|
||||
is_valid=False,
|
||||
error=str(e),
|
||||
line_number=line_number
|
||||
))
|
||||
|
||||
return results
|
||||
|
||||
def _parse_url_input(self, url_str: str, line_number: int) -> ParsedInputDTO:
|
||||
"""
|
||||
解析 URL 输入
|
||||
|
||||
Args:
|
||||
url_str: URL 字符串
|
||||
line_number: 行号
|
||||
|
||||
Returns:
|
||||
ParsedInputDTO
|
||||
"""
|
||||
# 验证 URL 格式
|
||||
validate_url(url_str)
|
||||
|
||||
# 使用标准库解析
|
||||
parsed = urlparse(url_str)
|
||||
|
||||
host = parsed.hostname # 不含端口
|
||||
has_path = parsed.path and parsed.path != '/'
|
||||
|
||||
# 构建 root_url: scheme://host[:port]
|
||||
root_url = f"{parsed.scheme}://{parsed.netloc}"
|
||||
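        # e.g. 'https://shop.example.com:8443/cart?item=1' (illustrative) gives
        # root_url 'https://shop.example.com:8443' and host 'shop.example.com';
        # because the path is not '/', the full URL is also kept as endpoint_url.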

        # Detect the host type (domain or ip)
        target_type = 'ip' if is_valid_ip(host) else 'domain'

        return ParsedInputDTO(
            original_input=url_str,
            input_type='url',
            target_name=host,
            target_type=target_type,
            website_url=root_url,
            endpoint_url=url_str if has_path else None,
            line_number=line_number
        )

    def _parse_target_input(
        self,
        input_str: str,
        input_type: str,
        line_number: int
    ) -> ParsedInputDTO:
        """
        Parse a non-URL input (domain/ip/cidr).

        Args:
            input_str: input string
            input_type: input type
            line_number: line number

        Returns:
            ParsedInputDTO
        """
        # Validate the format
        if input_type == 'domain':
            validate_domain(input_str)
            target_type = 'domain'
        elif input_type == 'ip':
            validate_ip(input_str)
            target_type = 'ip'
        elif input_type == 'cidr':
            validate_cidr(input_str)
            target_type = 'cidr'
        else:
            raise ValueError(f"Unknown input type: {input_type}")

        return ParsedInputDTO(
            original_input=input_str,
            input_type=input_type,
            target_name=input_str,
            target_type=target_type,
            website_url=None,
            endpoint_url=None,
            line_number=line_number
        )

    @transaction.atomic
    def process_quick_scan(
        self,
        inputs: List[str],
        engine_id: int
    ) -> Dict[str, Any]:
        """
        Handle a quick scan request.

        Args:
            inputs: list of input strings
            engine_id: scan engine ID

        Returns:
            Result dictionary
        """
        # 1. Parse the input
        parsed_inputs = self.parse_inputs(inputs)

        # Split valid and invalid inputs
        valid_inputs = [p for p in parsed_inputs if p.is_valid]
        invalid_inputs = [p for p in parsed_inputs if not p.is_valid]

        if not valid_inputs:
            return {
                'targets': [],
                'target_stats': {'created': 0, 'reused': 0, 'failed': len(invalid_inputs)},
                'asset_stats': {'websites_created': 0, 'endpoints_created': 0},
                'errors': [
                    {'line_number': p.line_number, 'input': p.original_input, 'error': p.error}
                    for p in invalid_inputs
                ]
            }

        # 2. Create the assets
        asset_result = self.create_assets_from_parsed_inputs(valid_inputs)

        # 3. Return the result
        return {
            'targets': asset_result['targets'],
            'target_stats': asset_result['target_stats'],
            'asset_stats': asset_result['asset_stats'],
            'errors': [
                {'line_number': p.line_number, 'input': p.original_input, 'error': p.error}
                for p in invalid_inputs
            ]
        }

    def create_assets_from_parsed_inputs(
        self,
        parsed_inputs: List[ParsedInputDTO]
    ) -> Dict[str, Any]:
        """
        Create assets from the parse results.

        Args:
            parsed_inputs: list of parse results (valid inputs only)

        Returns:
            Creation result dictionary
        """
        # 1. Collect all target data (in memory, deduplicated)
        targets_data = {}
        for dto in parsed_inputs:
            if dto.target_name not in targets_data:
                targets_data[dto.target_name] = {'name': dto.target_name, 'type': dto.target_type}

        targets_list = list(targets_data.values())

        # 2. Bulk-create targets (reusing the existing method)
        target_result = self.target_service.batch_create_targets(targets_list)

        # 3. Fetch the just-created targets and build a name -> id map
        target_names = [d['name'] for d in targets_list]
        targets = Target.objects.filter(name__in=target_names)
        target_id_map = {t.name: t.id for t in targets}

        # 4. Collect website DTOs (in memory, deduplicated)
        website_dtos = []
        seen_websites = set()
        for dto in parsed_inputs:
            if dto.website_url and dto.website_url not in seen_websites:
                seen_websites.add(dto.website_url)
                target_id = target_id_map.get(dto.target_name)
                if target_id:
                    website_dtos.append(WebSiteDTO(
                        target_id=target_id,
                        url=dto.website_url,
                        host=dto.target_name
                    ))

        # 5. Bulk-create websites (skip existing ones)
        websites_created = 0
        if website_dtos:
            websites_created = self.website_repo.bulk_create_ignore_conflicts(website_dtos)

        # 6. Collect endpoint DTOs (in memory, deduplicated)
        endpoint_dtos = []
        seen_endpoints = set()
        for dto in parsed_inputs:
            if dto.endpoint_url and dto.endpoint_url not in seen_endpoints:
                seen_endpoints.add(dto.endpoint_url)
                target_id = target_id_map.get(dto.target_name)
                if target_id:
                    endpoint_dtos.append(EndpointDTO(
                        target_id=target_id,
                        url=dto.endpoint_url,
                        host=dto.target_name
                    ))

        # 7. Bulk-create endpoints (skip existing ones)
        endpoints_created = 0
        if endpoint_dtos:
            endpoints_created = self.endpoint_repo.bulk_create_ignore_conflicts(endpoint_dtos)

        return {
            'targets': list(targets),
            'target_stats': {
                'created': target_result['created_count'],
                'reused': 0,  # bulk_create cannot distinguish created from reused
                'failed': target_result['failed_count']
            },
            'asset_stats': {
                'websites_created': websites_created,
                'endpoints_created': endpoints_created
            }
        }
@@ -1,258 +0,0 @@
"""
|
||||
扫描任务服务
|
||||
|
||||
负责 Scan 模型的所有业务逻辑
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
from typing import Dict, List, TYPE_CHECKING
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from django.conf import settings
|
||||
from django.db import transaction
|
||||
from django.db.utils import DatabaseError, IntegrityError, OperationalError
|
||||
from django.core.exceptions import ValidationError, ObjectDoesNotExist
|
||||
|
||||
from apps.scan.models import Scan
|
||||
from apps.scan.repositories import DjangoScanRepository
|
||||
from apps.targets.repositories import DjangoTargetRepository, DjangoOrganizationRepository
|
||||
from apps.engine.repositories import DjangoEngineRepository
from apps.targets.models import Target
from apps.engine.models import ScanEngine
from apps.common.definitions import ScanStatus

logger = logging.getLogger(__name__)


class ScanService:
    """
    Scan task service (coordinator).

    Responsibilities:
    - Coordinate the individual sub-services
    - Provide a single public interface
    - Preserve backward compatibility

    Note:
    - The concrete business logic lives in the sub-services
    - This class mainly delegates and coordinates
    """

    # Final statuses: once set, they must not be overwritten
    FINAL_STATUSES = {
        ScanStatus.COMPLETED,
        ScanStatus.FAILED,
        ScanStatus.CANCELLED
    }

    def __init__(self):
        """
        Initialize the service.
        """
        # Initialize sub-services (imported lazily to avoid circular imports)
        from apps.scan.services.scan_creation_service import ScanCreationService
        from apps.scan.services.scan_state_service import ScanStateService
        from apps.scan.services.scan_control_service import ScanControlService
        from apps.scan.services.scan_stats_service import ScanStatsService

        self.creation_service = ScanCreationService()
        self.state_service = ScanStateService()
        self.control_service = ScanControlService()
        self.stats_service = ScanStatsService()

        # Keep ScanRepository (used by get_scan)
        self.scan_repo = DjangoScanRepository()

    def get_scan(self, scan_id: int, prefetch_relations: bool) -> Scan | None:
        """
        Fetch a scan task (with related objects).

        Preloads engine and target to avoid N+1 queries.

        Args:
            scan_id: Scan task ID
            prefetch_relations: Whether to preload the engine/target relations

        Returns:
            Scan object (with engine and target) or None
        """
        return self.scan_repo.get_by_id(scan_id, prefetch_relations)

    def get_all_scans(self, prefetch_relations: bool = True):
        return self.scan_repo.get_all(prefetch_relations=prefetch_relations)

    def prepare_initiate_scan(
        self,
        organization_id: int | None = None,
        target_id: int | None = None,
        engine_id: int | None = None
    ) -> tuple[List[Target], ScanEngine]:
        """
        Prepare to create a scan task; returns the targets and scan engine needed.
        """
        return self.creation_service.prepare_initiate_scan(
            organization_id, target_id, engine_id
        )

    def prepare_initiate_scan_multi_engine(
        self,
        organization_id: int | None = None,
        target_id: int | None = None,
        engine_ids: List[int] | None = None
    ) -> tuple[List[Target], str, List[str], List[int]]:
        """
        Prepare to create a multi-engine scan task.

        Returns:
            (targets, merged configuration, engine name list, engine ID list)
        """
        return self.creation_service.prepare_initiate_scan_multi_engine(
            organization_id, target_id, engine_ids
        )

    def create_scans(
        self,
        targets: List[Target],
        engine_ids: List[int],
        engine_names: List[str],
        merged_configuration: str,
        scheduled_scan_name: str | None = None
    ) -> List[Scan]:
        """Create scan tasks in bulk (delegates to ScanCreationService)."""
        return self.creation_service.create_scans(
            targets, engine_ids, engine_names, merged_configuration, scheduled_scan_name
        )

    # ==================== State management (delegates to ScanStateService) ====================

    def update_status(
        self,
        scan_id: int,
        status: ScanStatus,
        error_message: str | None = None,
        stopped_at: datetime | None = None
    ) -> bool:
        """Update a Scan's status (delegates to ScanStateService)."""
        return self.state_service.update_status(
            scan_id, status, error_message, stopped_at
        )

    def update_status_if_match(
        self,
        scan_id: int,
        current_status: ScanStatus,
        new_status: ScanStatus,
        stopped_at: datetime | None = None
    ) -> bool:
        """Conditionally update a Scan's status (delegates to ScanStateService)."""
        return self.state_service.update_status_if_match(
            scan_id, current_status, new_status, stopped_at
        )

    def update_cached_stats(self, scan_id: int) -> dict | None:
        """Refresh cached statistics (delegates to ScanStateService); returns the stats dict."""
        return self.state_service.update_cached_stats(scan_id)

    # ==================== Progress tracking (delegates to ScanStateService) ====================

    def init_stage_progress(self, scan_id: int, stages: list[str]) -> bool:
        """Initialize stage progress (delegates to ScanStateService)."""
        return self.state_service.init_stage_progress(scan_id, stages)

    def start_stage(self, scan_id: int, stage: str) -> bool:
        """Start a stage (delegates to ScanStateService)."""
        return self.state_service.start_stage(scan_id, stage)

    def complete_stage(self, scan_id: int, stage: str, detail: str | None = None) -> bool:
        """Complete a stage (delegates to ScanStateService)."""
        return self.state_service.complete_stage(scan_id, stage, detail)

    def fail_stage(self, scan_id: int, stage: str, error: str | None = None) -> bool:
        """Mark a stage as failed (delegates to ScanStateService)."""
        return self.state_service.fail_stage(scan_id, stage, error)

    def cancel_running_stages(self, scan_id: int, final_status: str = "cancelled") -> bool:
        """Cancel all running stages (delegates to ScanStateService)."""
        return self.state_service.cancel_running_stages(scan_id, final_status)

    # TODO: not wired up yet
    def add_command_to_scan(self, scan_id: int, stage_name: str, tool_name: str, command: str) -> bool:
        """
        Incrementally append a command to a scan stage.

        Args:
            scan_id: Scan task ID
            stage_name: Stage name (e.g. 'subdomain_discovery', 'port_scan')
            tool_name: Tool name
            command: Executed command

        Returns:
            bool: Whether the command was recorded
        """
        try:
            scan = self.get_scan(scan_id, prefetch_relations=False)
            if not scan:
                logger.error(f"Scan task does not exist: {scan_id}")
                return False

            # Note: read-modify-write on a JSON field; not atomic under concurrency
            stage_progress = scan.stage_progress or {}

            # Make sure the stage exists
            if stage_name not in stage_progress:
                stage_progress[stage_name] = {'status': 'running', 'commands': []}

            # Make sure the commands list exists
            if 'commands' not in stage_progress[stage_name]:
                stage_progress[stage_name]['commands'] = []

            # Append the command
            command_entry = f"{tool_name}: {command}"
            stage_progress[stage_name]['commands'].append(command_entry)

            scan.stage_progress = stage_progress
            scan.save(update_fields=['stage_progress'])

            command_count = len(stage_progress[stage_name]['commands'])
            logger.info(f"✓ Recorded command: {stage_name}.{tool_name} (total: {command_count})")
            return True

        except Exception as e:
            logger.error(f"Failed to record command: {e}")
            return False

    # ==================== Deletion and control (delegates to ScanControlService) ====================

    def delete_scans_two_phase(self, scan_ids: List[int]) -> dict:
        """Two-phase deletion of scan tasks (delegates to ScanControlService)."""
        return self.control_service.delete_scans_two_phase(scan_ids)

    def stop_scan(self, scan_id: int) -> tuple[bool, int]:
        """Stop a scan task (delegates to ScanControlService)."""
        return self.control_service.stop_scan(scan_id)

    def hard_delete_scans(self, scan_ids: List[int]) -> tuple[int, Dict[str, int]]:
        """
        Hard-delete scan tasks (actually removes the data).

        Runs inside the Worker container; removes soft-deleted scans and their
        associated data.

        Args:
            scan_ids: List of scan task IDs

        Returns:
            (deleted count, details dict)
        """
        return self.scan_repo.hard_delete_by_ids(scan_ids)

    # ==================== Statistics (delegates to ScanStatsService) ====================

    def get_statistics(self) -> dict:
        """Fetch scan statistics (delegates to ScanStatsService)."""
        return self.stats_service.get_statistics()


# Exported interface
__all__ = ['ScanService']
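# A minimal usage sketch (assumes the module above is importable as
# apps.scan.services.scan_service; the scan_id is illustrative). It marks a
# scan COMPLETED without clobbering a final status: the conditional update
# only succeeds while the scan is still RUNNING, so a concurrent
# CANCELLED/FAILED written elsewhere survives.
from apps.common.definitions import ScanStatus
from apps.scan.services.scan_service import ScanService

service = ScanService()
updated = service.update_status_if_match(
    scan_id=42,
    current_status=ScanStatus.RUNNING,
    new_status=ScanStatus.COMPLETED,
)
if not updated:
    # The scan already reached one of ScanService.FINAL_STATUSES; leave it be.
    pass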
@@ -1,364 +0,0 @@
"""
Target export service

Provides unified target extraction and file export, supporting:
- URL export (streaming writes + default-value fallback)
- Domain/IP export (for port scanning)
- Blacklist filtering integration
"""

import ipaddress
import logging
from pathlib import Path
from typing import Dict, Any, Optional

from django.db.models import QuerySet

from .blacklist_service import BlacklistService

logger = logging.getLogger(__name__)


class TargetExportService:
    """
    Target export service - unified target extraction and file export.

    Usage:
        # The Task layer decides the data source
        queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)

        # Use the export service
        blacklist_service = BlacklistService()
        export_service = TargetExportService(blacklist_service=blacklist_service)
        result = export_service.export_urls(target_id, output_path, queryset)
    """

    def __init__(self, blacklist_service: Optional[BlacklistService] = None):
        """
        Initialize the export service.

        Args:
            blacklist_service: Blacklist filter service; None disables filtering
        """
        self.blacklist_service = blacklist_service

    def export_urls(
        self,
        target_id: int,
        output_path: str,
        queryset: QuerySet,
        url_field: str = 'url',
        batch_size: int = 1000
    ) -> Dict[str, Any]:
        """
        Unified URL export.

        Automatically checks whether the database has data:
        - With data: stream database rows to the file
        - Without data: fall back to the default URL generator

        Args:
            target_id: Target ID
            output_path: Output file path
            queryset: Data source queryset (built by the Task layer; should be
                values_list(..., flat=True))
            url_field: URL field name (used for blacklist filtering)
            batch_size: Batch size

        Returns:
            dict: {
                'success': bool,
                'output_file': str,
                'total_count': int
            }

        Raises:
            IOError: File write failure
        """
        output_file = Path(output_path)
        output_file.parent.mkdir(parents=True, exist_ok=True)

        logger.info("Starting URL export - target_id=%s, output=%s", target_id, output_path)

        # Database-level blacklist filtering
        if self.blacklist_service:
            # Note: that would need the raw queryset, not a values_list.
            # Since the Task layer passes a values_list, filtering has to
            # happen at the Task layer instead.
            pass

        total_count = 0
        try:
            with open(output_file, 'w', encoding='utf-8', buffering=8192) as f:
                for url in queryset.iterator(chunk_size=batch_size):
                    if url:
                        # Python-level blacklist filtering
                        if self.blacklist_service and not self.blacklist_service.filter_url(url):
                            continue
                        f.write(f"{url}\n")
                        total_count += 1

                        if total_count % 10000 == 0:
                            logger.info("Exported %d URLs...", total_count)
        except IOError as e:
            logger.error("File write failed: %s - %s", output_path, e)
            raise

        # Default-value fallback mode
        if total_count == 0:
            total_count = self._generate_default_urls(target_id, output_file)

        logger.info("✓ URL export finished - count: %d, file: %s", total_count, output_path)

        return {
            'success': True,
            'output_file': str(output_file),
            'total_count': total_count
        }

    def _generate_default_urls(
        self,
        target_id: int,
        output_path: Path
    ) -> int:
        """
        Default URL generator (internal).

        Builds default URLs from the Target type:
        - DOMAIN: http(s)://domain
        - IP: http(s)://ip
        - CIDR: expand to http(s)://ip for every IP
        - URL: the target URL itself

        Args:
            target_id: Target ID
            output_path: Output file path

        Returns:
            int: Number of URLs written
        """
        from apps.targets.services import TargetService
        from apps.targets.models import Target

        target_service = TargetService()
        target = target_service.get_target(target_id)

        if not target:
            logger.warning("Target ID %d does not exist; cannot generate default URLs", target_id)
            return 0

        target_name = target.name
        target_type = target.type

        logger.info("Lazy-fallback mode: Target type=%s, name=%s", target_type, target_name)

        total_urls = 0

        with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
            if target_type == Target.TargetType.DOMAIN:
                urls = [f"http://{target_name}", f"https://{target_name}"]
                for url in urls:
                    if self._should_write_url(url):
                        f.write(f"{url}\n")
                        total_urls += 1

            elif target_type == Target.TargetType.IP:
                urls = [f"http://{target_name}", f"https://{target_name}"]
                for url in urls:
                    if self._should_write_url(url):
                        f.write(f"{url}\n")
                        total_urls += 1

            elif target_type == Target.TargetType.CIDR:
                try:
                    network = ipaddress.ip_network(target_name, strict=False)

                    for ip in network.hosts():
                        urls = [f"http://{ip}", f"https://{ip}"]
                        for url in urls:
                            if self._should_write_url(url):
                                f.write(f"{url}\n")
                                total_urls += 1

                        if total_urls % 10000 == 0:
                            logger.info("Generated %d URLs...", total_urls)

                    # Special handling for /32 and /128
                    if total_urls == 0:
                        ip = str(network.network_address)
                        urls = [f"http://{ip}", f"https://{ip}"]
                        for url in urls:
                            if self._should_write_url(url):
                                f.write(f"{url}\n")
                                total_urls += 1

                except ValueError as e:
                    logger.error("CIDR parsing failed: %s - %s", target_name, e)
                    raise ValueError(f"Invalid CIDR: {target_name}") from e

            elif target_type == Target.TargetType.URL:
                if self._should_write_url(target_name):
                    f.write(f"{target_name}\n")
                    total_urls = 1
            else:
                logger.warning("Unsupported Target type: %s", target_type)

        logger.info("✓ Lazy-fallback generated default URLs - count: %d", total_urls)
        return total_urls

    def _should_write_url(self, url: str) -> bool:
        """Check whether a URL should be written (blacklist filter)."""
        if self.blacklist_service:
            return self.blacklist_service.filter_url(url)
        return True

    def export_targets(
        self,
        target_id: int,
        output_path: str,
        batch_size: int = 1000
    ) -> Dict[str, Any]:
        """
        Domain/IP export (for port scanning).

        Export logic is chosen from the Target type:
        - DOMAIN: stream subdomains from the Subdomain table
        - IP: write the IP address directly
        - CIDR: expand to every host IP

        Args:
            target_id: Target ID
            output_path: Output file path
            batch_size: Batch size

        Returns:
            dict: {
                'success': bool,
                'output_file': str,
                'total_count': int,
                'target_type': str
            }
        """
        from apps.targets.services import TargetService
        from apps.targets.models import Target

        output_file = Path(output_path)
        output_file.parent.mkdir(parents=True, exist_ok=True)

        # Fetch the Target
        target_service = TargetService()
        target = target_service.get_target(target_id)

        if not target:
            raise ValueError(f"Target ID {target_id} does not exist")

        target_type = target.type
        target_name = target.name

        logger.info(
            "Exporting scan targets - Target ID: %d, Name: %s, Type: %s, output file: %s",
            target_id, target_name, target_type, output_path
        )

        total_count = 0

        if target_type == Target.TargetType.DOMAIN:
            total_count = self._export_domains(target_id, target_name, output_file, batch_size)
            type_desc = "domains"

        elif target_type == Target.TargetType.IP:
            total_count = self._export_ip(target_name, output_file)
            type_desc = "IP"

        elif target_type == Target.TargetType.CIDR:
            total_count = self._export_cidr(target_name, output_file)
            type_desc = "CIDR IPs"

        else:
            raise ValueError(f"Unsupported target type: {target_type}")

        logger.info(
            "✓ Scan target export finished - type: %s, total: %d, file: %s",
            type_desc, total_count, output_path
        )

        return {
            'success': True,
            'output_file': str(output_file),
            'total_count': total_count,
            'target_type': target_type
        }

    def _export_domains(
        self,
        target_id: int,
        target_name: str,
        output_path: Path,
        batch_size: int
    ) -> int:
        """Export the subdomains of a DOMAIN target."""
        from apps.asset.services.asset.subdomain_service import SubdomainService

        subdomain_service = SubdomainService()
        domain_iterator = subdomain_service.iter_subdomain_names_by_target(
            target_id=target_id,
            chunk_size=batch_size
        )

        total_count = 0
        with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
            for domain_name in domain_iterator:
                if self._should_write_target(domain_name):
                    f.write(f"{domain_name}\n")
                    total_count += 1

                    if total_count % 10000 == 0:
                        logger.info("Exported %d domains...", total_count)

        # Default mode: if there are no subdomains, use the root domain
        if total_count == 0:
            logger.info("Falling back to the root domain: %s (target_id=%d)", target_name, target_id)
            if self._should_write_target(target_name):
                with open(output_path, 'w', encoding='utf-8') as f:
                    f.write(f"{target_name}\n")
                total_count = 1

        return total_count

    def _export_ip(self, target_name: str, output_path: Path) -> int:
        """Export an IP target."""
        if self._should_write_target(target_name):
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(f"{target_name}\n")
            return 1
        return 0

    def _export_cidr(self, target_name: str, output_path: Path) -> int:
        """Export a CIDR target, expanded to individual IPs."""
        network = ipaddress.ip_network(target_name, strict=False)
        total_count = 0

        with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
            for ip in network.hosts():
                ip_str = str(ip)
                if self._should_write_target(ip_str):
                    f.write(f"{ip_str}\n")
                    total_count += 1

                    if total_count % 10000 == 0:
                        logger.info("Exported %d IPs...", total_count)

        # Special handling for /32 and /128
        if total_count == 0:
            ip_str = str(network.network_address)
            if self._should_write_target(ip_str):
                with open(output_path, 'w', encoding='utf-8') as f:
                    f.write(f"{ip_str}\n")
                total_count = 1

        return total_count

    def _should_write_target(self, target: str) -> bool:
        """Check whether a target should be written (blacklist filter)."""
        if self.blacklist_service:
            return self.blacklist_service.filter_url(target)
        return True
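# A minimal sketch of the "filter at the Task layer" note in export_urls above:
# apply blacklist exclusions on the raw queryset before reducing it to a flat
# values_list. The blacklisted_hosts list and the __icontains exclusion are
# assumptions for illustration; BlacklistService itself only exposes per-URL
# filter_url() checks in this file.
from apps.asset.models import WebSite

def build_url_queryset(target_id: int, blacklisted_hosts: list[str]):
    qs = WebSite.objects.filter(target_id=target_id)
    for host in blacklisted_hosts:
        # Database-level exclusion is cheaper than per-row Python filtering
        qs = qs.exclude(url__icontains=host)
    return qs.values_list('url', flat=True)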
@@ -1,71 +0,0 @@
"""
Export site URLs to a TXT file Task

Uses TargetExportService for the shared export logic and default-value fallback.
Data source: WebSite.url
"""
import logging
from prefect import task

from apps.asset.models import WebSite
from apps.scan.services import TargetExportService, BlacklistService

logger = logging.getLogger(__name__)


@task(name="export_sites")
def export_sites_task(
    target_id: int,
    output_file: str,
    batch_size: int = 1000,
) -> dict:
    """
    Export all site URLs under a target to a TXT file.

    Data source: WebSite.url

    Lazy-fallback mode:
    - If the database is empty, generate default URLs from the Target type
    - DOMAIN: http(s)://domain
    - IP: http(s)://ip
    - CIDR: expand to a URL for every IP

    Args:
        target_id: Target ID
        output_file: Output file path (absolute)
        batch_size: Batch size per read, default 1000

    Returns:
        dict: {
            'success': bool,
            'output_file': str,
            'total_count': int
        }

    Raises:
        ValueError: Invalid parameters
        IOError: File write failure
    """
    # Build the data source queryset (the Task layer decides the data source)
    queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)

    # Run the export through TargetExportService
    blacklist_service = BlacklistService()
    export_service = TargetExportService(blacklist_service=blacklist_service)

    result = export_service.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=queryset,
        batch_size=batch_size
    )

    # Keep the return shape unchanged (backward compatibility)
    return {
        'success': result['success'],
        'output_file': result['output_file'],
        'total_count': result['total_count']
    }
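# A hedged invocation sketch for the task above: outside a Prefect flow the
# wrapped function can be called via .fn (Prefect 2.x); inside a flow, call the
# task directly. The path and target_id are illustrative.
result = export_sites_task.fn(target_id=1, output_file="/tmp/scan/sites.txt")
print(result["total_count"], result["output_file"])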
@@ -1,65 +0,0 @@
"""
Export URLs task

Exports a target's URLs to a file before fingerprinting.
Uses TargetExportService for the shared export logic and default-value fallback.
"""

import logging

from prefect import task

from apps.asset.models import WebSite
from apps.scan.services import TargetExportService, BlacklistService

logger = logging.getLogger(__name__)


@task(name="export_urls_for_fingerprint")
def export_urls_for_fingerprint_task(
    target_id: int,
    output_file: str,
    source: str = 'website',
    batch_size: int = 1000
) -> dict:
    """
    Export a target's URLs to a file (for fingerprinting).

    Data source: WebSite.url

    Lazy-fallback mode:
    - If the database is empty, generate default URLs from the Target type
    - DOMAIN: http(s)://domain
    - IP: http(s)://ip
    - CIDR: expand to a URL for every IP
    - URL: the target URL itself

    Args:
        target_id: Target ID
        output_file: Output file path
        source: Data source type (kept for compatibility with older callers)
        batch_size: Batch read size

    Returns:
        dict: {'output_file': str, 'total_count': int, 'source': str}
    """
    # Build the data source queryset (the Task layer decides the data source)
    queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)

    # Run the export through TargetExportService
    blacklist_service = BlacklistService()
    export_service = TargetExportService(blacklist_service=blacklist_service)

    result = export_service.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=queryset,
        batch_size=batch_size
    )

    # Keep the return shape unchanged (backward compatibility)
    return {
        'output_file': result['output_file'],
        'total_count': result['total_count'],
        'source': source
    }
@@ -1,66 +0,0 @@
"""
Export scan targets to a TXT file Task

Uses TargetExportService.export_targets() for the shared export logic.

Exports per Target type:
- DOMAIN: subdomains from the Subdomain table
- IP: target.name written directly
- CIDR: every IP in the CIDR range
"""
import logging
from prefect import task

from apps.scan.services import TargetExportService, BlacklistService

logger = logging.getLogger(__name__)


@task(name="export_scan_targets")
def export_scan_targets_task(
    target_id: int,
    output_file: str,
    batch_size: int = 1000
) -> dict:
    """
    Export scan targets to a TXT file.

    Export content is chosen automatically from the Target type:
    - DOMAIN: subdomains from the Subdomain table (streamed; handles 100k+ domains)
    - IP: target.name written directly (a single IP)
    - CIDR: every usable IP in the CIDR range

    Args:
        target_id: Target ID
        output_file: Output file path (absolute)
        batch_size: Batch size per read, default 1000 (DOMAIN targets only)

    Returns:
        dict: {
            'success': bool,
            'output_file': str,
            'total_count': int,
            'target_type': str
        }

    Raises:
        ValueError: Target does not exist
        IOError: File write failure
    """
    # Run the export through TargetExportService
    blacklist_service = BlacklistService()
    export_service = TargetExportService(blacklist_service=blacklist_service)

    result = export_service.export_targets(
        target_id=target_id,
        output_path=output_file,
        batch_size=batch_size
    )

    # Keep the return shape unchanged (backward compatibility)
    return {
        'success': result['success'],
        'output_file': result['output_file'],
        'total_count': result['total_count'],
        'target_type': result['target_type']
    }
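# Side note on the CIDR expansion used by export_targets/_export_cidr above:
# ipaddress.ip_network(...).hosts() skips the network and broadcast addresses
# for ordinary IPv4 prefixes, which is why the exporter falls back to
# network_address when nothing was written (the /32 and /128 cases).
import ipaddress

net = ipaddress.ip_network("10.0.0.0/30", strict=False)
print([str(ip) for ip in net.hosts()])  # ['10.0.0.1', '10.0.0.2']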
@@ -1,127 +0,0 @@
"""
Export site URLs to a file Task

Queries host+port pairs directly from the HostPortMapping table, assembles them
into URLs and writes them to a file. Uses TargetExportService for the
default-value fallback logic.

Special-case logic:
- Port 80: HTTP URL only (port number omitted)
- Port 443: HTTPS URL only (port number omitted)
- Other ports: both HTTP and HTTPS URLs (with the port number)
"""
import logging
from pathlib import Path
from prefect import task

from apps.asset.services import HostPortMappingService
from apps.scan.services import TargetExportService, BlacklistService

logger = logging.getLogger(__name__)


def _generate_urls_from_port(host: str, port: int) -> list[str]:
    """
    Build the URL list for a port.

    - Port 80: HTTP URL only (port number omitted)
    - Port 443: HTTPS URL only (port number omitted)
    - Other ports: both HTTP and HTTPS URLs (with the port number)
    """
    if port == 80:
        return [f"http://{host}"]
    elif port == 443:
        return [f"https://{host}"]
    else:
        return [f"http://{host}:{port}", f"https://{host}:{port}"]


@task(name="export_site_urls")
def export_site_urls_task(
    target_id: int,
    output_file: str,
    batch_size: int = 1000
) -> dict:
    """
    Export all site URLs under a target to a file (from HostPortMapping).

    Data source: HostPortMapping (host + port)

    Special-case logic:
    - Port 80: HTTP URL only (port number omitted)
    - Port 443: HTTPS URL only (port number omitted)
    - Other ports: both HTTP and HTTPS URLs (with the port number)

    Lazy-fallback mode:
    - If the database is empty, generate default URLs from the Target type
    - DOMAIN: http(s)://domain
    - IP: http(s)://ip
    - CIDR: expand to a URL for every IP

    Args:
        target_id: Target ID
        output_file: Output file path (absolute)
        batch_size: Batch size per iteration

    Returns:
        dict: {
            'success': bool,
            'output_file': str,
            'total_urls': int,
            'association_count': int  # number of host-port associations
        }

    Raises:
        ValueError: Invalid parameters
        IOError: File write failure
    """
    logger.info("Collecting site URLs - Target ID: %d, output file: %s", target_id, output_file)

    # Make sure the output directory exists
    output_path = Path(output_file)
    output_path.parent.mkdir(parents=True, exist_ok=True)

    # Initialize the blacklist service
    blacklist_service = BlacklistService()

    # Query the HostPortMapping table directly, ordered by host
    service = HostPortMappingService()
    associations = service.iter_host_port_by_target(
        target_id=target_id,
        batch_size=batch_size,
    )

    total_urls = 0
    association_count = 0

    # Stream to the file (with the special port handling)
    with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
        for assoc in associations:
            association_count += 1
            host = assoc['host']
            port = assoc['port']

            # Build URLs from the port number
            for url in _generate_urls_from_port(host, port):
                if blacklist_service.filter_url(url):
                    f.write(f"{url}\n")
                    total_urls += 1

            if association_count % 1000 == 0:
                logger.info("Processed %d associations, generated %d URLs...", association_count, total_urls)

    logger.info(
        "✓ Site URL export finished - associations: %d, total URLs: %d, file: %s",
        association_count, total_urls, str(output_path)
    )

    # Default-value fallback mode: reuse TargetExportService
    if total_urls == 0:
        export_service = TargetExportService(blacklist_service=blacklist_service)
        total_urls = export_service._generate_default_urls(target_id, output_path)

    return {
        'success': True,
        'output_file': str(output_path),
        'total_urls': total_urls,
        'association_count': association_count
    }
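# Quick illustration of the port-to-URL mapping implemented by
# _generate_urls_from_port above (hosts are examples):
assert _generate_urls_from_port("example.com", 80) == ["http://example.com"]
assert _generate_urls_from_port("example.com", 443) == ["https://example.com"]
assert _generate_urls_from_port("10.0.0.5", 8443) == [
    "http://10.0.0.5:8443",
    "https://10.0.0.5:8443",
]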
@@ -1,195 +0,0 @@
"""
Merge and deduplicate domains task

Combines the merge + parse + validate steps into one for performance:
- Single command (LC_ALL=C sort -u)
- C-level performance in a single process
- No temp files, zero extra overhead
- Handles tens of millions of rows

Performance notes:
- LC_ALL=C byte-order comparison (20-30% faster than locale-aware sorting)
- One process handles multiple files directly (no pipe overhead)
- Constant memory footprint (~50MB for 500k domains)
- ~0.5 s for 500k domains (about 67% faster than the Python version)

Note:
- Tool output (amass/subfinder) is already normalized (lowercase, no blank lines)
- sort -u handles dedup and ordering in one pass
- No extra filtering needed, which keeps it fast
"""

import logging
import uuid
import subprocess
from pathlib import Path
from datetime import datetime
from prefect import task
from typing import List

logger = logging.getLogger(__name__)

# Note: implemented with plain system commands; no Python buffer tuning needed.
# Tool output (amass/subfinder) is already lowercase and normalized.

@task(
    name='merge_and_deduplicate',
    retries=1,
    log_prints=True
)
def merge_and_validate_task(
    result_files: List[str],
    result_dir: str
) -> str:
    """
    Merge scan results and deduplicate (high-throughput streaming).

    Steps:
    1. Run LC_ALL=C sort -u over the files directly
    2. Sorting and dedup happen in one pass
    3. Return the path of the deduplicated file

    Command: LC_ALL=C sort -u file1 file2 file3 -o output
    Note: tool output is already normalized (lowercase, no blank lines), so no
    extra processing is needed.

    Args:
        result_files: Result file paths
        result_dir: Result directory

    Returns:
        str: Path of the deduplicated domain file

    Raises:
        RuntimeError: Processing failure

    Performance:
        - Plain system command (C implementation), single minimal process
        - LC_ALL=C: byte-order comparison
        - sort -u: processes multiple files directly (no pipe overhead)

    Design:
        - One minimal command, no redundant processing
        - Single process, direct execution (no pipe/redirect overhead)
        - Memory is only used during sort (external sort, so it will not OOM)
    """
    logger.info("Merging and deduplicating %d result files (system-command fast path)", len(result_files))

    result_path = Path(result_dir)

    # Check that the input files exist
    valid_files = []
    for file_path_str in result_files:
        file_path = Path(file_path_str)
        if file_path.exists():
            valid_files.append(str(file_path))
        else:
            logger.warning("Result file does not exist: %s", file_path)

    if not valid_files:
        raise RuntimeError("None of the result files exist")

    # Build the output file path
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    short_uuid = uuid.uuid4().hex[:4]
    merged_file = result_path / f"merged_{timestamp}_{short_uuid}.txt"

    try:
        # ==================== Sort + dedup in a single system command ====================
        # LC_ALL=C: byte-order comparison (20-30% faster than locale-aware)
        # sort -u: process multiple files directly, sorting and deduplicating
        # -o: safe output (more reliable than shell redirection)
        # Note: the paths are interpolated into a shell string, which assumes
        # they contain no spaces or shell metacharacters.
        cmd = f"LC_ALL=C sort -u {' '.join(valid_files)} -o {merged_file}"

        logger.debug("Running command: %s", cmd)

        # Scale the timeout with the total number of input lines
        total_lines = 0
        for file_path in valid_files:
            try:
                line_count_proc = subprocess.run(
                    ["wc", "-l", file_path],
                    check=True,
                    capture_output=True,
                    text=True,
                )
                total_lines += int(line_count_proc.stdout.strip().split()[0])
            except (subprocess.CalledProcessError, ValueError, IndexError):
                continue

        timeout = 3600
        if total_lines > 0:
            # Linear in the line count: roughly 0.1 s per line
            base_per_line = 0.1
            est = int(total_lines * base_per_line)
            timeout = max(600, est)

        logger.info(
            "Auto-computed subdomain merge/dedup timeout: total input lines=%d, timeout=%ds",
            total_lines,
            timeout,
        )

        # capture_output so the CalledProcessError handler below can report stderr
        subprocess.run(
            cmd,
            shell=True,
            check=True,
            capture_output=True,
            text=True,
            timeout=timeout
        )

        logger.debug("✓ Merge and dedup finished")

        # ==================== Collect statistics ====================
        if not merged_file.exists():
            raise RuntimeError("Merged file was not created")

        # Count lines (system command for large-file performance)
        try:
            line_count_proc = subprocess.run(
                ["wc", "-l", str(merged_file)],
                check=True,
                capture_output=True,
                text=True
            )
            unique_count = int(line_count_proc.stdout.strip().split()[0])
        except (subprocess.CalledProcessError, ValueError, IndexError) as e:
            logger.warning(
                "wc -l failed (file: %s); falling back to Python line counting - error: %s",
                merged_file, e
            )
            unique_count = 0
            with open(merged_file, 'r', encoding='utf-8') as file_obj:
                for _ in file_obj:
                    unique_count += 1

        if unique_count == 0:
            raise RuntimeError("No valid domains found")

        file_size = merged_file.stat().st_size

        logger.info(
            "✓ Merge and dedup finished - %d unique domains, file size: %.2f KB",
            unique_count,
            file_size / 1024
        )

        return str(merged_file)

    except subprocess.TimeoutExpired:
        error_msg = f"Merge/dedup timed out (>{timeout}s); check the data volume or system resources"
        logger.warning(error_msg)  # timeouts are expected occasionally
        raise RuntimeError(error_msg)

    except subprocess.CalledProcessError as e:
        error_msg = f"System command failed: {e.stderr if e.stderr else str(e)}"
        logger.warning(error_msg)
        raise RuntimeError(error_msg) from e

    except IOError as e:
        error_msg = f"File I/O failed: {e}"
        logger.warning(error_msg)
        raise RuntimeError(error_msg) from e

    except Exception as e:
        error_msg = f"Merge/dedup failed: {e}"
        logger.error(error_msg, exc_info=True)
        raise
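# A minimal sketch (an assumption, not a drop-in replacement) of the same merge
# step without shell=True: LC_ALL=C goes through env and the file list is passed
# as argv entries, so paths with spaces or shell metacharacters stay safe.
import os
import subprocess
from pathlib import Path

def sort_unique(valid_files: list[str], merged_file: Path, timeout: int) -> None:
    env = dict(os.environ, LC_ALL="C")
    subprocess.run(
        ["sort", "-u", *valid_files, "-o", str(merged_file)],
        check=True,
        timeout=timeout,
        env=env,
        capture_output=True,
        text=True,
    )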
@@ -1,74 +0,0 @@
"""
Export site URL list task

Uses TargetExportService for the shared export logic and default-value fallback.
Data source: WebSite.url (for crawlers such as katana)
"""

import logging
from prefect import task

from apps.asset.models import WebSite
from apps.scan.services import TargetExportService, BlacklistService

logger = logging.getLogger(__name__)


@task(
    name='export_sites_for_url_fetch',
    retries=1,
    log_prints=True
)
def export_sites_task(
    output_file: str,
    target_id: int,
    scan_id: int,
    batch_size: int = 1000
) -> dict:
    """
    Export the site URL list to a file (for crawlers such as katana).

    Data source: WebSite.url

    Lazy-fallback mode:
    - If the database is empty, generate default URLs from the Target type
    - DOMAIN: http(s)://domain
    - IP: http(s)://ip
    - CIDR: expand to a URL for every IP

    Args:
        output_file: Output file path
        target_id: Target ID
        scan_id: Scan ID (kept for compatibility with older callers)
        batch_size: Batch size (memory optimization)

    Returns:
        dict: {
            'output_file': str,  # output file path
            'asset_count': int,  # number of assets
        }

    Raises:
        ValueError: Invalid parameters
        RuntimeError: Execution failure
    """
    # Build the data source queryset (the Task layer decides the data source)
    queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)

    # Run the export through TargetExportService
    blacklist_service = BlacklistService()
    export_service = TargetExportService(blacklist_service=blacklist_service)

    result = export_service.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=queryset,
        batch_size=batch_size
    )

    # Keep the return shape unchanged (backward compatibility)
    return {
        'output_file': result['output_file'],
        'asset_count': result['total_count'],
    }
@@ -1,65 +0,0 @@
"""Export Endpoint URLs to a file Task

Uses TargetExportService for the shared export logic and default-value fallback.
Data source: Endpoint.url
"""

import logging
from typing import Dict

from prefect import task

from apps.asset.models import Endpoint
from apps.scan.services import TargetExportService, BlacklistService

logger = logging.getLogger(__name__)


@task(name="export_endpoints")
def export_endpoints_task(
    target_id: int,
    output_file: str,
    batch_size: int = 1000,
) -> Dict[str, object]:
    """Export all Endpoint URLs under a target to a text file.

    Data source: Endpoint.url

    Lazy-fallback mode:
    - If the database is empty, generate default URLs from the Target type
    - DOMAIN: http(s)://domain
    - IP: http(s)://ip
    - CIDR: expand to a URL for every IP

    Args:
        target_id: Target ID
        output_file: Output file path (absolute)
        batch_size: Batch size per database iteration

    Returns:
        dict: {
            "success": bool,
            "output_file": str,
            "total_count": int,
        }
    """
    # Build the data source queryset (the Task layer decides the data source)
    queryset = Endpoint.objects.filter(target_id=target_id).values_list('url', flat=True)

    # Run the export through TargetExportService
    blacklist_service = BlacklistService()
    export_service = TargetExportService(blacklist_service=blacklist_service)

    result = export_service.export_urls(
        target_id=target_id,
        output_path=output_file,
        queryset=queryset,
        batch_size=batch_size
    )

    # Keep the return shape unchanged (backward compatibility)
    return {
        "success": result['success'],
        "output_file": result['output_file'],
        "total_count": result['total_count'],
    }
@@ -1,37 +0,0 @@
"""
Scan module utilities package

Provides helper functions for scanning.
"""

from .directory_cleanup import remove_directory
from .command_builder import build_scan_command
from .command_executor import execute_and_wait, execute_stream
from .wordlist_helpers import ensure_wordlist_local
from .nuclei_helpers import ensure_nuclei_templates_local
from .performance import FlowPerformanceTracker, CommandPerformanceTracker
from .workspace_utils import setup_scan_workspace, setup_scan_directory
from . import config_parser

__all__ = [
    # Directory cleanup
    'remove_directory',
    # Workspace
    'setup_scan_workspace',  # create the root workspace for a Scan
    'setup_scan_directory',  # create scan subdirectories
    # Command building
    'build_scan_command',  # scan tool command builder (f-string based)
    # Command execution
    'execute_and_wait',  # blocking execution (file output)
    'execute_stream',  # streaming execution (real-time processing)
    # Wordlists
    'ensure_wordlist_local',  # ensure a local wordlist (with hash check)
    # Nuclei templates
    'ensure_nuclei_templates_local',  # ensure local templates (with commit hash check)
    # Performance monitoring
    'FlowPerformanceTracker',  # flow performance tracker (samples system resources)
    'CommandPerformanceTracker',  # command performance tracker
    # Config parsing
    'config_parser',
]
@@ -1,514 +0,0 @@
from rest_framework import viewsets, status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.exceptions import NotFound, APIException
from rest_framework.filters import SearchFilter
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.utils import DatabaseError, IntegrityError, OperationalError
import logging

from apps.common.response_helpers import success_response, error_response
from apps.common.error_codes import ErrorCodes
from apps.scan.utils.config_merger import ConfigConflictError

logger = logging.getLogger(__name__)

from ..models import Scan, ScheduledScan
from ..serializers import (
    ScanSerializer, ScanHistorySerializer, QuickScanSerializer,
    ScheduledScanSerializer, CreateScheduledScanSerializer,
    UpdateScheduledScanSerializer, ToggleScheduledScanSerializer
)
from ..services.scan_service import ScanService
from ..services.scheduled_scan_service import ScheduledScanService
from ..repositories import ScheduledScanDTO
from apps.targets.services.target_service import TargetService
from apps.targets.services.organization_service import OrganizationService
from apps.engine.services.engine_service import EngineService
from apps.common.definitions import ScanStatus
from apps.common.pagination import BasePagination


class ScanViewSet(viewsets.ModelViewSet):
    """Scan task viewset"""
    serializer_class = ScanSerializer
    pagination_class = BasePagination
    filter_backends = [SearchFilter]
    search_fields = ['target__name']  # search by target name

    def get_queryset(self):
        """Optimize the queryset to improve API performance.

        Query optimization strategy:
        - select_related: preload target and engine (one-to-one/many-to-one, via JOIN)
        - drop prefetch_related: avoid loading large asset datasets into memory
        - order_by: newest created tasks first

        Why this is fast:
        - List page: uses the cached statistics fields (cached_*_count) instead of live COUNT queries
        - Serializer: strictly validates the cached fields to keep data consistent
        - Pagination: only 10 records per page, so queries stay cheap
        - No big data loads: related asset data is no longer prefetched
        """
        # Keep only the necessary select_related; drop all prefetch_related
        scan_service = ScanService()
        queryset = scan_service.get_all_scans(prefetch_relations=True)

        return queryset

    def get_serializer_class(self):
        """Return a serializer class per action.

        - list: ScanHistorySerializer (includes summary and progress)
        - retrieve: ScanHistorySerializer (includes summary and progress)
        - everything else: the standard ScanSerializer
        """
        if self.action in ['list', 'retrieve']:
            return ScanHistorySerializer
        return ScanSerializer

    def destroy(self, request, *args, **kwargs):
        """
        Delete a single scan task (two-phase deletion).

        1. Soft delete: immediately hidden from users
        2. Hard delete: performed asynchronously in the background
        """
        try:
            scan = self.get_object()
            scan_service = ScanService()
            result = scan_service.delete_scans_two_phase([scan.id])

            return success_response(
                data={
                    'scanId': scan.id,
                    'deletedCount': result['soft_deleted_count'],
                    'deletedScans': result['scan_names']
                }
            )

        except Scan.DoesNotExist:
            return error_response(
                code=ErrorCodes.NOT_FOUND,
                status_code=status.HTTP_404_NOT_FOUND
            )
        except ValueError as e:
            return error_response(
                code=ErrorCodes.NOT_FOUND,
                message=str(e),
                status_code=status.HTTP_404_NOT_FOUND
            )
        except Exception as e:
            logger.exception("Error while deleting scan task")
            return error_response(
                code=ErrorCodes.SERVER_ERROR,
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

    @action(detail=False, methods=['post'])
    def quick(self, request):
        """
        Quick scan endpoint.

        What it does:
        1. Accepts a target list and engine configuration
        2. Parses inputs automatically (URL, domain, IP, CIDR)
        3. Bulk-creates Target, Website and Endpoint assets
        4. Immediately kicks off a batch scan

        Request body:
            {
                "targets": [{"name": "example.com"}, {"name": "https://example.com/api"}],
                "engine_ids": [1, 2]
            }

        Supported input formats:
        - Domain: example.com
        - IP: 192.168.1.1
        - CIDR: 10.0.0.0/8
        - URL: https://example.com/api/v1
        """
        from ..services.quick_scan_service import QuickScanService

        serializer = QuickScanSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        targets_data = serializer.validated_data['targets']
        engine_ids = serializer.validated_data.get('engine_ids')

        try:
            # Extract the raw input strings
            inputs = [t['name'] for t in targets_data]

            # 1. Parse inputs and create assets via QuickScanService
            quick_scan_service = QuickScanService()
            result = quick_scan_service.process_quick_scan(inputs, engine_ids[0] if engine_ids else None)

            targets = result['targets']

            if not targets:
                return error_response(
                    code=ErrorCodes.VALIDATION_ERROR,
                    message='No valid targets for scanning',
                    details=result.get('errors', []),
                    status_code=status.HTTP_400_BAD_REQUEST
                )

            # 2. Prepare the multi-engine scan
            scan_service = ScanService()
            _, merged_configuration, engine_names, engine_ids = scan_service.prepare_initiate_scan_multi_engine(
                target_id=targets[0].id,  # validate the engines against the first target
                engine_ids=engine_ids
            )

            # 3. Launch the scans in bulk
            created_scans = scan_service.create_scans(
                targets=targets,
                engine_ids=engine_ids,
                engine_names=engine_names,
                merged_configuration=merged_configuration
            )

            # Check whether any scan tasks were created
            if not created_scans:
                return error_response(
                    code=ErrorCodes.VALIDATION_ERROR,
                    message='No scan tasks were created. All targets may already have active scans.',
                    details={
                        'targetStats': result['target_stats'],
                        'assetStats': result['asset_stats'],
                        'errors': result.get('errors', [])
                    },
                    status_code=status.HTTP_422_UNPROCESSABLE_ENTITY
                )

            # Serialize the response
            scan_serializer = ScanSerializer(created_scans, many=True)

            return success_response(
                data={
                    'count': len(created_scans),
                    'targetStats': result['target_stats'],
                    'assetStats': result['asset_stats'],
                    'errors': result.get('errors', []),
                    'scans': scan_serializer.data
                },
                status_code=status.HTTP_201_CREATED
            )

        except ConfigConflictError as e:
            return error_response(
                code='CONFIG_CONFLICT',
                message=str(e),
                details=[
                    {'key': k, 'engines': [e1, e2]}
                    for k, e1, e2 in e.conflicts
                ],
                status_code=status.HTTP_400_BAD_REQUEST
            )

        except ValidationError as e:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message=str(e),
                status_code=status.HTTP_400_BAD_REQUEST
            )
        except Exception as e:
            logger.exception("Quick scan launch failed")
            return error_response(
                code=ErrorCodes.SERVER_ERROR,
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

    @action(detail=False, methods=['post'])
    def initiate(self, request):
        """
        Launch scan tasks.

        Request parameters:
        - organization_id: organization ID (int, optional)
        - target_id: target ID (int, optional)
        - engine_ids: scan engine ID list (list[int], required)

        Note: provide either organization_id or target_id

        Returns:
        - Scan task details (one or many)
        """
        # Read request data
        organization_id = request.data.get('organization_id')
        target_id = request.data.get('target_id')
        engine_ids = request.data.get('engine_ids')

        # Validate engine_ids
        if not engine_ids:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='Missing required parameter: engine_ids',
                status_code=status.HTTP_400_BAD_REQUEST
            )

        if not isinstance(engine_ids, list):
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='engine_ids must be an array',
                status_code=status.HTTP_400_BAD_REQUEST
            )

        try:
            # Step 1: prepare everything needed for a multi-engine scan
            scan_service = ScanService()
            targets, merged_configuration, engine_names, engine_ids = scan_service.prepare_initiate_scan_multi_engine(
                organization_id=organization_id,
                target_id=target_id,
                engine_ids=engine_ids
            )

            # Step 2: create the scan records in bulk and dispatch the scan tasks
            created_scans = scan_service.create_scans(
                targets=targets,
                engine_ids=engine_ids,
                engine_names=engine_names,
                merged_configuration=merged_configuration
            )

            # Check whether any scan tasks were created
            if not created_scans:
                return error_response(
                    code=ErrorCodes.VALIDATION_ERROR,
                    message='No scan tasks were created. All targets may already have active scans.',
                    status_code=status.HTTP_422_UNPROCESSABLE_ENTITY
                )

            # Serialize the response
            scan_serializer = ScanSerializer(created_scans, many=True)

            return success_response(
                data={
                    'count': len(created_scans),
                    'scans': scan_serializer.data
                },
                status_code=status.HTTP_201_CREATED
            )

        except ConfigConflictError as e:
            return error_response(
                code='CONFIG_CONFLICT',
                message=str(e),
                details=[
                    {'key': k, 'engines': [e1, e2]}
                    for k, e1, e2 in e.conflicts
                ],
                status_code=status.HTTP_400_BAD_REQUEST
            )

        except ObjectDoesNotExist as e:
            # Resource-not-found errors (raised by the service layer)
            return error_response(
                code=ErrorCodes.NOT_FOUND,
                message=str(e),
                status_code=status.HTTP_404_NOT_FOUND
            )

        except ValidationError as e:
            # Parameter validation errors (raised by the service layer)
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message=str(e),
                status_code=status.HTTP_400_BAD_REQUEST
            )

        except (DatabaseError, IntegrityError, OperationalError):
            # Database errors
            return error_response(
                code=ErrorCodes.SERVER_ERROR,
                message='Database error',
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE
            )

    # All snapshot actions and exports moved to the snapshot ViewSets in asset/views.py
    # GET /api/scans/{id}/subdomains/ -> SubdomainSnapshotViewSet
    # GET /api/scans/{id}/subdomains/export/ -> SubdomainSnapshotViewSet.export
    # GET /api/scans/{id}/websites/ -> WebsiteSnapshotViewSet
    # GET /api/scans/{id}/websites/export/ -> WebsiteSnapshotViewSet.export
    # GET /api/scans/{id}/directories/ -> DirectorySnapshotViewSet
    # GET /api/scans/{id}/directories/export/ -> DirectorySnapshotViewSet.export
    # GET /api/scans/{id}/endpoints/ -> EndpointSnapshotViewSet
    # GET /api/scans/{id}/endpoints/export/ -> EndpointSnapshotViewSet.export
    # GET /api/scans/{id}/ip-addresses/ -> HostPortMappingSnapshotViewSet
    # GET /api/scans/{id}/ip-addresses/export/ -> HostPortMappingSnapshotViewSet.export
    # GET /api/scans/{id}/vulnerabilities/ -> VulnerabilitySnapshotViewSet

    @action(detail=False, methods=['post', 'delete'], url_path='bulk-delete')
    def bulk_delete(self, request):
        """
        Bulk-delete scan records.

        Request parameters:
        - ids: scan ID list (list[int], required)

        Example request:
            POST /api/scans/bulk-delete/
            {
                "ids": [1, 2, 3]
            }

        Returns:
        - message: success message
        - deletedCount: number of records actually deleted

        Note:
        - Deletion cascades to associated subdomains, endpoints, etc.
        - Only existing records are deleted; unknown IDs are ignored
        """
        ids = request.data.get('ids', [])

        # Parameter validation
        if not ids:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='Missing required parameter: ids',
                status_code=status.HTTP_400_BAD_REQUEST
            )

        if not isinstance(ids, list):
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='ids must be an array',
                status_code=status.HTTP_400_BAD_REQUEST
            )

        if not all(isinstance(i, int) for i in ids):
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='All elements in ids array must be integers',
                status_code=status.HTTP_400_BAD_REQUEST
            )

        try:
            # Bulk delete through the service layer (two-phase deletion)
            scan_service = ScanService()
            result = scan_service.delete_scans_two_phase(ids)

            return success_response(
                data={
                    'deletedCount': result['soft_deleted_count'],
                    'deletedScans': result['scan_names']
                }
            )

        except ValueError as e:
            # No records found
            return error_response(
                code=ErrorCodes.NOT_FOUND,
                message=str(e),
                status_code=status.HTTP_404_NOT_FOUND
            )

        except Exception as e:
            logger.exception("Error while bulk-deleting scan tasks")
            return error_response(
                code=ErrorCodes.SERVER_ERROR,
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

    @action(detail=False, methods=['get'])
    def statistics(self, request):
        """
        Fetch scan statistics.

        Returns aggregate statistics for scan tasks, used by the dashboard and
        the scan history page. Aggregates over cached fields, so it is fast.

        Returns:
        - total: total number of scans
        - running: number of running scans
        - completed: number of completed scans
        - failed: number of failed scans
        - totalVulns: total vulnerabilities found
        - totalSubdomains: total subdomains found
        - totalEndpoints: total endpoints found
        - totalAssets: total assets
        """
        try:
            # Fetch the statistics through the service layer
            scan_service = ScanService()
            stats = scan_service.get_statistics()

            return success_response(
                data={
                    'total': stats['total'],
                    'running': stats['running'],
                    'completed': stats['completed'],
                    'failed': stats['failed'],
                    'totalVulns': stats['total_vulns'],
                    'totalSubdomains': stats['total_subdomains'],
                    'totalEndpoints': stats['total_endpoints'],
                    'totalWebsites': stats['total_websites'],
                    'totalAssets': stats['total_assets'],
                }
            )

        except (DatabaseError, OperationalError):
            return error_response(
                code=ErrorCodes.SERVER_ERROR,
                message='Database error',
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE
            )

    @action(detail=True, methods=['post'])
    def stop(self, request, pk=None):  # pylint: disable=unused-argument
        """
        Stop a scan task.

        URL: POST /api/scans/{id}/stop/

        What it does:
        - Terminates a running or initializing scan task
        - Updates the scan status to CANCELLED

        Status restrictions:
        - Only RUNNING or INITIATED scans can be stopped
        - Completed, failed or cancelled scans cannot be stopped

        Returns:
        - message: success message
        - revokedTaskCount: number of cancelled Flow Runs
        """
        try:
            # Handle the stop logic through the service layer
            scan_service = ScanService()
            success, revoked_count = scan_service.stop_scan(scan_id=pk)

            if not success:
                # Check whether the status disallows stopping
                scan = scan_service.get_scan(scan_id=pk, prefetch_relations=False)
                if scan and scan.status not in [ScanStatus.RUNNING, ScanStatus.INITIATED]:
                    return error_response(
                        code=ErrorCodes.BAD_REQUEST,
                        message=f'Cannot stop scan: current status is {ScanStatus(scan.status).label}',
                        status_code=status.HTTP_400_BAD_REQUEST
                    )
                # Other failure reasons
                return error_response(
                    code=ErrorCodes.SERVER_ERROR,
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
                )

            return success_response(
                data={'revokedTaskCount': revoked_count}
            )

        except ObjectDoesNotExist:
            return error_response(
                code=ErrorCodes.NOT_FOUND,
                message=f'Scan ID {pk} not found',
                status_code=status.HTTP_404_NOT_FOUND
            )

        except (DatabaseError, IntegrityError, OperationalError):
            return error_response(
                code=ErrorCodes.SERVER_ERROR,
                message='Database error',
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE
            )
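# A hedged client-side sketch of the bulk-delete contract documented above.
# The base URL, auth handling and the exact response envelope (built by
# success_response) are assumptions.
import requests

resp = requests.post(
    "http://localhost:8000/api/scans/bulk-delete/",
    json={"ids": [1, 2, 3]},
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # expected keys include 'deletedCount' and 'deletedScans'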
@@ -1,27 +0,0 @@
[tool.pytest.ini_options]
DJANGO_SETTINGS_MODULE = "config.settings"
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
testpaths = ["apps"]
addopts = "-v --reuse-db"

[tool.pylint]
django-settings-module = "config.settings"
load-plugins = "pylint_django"

[tool.pylint.messages_control]
disable = [
    "missing-docstring",
    "invalid-name",
    "too-few-public-methods",
    "no-member",
    "import-error",
    "no-name-in-module",
]

[tool.pylint.format]
max-line-length = 120

[tool.pylint.basic]
good-names = ["i", "j", "k", "ex", "Run", "_", "id", "pk", "ip", "url", "db", "qs"]
@@ -11,7 +11,7 @@ import { DashboardDataTable } from "@/components/dashboard/dashboard-data-table"
export default function Page() {
  return (
    // Content area containing cards, charts and data tables
    <div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
    <div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6 animate-dashboard-fade-in">
      {/* Top statistics cards */}
      <DashboardStatCards />
@@ -40,8 +40,11 @@ export async function generateMetadata({ params }: { params: Promise<{ locale: s
    title: t('title'),
    description: t('description'),
    keywords: t('keywords').split(',').map(k => k.trim()),
    generator: "Xingrin ASM Platform",
    generator: "Orbit ASM Platform",
    authors: [{ name: "yyhuni" }],
    icons: {
      icon: [{ url: "/icon.svg", type: "image/svg+xml" }],
    },
    openGraph: {
      title: t('ogTitle'),
      description: t('ogDescription'),
@@ -3,125 +3,226 @@
|
||||
import React from "react"
|
||||
import { useRouter } from "next/navigation"
|
||||
import { useTranslations } from "next-intl"
|
||||
import Lottie from "lottie-react"
|
||||
import securityAnimation from "@/public/animations/Security000-Purple.json"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Card, CardContent } from "@/components/ui/card"
|
||||
import {
|
||||
Field,
|
||||
FieldGroup,
|
||||
FieldLabel,
|
||||
} from "@/components/ui/field"
|
||||
import { Spinner } from "@/components/ui/spinner"
|
||||
import { useQueryClient } from "@tanstack/react-query"
|
||||
import dynamic from "next/dynamic"
|
||||
import { LoginBootScreen } from "@/components/auth/login-boot-screen"
|
||||
import { TerminalLogin } from "@/components/ui/terminal-login"
|
||||
import { useLogin, useAuth } from "@/hooks/use-auth"
|
||||
import { vulnerabilityKeys } from "@/hooks/use-vulnerabilities"
|
||||
import { useRoutePrefetch } from "@/hooks/use-route-prefetch"
|
||||
import { getAssetStatistics, getStatisticsHistory } from "@/services/dashboard.service"
|
||||
import { getScans } from "@/services/scan.service"
|
||||
import { VulnerabilityService } from "@/services/vulnerability.service"
|
||||
|
||||
// Dynamic import to avoid SSR issues with WebGL
|
||||
const PixelBlast = dynamic(() => import("@/components/PixelBlast"), { ssr: false })
|
||||
|
||||
const BOOT_SPLASH_MS = 600
|
||||
const BOOT_FADE_MS = 200
|
||||
|
||||
type BootOverlayPhase = "entering" | "visible" | "leaving" | "hidden"
|
||||
|
||||
export default function LoginPage() {
|
||||
// Preload all page components on login page
|
||||
useRoutePrefetch()
|
||||
const router = useRouter()
const queryClient = useQueryClient()
const { data: auth, isLoading: authLoading } = useAuth()
const { mutate: login, isPending } = useLogin()
const t = useTranslations("auth")

const [username, setUsername] = React.useState("")
const [password, setPassword] = React.useState("")
const { mutateAsync: login, isPending } = useLogin()
const t = useTranslations("auth.terminal")

const loginStartedRef = React.useRef(false)
const [loginReady, setLoginReady] = React.useState(false)

const [pixelFirstFrame, setPixelFirstFrame] = React.useState(false)
const handlePixelFirstFrame = React.useCallback(() => {
setPixelFirstFrame(true)
}, [])

// Extract the prefetch logic into a reusable function
const prefetchDashboardData = React.useCallback(async () => {
const scansParams = { page: 1, pageSize: 10 }
const vulnsParams = { page: 1, pageSize: 10 }

return Promise.allSettled([
queryClient.prefetchQuery({
queryKey: ["asset", "statistics"],
queryFn: getAssetStatistics,
}),
queryClient.prefetchQuery({
queryKey: ["asset", "statistics", "history", 7],
queryFn: () => getStatisticsHistory(7),
}),
queryClient.prefetchQuery({
queryKey: ["scans", scansParams],
queryFn: () => getScans(scansParams),
}),
queryClient.prefetchQuery({
queryKey: vulnerabilityKeys.list(vulnsParams),
queryFn: () => VulnerabilityService.getAllVulnerabilities(vulnsParams),
}),
])
}, [queryClient])

// Always show a short splash on entering the login page.
const [bootMinDone, setBootMinDone] = React.useState(false)
const [bootPhase, setBootPhase] = React.useState<BootOverlayPhase>("entering")

// If already logged in, redirect to dashboard
React.useEffect(() => {
if (auth?.authenticated) {
router.push("/dashboard/")
setBootMinDone(false)
setBootPhase("entering")

const bootTimer = setTimeout(() => setBootMinDone(true), BOOT_SPLASH_MS)
const raf = requestAnimationFrame(() => setBootPhase("visible"))

return () => {
clearTimeout(bootTimer)
cancelAnimationFrame(raf)
}
}, [auth, router])
}, [])

const handleSubmit = (e: React.FormEvent) => {
e.preventDefault()
login({ username, password })

// Start hiding the splash after the minimum time AND auth check completes.
// Note: don't schedule the fade-out timer in the same effect where we set `bootPhase`,
// otherwise the effect cleanup will cancel the timer when `bootPhase` changes.
React.useEffect(() => {
if (bootPhase !== "visible") return
if (!bootMinDone) return
if (authLoading) return
if (!pixelFirstFrame) return

setBootPhase("leaving")
}, [authLoading, bootMinDone, bootPhase, pixelFirstFrame])

React.useEffect(() => {
if (bootPhase !== "leaving") return

const timer = setTimeout(() => setBootPhase("hidden"), BOOT_FADE_MS)
return () => clearTimeout(timer)
}, [bootPhase])

// Memoize translations object to avoid recreating on every render
const translations = React.useMemo(() => ({
title: t("title"),
subtitle: t("subtitle"),
usernamePrompt: t("usernamePrompt"),
passwordPrompt: t("passwordPrompt"),
authenticating: t("authenticating"),
processing: t("processing"),
accessGranted: t("accessGranted"),
welcomeMessage: t("welcomeMessage"),
authFailed: t("authFailed"),
invalidCredentials: t("invalidCredentials"),
shortcuts: t("shortcuts"),
submit: t("submit"),
cancel: t("cancel"),
clear: t("clear"),
startEnd: t("startEnd"),
}), [t])

// If already logged in, warm up the dashboard, then redirect.
React.useEffect(() => {
if (authLoading) return
if (!auth?.authenticated) return
if (loginStartedRef.current) return

let cancelled = false

void (async () => {
await prefetchDashboardData()

if (cancelled) return
router.replace("/dashboard/")
})()

return () => {
cancelled = true
}
}, [auth?.authenticated, authLoading, prefetchDashboardData, router])

React.useEffect(() => {
if (!loginReady) return
router.replace("/dashboard/")
}, [loginReady, router])

const handleLogin = async (username: string, password: string) => {
loginStartedRef.current = true
setLoginReady(false)

// Run independent operations in parallel: login verification + prefetching the dashboard bundle
const [loginRes] = await Promise.all([
login({ username, password }),
router.prefetch("/dashboard/"),
])

// Prefetch dashboard data
await prefetchDashboardData()

// Prime auth cache so AuthLayout doesn't flash a full-screen loading state.
queryClient.setQueryData(["auth", "me"], {
authenticated: true,
user: loginRes.user,
})

setLoginReady(true)
}
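The handleLogin above overlaps independent network work instead of running it serially. A minimal standalone sketch of the same pattern (the login/prefetch names mirror the code above; the parameter types are assumptions for illustration):

// Sketch: run credential verification and route prefetch concurrently,
// then warm the data cache before flipping the "ready" flag.
async function warmLogin(
  login: (c: { username: string; password: string }) => Promise<{ user: unknown }>,
  prefetchRoute: () => Promise<void>,
  prefetchData: () => Promise<unknown>,
  creds: { username: string; password: string },
) {
  // Both calls are independent, so neither waits on the other.
  const [loginRes] = await Promise.all([login(creds), prefetchRoute()])
  // The data prefetch presumably needs the session established by login,
  // so it runs only after both complete.
  await prefetchData()
  return loginRes
}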
// Show spinner while loading
if (authLoading) {
return (
<div className="flex min-h-svh w-full flex-col items-center justify-center gap-4 bg-background">
<Spinner className="size-8 text-primary" />
<p className="text-muted-foreground text-sm" suppressHydrationWarning>loading...</p>
</div>
)
}

// Don't show login page if already logged in
if (auth?.authenticated) {
return null
}
const loginVisible = bootPhase === "leaving" || bootPhase === "hidden"

return (
<div className="login-bg flex min-h-svh flex-col p-6 md:p-10">
{/* Main content area */}
<div className="flex-1 flex items-center justify-center">
<div className="w-full max-w-sm md:max-w-4xl">
<Card className="overflow-hidden p-0">
<CardContent className="grid p-0 md:grid-cols-2">
<form className="p-6 md:p-8" onSubmit={handleSubmit}>
<FieldGroup>
{/* Fingerprint identifier - for FOFA/Shodan and other search engines to identify */}
<meta name="generator" content="Xingrin ASM Platform" />
<div className="flex flex-col items-center gap-2 text-center">
<h1 className="text-2xl font-bold">{t("title")}</h1>
<p className="text-sm text-muted-foreground mt-1">
{t("subtitle")}
</p>
</div>
<Field>
<FieldLabel htmlFor="username">{t("username")}</FieldLabel>
<Input
id="username"
type="text"
placeholder={t("usernamePlaceholder")}
value={username}
onChange={(e) => setUsername(e.target.value)}
required
autoFocus
/>
</Field>
<Field>
<FieldLabel htmlFor="password">{t("password")}</FieldLabel>
<Input
id="password"
type="password"
placeholder={t("passwordPlaceholder")}
value={password}
onChange={(e) => setPassword(e.target.value)}
required
/>
</Field>
<Field>
<Button type="submit" className="w-full" disabled={isPending}>
{isPending ? t("loggingIn") : t("login")}
</Button>
</Field>
</FieldGroup>
</form>
<div className="bg-primary/5 relative hidden md:flex md:items-center md:justify-center">
<div className="text-center p-4">
<Lottie
animationData={securityAnimation}
loop={true}
className="w-96 h-96 mx-auto"
/>
</div>
</div>
</CardContent>
</Card>
</div>
<div className="relative flex min-h-svh flex-col bg-black">
<div className={`fixed inset-0 z-0 transition-opacity duration-300 ${loginVisible ? "opacity-100" : "opacity-0"}`}>
<PixelBlast
onFirstFrame={handlePixelFirstFrame}
className=""
style={{}}
pixelSize={6.5}
patternScale={4.5}
color="#FF10F0"
speed={0.35}
enableRipples={false}
/>
</div>

{/* Fingerprint identifier - for FOFA/Shodan and other search engines to identify */}
<meta name="generator" content="Orbit ASM Platform" />

{/* Main content area */}
<div
className={`relative z-10 flex-1 flex items-center justify-center p-6 transition-[opacity,transform] duration-300 ${
loginVisible ? "opacity-100 translate-y-0" : "opacity-0 translate-y-2"
}`}
>
<TerminalLogin
onLogin={handleLogin}
authDone={loginReady}
isPending={isPending}
translations={translations}
/>
</div>

{/* Version number - fixed at the bottom of the page */}
<div className="flex-shrink-0 text-center py-4">
<div
className={`relative z-10 flex-shrink-0 text-center py-4 transition-opacity duration-300 ${
loginVisible ? "opacity-100" : "opacity-0"
}`}
>
<p className="text-xs text-muted-foreground">
{process.env.NEXT_PUBLIC_VERSION || 'dev'}
{process.env.NEXT_PUBLIC_VERSION || "dev"}
</p>
</div>

{/* Full-page splash overlay */}
{bootPhase !== "hidden" && (
<div
className={`fixed inset-0 z-50 transition-opacity ease-out ${
bootPhase === "visible" ? "opacity-100" : "opacity-0 pointer-events-none"
}`}
style={{ transitionDuration: `${BOOT_FADE_MS}ms` }}
>
<LoginBootScreen />
</div>
)}
</div>
)
}
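The splash overlay rendered above is driven by a small phase machine. A minimal sketch of the type and transitions, assuming BootOverlayPhase is declared as the four states the effects use:

type BootOverlayPhase = "entering" | "visible" | "leaving" | "hidden"

// Transitions, as driven by the effects in the component above:
// entering -> visible   on the first animation frame after mount
// visible  -> leaving   once bootMinDone && !authLoading && pixelFirstFrame
// leaving  -> hidden    BOOT_FADE_MS later, scheduled in a *separate* effect,
//                       so re-renders caused by bootPhase changes cannot run
//                       the cleanup that would cancel the pending fade timer.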
@@ -34,6 +34,7 @@ const FEATURE_LIST = [
{ key: "site_scan" },
{ key: "fingerprint_detect" },
{ key: "directory_scan" },
{ key: "screenshot" },
{ key: "url_fetch" },
{ key: "vuln_scan" },
] as const
@@ -48,6 +49,7 @@ function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
site_scan: false,
fingerprint_detect: false,
directory_scan: false,
screenshot: false,
url_fetch: false,
vuln_scan: false,
}
@@ -64,6 +66,7 @@ function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
site_scan: !!config.site_scan,
fingerprint_detect: !!config.fingerprint_detect,
directory_scan: !!config.directory_scan,
screenshot: !!config.screenshot,
url_fetch: !!config.url_fetch,
vuln_scan: !!config.vuln_scan,
}
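These three hunks thread a new `screenshot` feature flag through the list of features, the all-false defaults, and the boolean coercion of the engine config. A self-contained sketch of the coercion step (the key union below covers only the keys visible in the hunks; the real FeatureKey type may include more):

// Minimal sketch, assuming FeatureKey is (at least) the keys shown above.
type FeatureKey =
  | "site_scan" | "fingerprint_detect" | "directory_scan"
  | "screenshot" | "url_fetch" | "vuln_scan"

// Coerce possibly-missing/truthy config values to plain booleans.
function featuresFrom(config: Partial<Record<FeatureKey, unknown>>): Record<FeatureKey, boolean> {
  return {
    site_scan: !!config.site_scan,
    fingerprint_detect: !!config.fingerprint_detect,
    directory_scan: !!config.directory_scan,
    screenshot: !!config.screenshot, // newly added flag
    url_fetch: !!config.url_fetch,
    vuln_scan: !!config.vuln_scan,
  }
}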
@@ -3,9 +3,10 @@
import React from "react"
import { usePathname, useParams } from "next/navigation"
import Link from "next/link"
import { Target } from "lucide-react"
import { Target, LayoutDashboard, Package, FolderSearch, Image, ShieldAlert } from "lucide-react"
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
import { Badge } from "@/components/ui/badge"
import { Skeleton } from "@/components/ui/skeleton"
import { useScan } from "@/hooks/use-scans"
import { useTranslations } from "next-intl"

@@ -19,94 +20,124 @@ export default function ScanHistoryLayout({
const { data: scanData, isLoading } = useScan(parseInt(id))
const t = useTranslations("scan.history")

const getActiveTab = () => {
if (pathname.includes("/subdomain")) return "subdomain"
if (pathname.includes("/endpoints")) return "endpoints"
if (pathname.includes("/websites")) return "websites"
// Get primary navigation active tab
const getPrimaryTab = () => {
if (pathname.includes("/overview")) return "overview"
if (pathname.includes("/directories")) return "directories"
if (pathname.includes("/screenshots")) return "screenshots"
if (pathname.includes("/vulnerabilities")) return "vulnerabilities"
if (pathname.includes("/ip-addresses")) return "ip-addresses"
return ""
// All asset pages fall under "assets"
if (
pathname.includes("/websites") ||
pathname.includes("/subdomain") ||
pathname.includes("/ip-addresses") ||
pathname.includes("/endpoints")
) {
return "assets"
}
return "overview"
}

// Get secondary navigation active tab (for assets)
const getSecondaryTab = () => {
if (pathname.includes("/websites")) return "websites"
if (pathname.includes("/subdomain")) return "subdomain"
if (pathname.includes("/ip-addresses")) return "ip-addresses"
if (pathname.includes("/endpoints")) return "endpoints"
return "websites"
}

// Check if we should show secondary navigation
const showSecondaryNav = getPrimaryTab() === "assets"

const basePath = `/scan/history/${id}`
const tabPaths = {
subdomain: `${basePath}/subdomain/`,
endpoints: `${basePath}/endpoints/`,
websites: `${basePath}/websites/`,
const primaryPaths = {
overview: `${basePath}/overview/`,
assets: `${basePath}/websites/`, // Default to websites when clicking assets
directories: `${basePath}/directories/`,
screenshots: `${basePath}/screenshots/`,
vulnerabilities: `${basePath}/vulnerabilities/`,
}

const secondaryPaths = {
websites: `${basePath}/websites/`,
subdomain: `${basePath}/subdomain/`,
"ip-addresses": `${basePath}/ip-addresses/`,
endpoints: `${basePath}/endpoints/`,
}

// Get counts for each tab from scan data
const stats = scanData?.cachedStats
const counts = {
subdomain: scanData?.summary?.subdomains || 0,
endpoints: scanData?.summary?.endpoints || 0,
websites: scanData?.summary?.websites || 0,
directories: scanData?.summary?.directories || 0,
vulnerabilities: scanData?.summary?.vulnerabilities?.total || 0,
"ip-addresses": scanData?.summary?.ips || 0,
subdomain: stats?.subdomainsCount || 0,
endpoints: stats?.endpointsCount || 0,
websites: stats?.websitesCount || 0,
directories: stats?.directoriesCount || 0,
screenshots: stats?.screenshotsCount || 0,
vulnerabilities: stats?.vulnsTotal || 0,
"ip-addresses": stats?.ipsCount || 0,
}

// Calculate total assets count
const totalAssets = counts.websites + counts.subdomain + counts["ip-addresses"] + counts.endpoints

// Loading state
if (isLoading) {
return (
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
{/* Header skeleton */}
<div className="flex items-center gap-2 px-4 lg:px-6">
<Skeleton className="h-4 w-16" />
<span className="text-muted-foreground">/</span>
<Skeleton className="h-4 w-32" />
</div>
{/* Tabs skeleton */}
<div className="flex gap-1 px-4 lg:px-6">
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-24" />
</div>
</div>
)
}

return (
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
<div className="flex items-center justify-between px-4 lg:px-6">
<div>
<h2 className="text-2xl font-bold tracking-tight flex items-center gap-2">
<Target />
Scan Results
</h2>
<p className="text-muted-foreground">{t("taskId", { id })}</p>
</div>
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6 h-full">
{/* Header: Page label + Scan info */}
<div className="flex items-center gap-2 text-sm px-4 lg:px-6">
<span className="text-muted-foreground">{t("breadcrumb.scanHistory")}</span>
<span className="text-muted-foreground">/</span>
<span className="font-medium flex items-center gap-1.5">
<Target className="h-4 w-4" />
{(scanData?.target as any)?.name || t("taskId", { id })}
</span>
</div>

<div className="flex items-center justify-between px-4 lg:px-6">
<Tabs value={getActiveTab()} className="w-full">
{/* Primary navigation */}
<div className="px-4 lg:px-6">
<Tabs value={getPrimaryTab()}>
<TabsList>
<TabsTrigger value="websites" asChild>
<Link href={tabPaths.websites} className="flex items-center gap-0.5">
Websites
{counts.websites > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.websites}
</Badge>
)}
<TabsTrigger value="overview" asChild>
<Link href={primaryPaths.overview} className="flex items-center gap-1.5">
<LayoutDashboard className="h-4 w-4" />
{t("tabs.overview")}
</Link>
</TabsTrigger>
<TabsTrigger value="subdomain" asChild>
<Link href={tabPaths.subdomain} className="flex items-center gap-0.5">
Subdomains
{counts.subdomain > 0 && (
<TabsTrigger value="assets" asChild>
<Link href={primaryPaths.assets} className="flex items-center gap-1.5">
<Package className="h-4 w-4" />
{t("tabs.assets")}
{totalAssets > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.subdomain}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="ip-addresses" asChild>
<Link href={tabPaths["ip-addresses"]} className="flex items-center gap-0.5">
IP Addresses
{counts["ip-addresses"] > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts["ip-addresses"]}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="endpoints" asChild>
<Link href={tabPaths.endpoints} className="flex items-center gap-0.5">
URLs
{counts.endpoints > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.endpoints}
{totalAssets}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="directories" asChild>
<Link href={tabPaths.directories} className="flex items-center gap-0.5">
Directories
<Link href={primaryPaths.directories} className="flex items-center gap-1.5">
<FolderSearch className="h-4 w-4" />
{t("tabs.directories")}
{counts.directories > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.directories}
@@ -114,9 +145,21 @@ export default function ScanHistoryLayout({
)}
</Link>
</TabsTrigger>
<TabsTrigger value="screenshots" asChild>
<Link href={primaryPaths.screenshots} className="flex items-center gap-1.5">
<Image className="h-4 w-4" />
{t("tabs.screenshots")}
{counts.screenshots > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.screenshots}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="vulnerabilities" asChild>
<Link href={tabPaths.vulnerabilities} className="flex items-center gap-0.5">
Vulnerabilities
<Link href={primaryPaths.vulnerabilities} className="flex items-center gap-1.5">
<ShieldAlert className="h-4 w-4" />
{t("tabs.vulnerabilities")}
{counts.vulnerabilities > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.vulnerabilities}
@@ -128,6 +171,57 @@ export default function ScanHistoryLayout({
</Tabs>
</div>

{/* Secondary navigation (only for assets) */}
{showSecondaryNav && (
<div className="flex items-center px-4 lg:px-6">
<Tabs value={getSecondaryTab()} className="w-full">
<TabsList variant="underline">
<TabsTrigger value="websites" variant="underline" asChild>
<Link href={secondaryPaths.websites} className="flex items-center gap-0.5">
{t("tabs.websites")}
{counts.websites > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.websites}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="subdomain" variant="underline" asChild>
<Link href={secondaryPaths.subdomain} className="flex items-center gap-0.5">
{t("tabs.subdomains")}
{counts.subdomain > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.subdomain}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="ip-addresses" variant="underline" asChild>
<Link href={secondaryPaths["ip-addresses"]} className="flex items-center gap-0.5">
{t("tabs.ips")}
{counts["ip-addresses"] > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts["ip-addresses"]}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="endpoints" variant="underline" asChild>
<Link href={secondaryPaths.endpoints} className="flex items-center gap-0.5">
{t("tabs.urls")}
{counts.endpoints > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.endpoints}
</Badge>
)}
</Link>
</TabsTrigger>
</TabsList>
</Tabs>
</div>
)}

{/* Sub-page content */}
{children}
</div>
)
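The layout above resolves the active primary tab purely from the pathname, folding the four asset routes into one "assets" tab. The same logic as a standalone pure function (tab ids and path segments come from the layout; nothing else is assumed):

// Sketch: two-level tab resolution extracted from the layout above.
const ASSET_SEGMENTS = ["/websites", "/subdomain", "/ip-addresses", "/endpoints"]

function primaryTab(pathname: string): string {
  if (pathname.includes("/overview")) return "overview"
  if (pathname.includes("/directories")) return "directories"
  if (pathname.includes("/screenshots")) return "screenshots"
  if (pathname.includes("/vulnerabilities")) return "vulnerabilities"
  // Any asset sub-route highlights the shared "assets" tab.
  return ASSET_SEGMENTS.some((s) => pathname.includes(s)) ? "assets" : "overview"
}

// e.g. primaryTab("/scan/history/7/ip-addresses/") === "assets"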
frontend/app/[locale]/scan/history/[id]/overview/page.tsx (new file, 19 lines)
@@ -0,0 +1,19 @@
"use client"

import { useParams } from "next/navigation"
import { ScanOverview } from "@/components/scan/history/scan-overview"

/**
 * Scan overview page
 * Displays scan statistics and summary information
 */
export default function ScanOverviewPage() {
  const { id } = useParams<{ id: string }>()
  const scanId = Number(id)

  return (
    <div className="flex-1 flex flex-col min-h-0 px-4 lg:px-6">
      <ScanOverview scanId={scanId} />
    </div>
  )
}
@@ -8,7 +8,7 @@ export default function ScanHistoryDetailPage() {
const router = useRouter()

useEffect(() => {
router.replace(`/scan/history/${id}/websites/`)
router.replace(`/scan/history/${id}/overview/`)
}, [id, router])

return null
frontend/app/[locale]/scan/history/[id]/screenshots/page.tsx (new file, 15 lines)
@@ -0,0 +1,15 @@
"use client"

import { useParams } from "next/navigation"
import { ScreenshotsGallery } from "@/components/screenshots/screenshots-gallery"

export default function ScanScreenshotsPage() {
  const { id } = useParams<{ id: string }>()
  const scanId = Number(id)

  return (
    <div className="px-4 lg:px-6">
      <ScreenshotsGallery scanId={scanId} />
    </div>
  )
}
frontend/app/[locale]/settings/api-keys/page.tsx (new file, 306 lines)
@@ -0,0 +1,306 @@
"use client"

import React, { useState, useEffect } from 'react'
import { IconEye, IconEyeOff, IconWorldSearch, IconRadar2 } from '@tabler/icons-react'

import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { Switch } from '@/components/ui/switch'
import { Separator } from '@/components/ui/separator'
import { Badge } from '@/components/ui/badge'
import { Skeleton } from '@/components/ui/skeleton'
import { useApiKeySettings, useUpdateApiKeySettings } from '@/hooks/use-api-key-settings'
import type { ApiKeySettings } from '@/types/api-key-settings.types'

// Password input component (with show/hide toggle)
function PasswordInput({ value, onChange, placeholder, disabled }: {
  value: string
  onChange: (value: string) => void
  placeholder?: string
  disabled?: boolean
}) {
  const [show, setShow] = useState(false)
  return (
    <div className="relative">
      <Input
        type={show ? 'text' : 'password'}
        value={value}
        onChange={(e) => onChange(e.target.value)}
        placeholder={placeholder}
        disabled={disabled}
        className="pr-10"
      />
      <button
        type="button"
        onClick={() => setShow(!show)}
        className="absolute right-3 top-1/2 -translate-y-1/2 text-muted-foreground hover:text-foreground"
      >
        {show ? <IconEyeOff className="h-4 w-4" /> : <IconEye className="h-4 w-4" />}
      </button>
    </div>
  )
}

// Provider configuration definitions
const PROVIDERS = [
  {
    key: 'fofa',
    name: 'FOFA',
    description: '网络空间测绘平台,提供全球互联网资产搜索',
    icon: IconWorldSearch,
    color: 'text-blue-500',
    bgColor: 'bg-blue-500/10',
    fields: [
      { name: 'email', label: '邮箱', type: 'text', placeholder: 'your@email.com' },
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 FOFA API Key' },
    ],
    docUrl: 'https://fofa.info/api',
  },
  {
    key: 'hunter',
    name: 'Hunter (鹰图)',
    description: '奇安信威胁情报平台,提供网络空间资产测绘',
    icon: IconRadar2,
    color: 'text-orange-500',
    bgColor: 'bg-orange-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 Hunter API Key' },
    ],
    docUrl: 'https://hunter.qianxin.com/',
  },
  {
    key: 'shodan',
    name: 'Shodan',
    description: '全球最大的互联网设备搜索引擎',
    icon: IconWorldSearch,
    color: 'text-red-500',
    bgColor: 'bg-red-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 Shodan API Key' },
    ],
    docUrl: 'https://developer.shodan.io/',
  },
  {
    key: 'censys',
    name: 'Censys',
    description: '互联网资产搜索和监控平台',
    icon: IconWorldSearch,
    color: 'text-purple-500',
    bgColor: 'bg-purple-500/10',
    fields: [
      { name: 'apiId', label: 'API ID', type: 'text', placeholder: '输入 Censys API ID' },
      { name: 'apiSecret', label: 'API Secret', type: 'password', placeholder: '输入 Censys API Secret' },
    ],
    docUrl: 'https://search.censys.io/api',
  },
  {
    key: 'zoomeye',
    name: 'ZoomEye (钟馗之眼)',
    description: '知道创宇网络空间搜索引擎',
    icon: IconWorldSearch,
    color: 'text-green-500',
    bgColor: 'bg-green-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 ZoomEye API Key' },
    ],
    docUrl: 'https://www.zoomeye.org/doc',
  },
  {
    key: 'securitytrails',
    name: 'SecurityTrails',
    description: 'DNS 历史记录和子域名数据平台',
    icon: IconWorldSearch,
    color: 'text-cyan-500',
    bgColor: 'bg-cyan-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 SecurityTrails API Key' },
    ],
    docUrl: 'https://securitytrails.com/corp/api',
  },
  {
    key: 'threatbook',
    name: 'ThreatBook (微步在线)',
    description: '威胁情报平台,提供域名和 IP 情报查询',
    icon: IconWorldSearch,
    color: 'text-indigo-500',
    bgColor: 'bg-indigo-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 ThreatBook API Key' },
    ],
    docUrl: 'https://x.threatbook.com/api',
  },
  {
    key: 'quake',
    name: 'Quake (360)',
    description: '360 网络空间测绘系统',
    icon: IconWorldSearch,
    color: 'text-teal-500',
    bgColor: 'bg-teal-500/10',
    fields: [
      { name: 'apiKey', label: 'API Key', type: 'password', placeholder: '输入 Quake API Key' },
    ],
    docUrl: 'https://quake.360.net/quake/#/help',
  },
]

// Default settings
const DEFAULT_SETTINGS: ApiKeySettings = {
  fofa: { enabled: false, email: '', apiKey: '' },
  hunter: { enabled: false, apiKey: '' },
  shodan: { enabled: false, apiKey: '' },
  censys: { enabled: false, apiId: '', apiSecret: '' },
  zoomeye: { enabled: false, apiKey: '' },
  securitytrails: { enabled: false, apiKey: '' },
  threatbook: { enabled: false, apiKey: '' },
  quake: { enabled: false, apiKey: '' },
}

export default function ApiKeysSettingsPage() {
  const { data: settings, isLoading } = useApiKeySettings()
  const updateMutation = useUpdateApiKeySettings()

  const [formData, setFormData] = useState<ApiKeySettings>(DEFAULT_SETTINGS)
  const [hasChanges, setHasChanges] = useState(false)

  // Update the form data once settings have loaded
  useEffect(() => {
    if (settings) {
      setFormData({ ...DEFAULT_SETTINGS, ...settings })
      setHasChanges(false)
    }
  }, [settings])

  const updateProvider = (providerKey: string, field: string, value: any) => {
    setFormData(prev => ({
      ...prev,
      [providerKey]: {
        ...prev[providerKey as keyof ApiKeySettings],
        [field]: value,
      }
    }))
    setHasChanges(true)
  }

  const handleSave = () => {
    // Clear the dirty flag only once the update actually succeeds.
    updateMutation.mutate(formData, {
      onSuccess: () => setHasChanges(false),
    })
  }

  const enabledCount = Object.values(formData).filter((p: any) => p?.enabled).length

  if (isLoading) {
    return (
      <div className="p-4 md:p-6 space-y-6">
        <div>
          <Skeleton className="h-8 w-48" />
          <Skeleton className="h-4 w-96 mt-2" />
        </div>
        <div className="grid gap-4">
          {[1, 2, 3].map((i) => (
            <Skeleton key={i} className="h-24 w-full" />
          ))}
        </div>
      </div>
    )
  }

  return (
    <div className="p-4 md:p-6 space-y-6">
      {/* Page title */}
      <div>
        <div className="flex items-center gap-2">
          <h1 className="text-2xl font-semibold">API 密钥配置</h1>
          {enabledCount > 0 && (
            <Badge variant="secondary">{enabledCount} 个已启用</Badge>
          )}
        </div>
        <p className="text-muted-foreground mt-1">
          配置第三方数据源的 API 密钥,用于增强子域名发现能力。启用后将在 subfinder 扫描时自动使用。
        </p>
      </div>

      {/* Provider card list */}
      <div className="grid gap-4">
        {PROVIDERS.map((provider) => {
          const data = formData[provider.key as keyof ApiKeySettings] || {}
          const isEnabled = (data as any)?.enabled || false

          return (
            <Card key={provider.key}>
              <CardHeader className="pb-4">
                <div className="flex items-center justify-between">
                  <div className="flex items-center gap-3">
                    <div className={`flex h-10 w-10 items-center justify-center rounded-lg ${provider.bgColor}`}>
                      <provider.icon className={`h-5 w-5 ${provider.color}`} />
                    </div>
                    <div>
                      <div className="flex items-center gap-2">
                        <CardTitle className="text-base">{provider.name}</CardTitle>
                        {isEnabled && <Badge variant="outline" className="text-xs text-green-600">已启用</Badge>}
                      </div>
                      <CardDescription>{provider.description}</CardDescription>
                    </div>
                  </div>
                  <Switch
                    checked={isEnabled}
                    onCheckedChange={(checked) => updateProvider(provider.key, 'enabled', checked)}
                  />
                </div>
              </CardHeader>

              {/* Expanded configuration form */}
              {isEnabled && (
                <CardContent className="pt-0">
                  <Separator className="mb-4" />
                  <div className="space-y-4">
                    {provider.fields.map((field) => (
                      <div key={field.name} className="space-y-2">
                        <label className="text-sm font-medium">{field.label}</label>
                        {field.type === 'password' ? (
                          <PasswordInput
                            value={(data as any)[field.name] || ''}
                            onChange={(value) => updateProvider(provider.key, field.name, value)}
                            placeholder={field.placeholder}
                          />
                        ) : (
                          <Input
                            type="text"
                            value={(data as any)[field.name] || ''}
                            onChange={(e) => updateProvider(provider.key, field.name, e.target.value)}
                            placeholder={field.placeholder}
                          />
                        )}
                      </div>
                    ))}
                    <p className="text-xs text-muted-foreground">
                      获取 API Key:
                      <a
                        href={provider.docUrl}
                        target="_blank"
                        rel="noopener noreferrer"
                        className="text-primary hover:underline ml-1"
                      >
                        {provider.docUrl}
                      </a>
                    </p>
                  </div>
                </CardContent>
              )}
            </Card>
          )
        })}
      </div>

      {/* Save button */}
      <div className="flex justify-end">
        <Button
          onClick={handleSave}
          disabled={updateMutation.isPending || !hasChanges}
        >
          {updateMutation.isPending ? '保存中...' : '保存配置'}
        </Button>
      </div>
    </div>
  )
}
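DEFAULT_SETTINGS above fully pins down the shape of the imported ApiKeySettings type. A plausible reconstruction of that declaration (the real one lives in @/types/api-key-settings.types, which this diff does not show, so treat this as an inference):

// Hypothetical reconstruction, inferred from DEFAULT_SETTINGS above.
interface KeyOnlyProvider { enabled: boolean; apiKey: string }

interface ApiKeySettings {
  fofa: { enabled: boolean; email: string; apiKey: string }
  censys: { enabled: boolean; apiId: string; apiSecret: string }
  hunter: KeyOnlyProvider
  shodan: KeyOnlyProvider
  zoomeye: KeyOnlyProvider
  securitytrails: KeyOnlyProvider
  threatbook: KeyOnlyProvider
  quake: KeyOnlyProvider
}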
frontend/app/[locale]/settings/blacklist/page.tsx (new file, 132 lines)
@@ -0,0 +1,132 @@
"use client"

import React, { useState, useEffect } from "react"
import { useTranslations } from "next-intl"
import { AlertTriangle, Loader2, Ban } from "lucide-react"
import { Button } from "@/components/ui/button"
import { Textarea } from "@/components/ui/textarea"
import { Skeleton } from "@/components/ui/skeleton"
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"
import { useGlobalBlacklist, useUpdateGlobalBlacklist } from "@/hooks/use-global-blacklist"

/**
 * Global blacklist settings page
 */
export default function GlobalBlacklistPage() {
  const t = useTranslations("pages.settings.blacklist")

  const [blacklistText, setBlacklistText] = useState("")
  const [hasChanges, setHasChanges] = useState(false)

  const { data, isLoading, error } = useGlobalBlacklist()
  const updateBlacklist = useUpdateGlobalBlacklist()

  // Initialize text when data loads
  useEffect(() => {
    if (data?.patterns) {
      setBlacklistText(data.patterns.join("\n"))
      setHasChanges(false)
    }
  }, [data])

  // Handle text change
  const handleTextChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
    setBlacklistText(e.target.value)
    setHasChanges(true)
  }

  // Handle save
  const handleSave = () => {
    const patterns = blacklistText
      .split("\n")
      .map((line) => line.trim())
      .filter((line) => line.length > 0)

    updateBlacklist.mutate(
      { patterns },
      {
        onSuccess: () => {
          setHasChanges(false)
        },
      }
    )
  }

  if (isLoading) {
    return (
      <div className="flex flex-1 flex-col gap-4 p-4">
        <div className="space-y-2">
          <Skeleton className="h-8 w-48" />
          <Skeleton className="h-4 w-96" />
        </div>
        <Skeleton className="h-[400px] w-full" />
      </div>
    )
  }

  if (error) {
    return (
      <div className="flex flex-1 flex-col items-center justify-center py-12">
        <AlertTriangle className="h-10 w-10 text-destructive mb-4" />
        <p className="text-muted-foreground">{t("loadError")}</p>
      </div>
    )
  }

  return (
    <div className="flex flex-1 flex-col gap-4 p-4">
      {/* Page header */}
      <div>
        <h1 className="text-2xl font-bold">{t("title")}</h1>
        <p className="text-muted-foreground">{t("description")}</p>
      </div>

      {/* Blacklist card */}
      <Card>
        <CardHeader>
          <div className="flex items-center gap-2">
            <Ban className="h-5 w-5 text-muted-foreground" />
            <CardTitle>{t("card.title")}</CardTitle>
          </div>
          <CardDescription>{t("card.description")}</CardDescription>
        </CardHeader>
        <CardContent className="space-y-4">
          {/* Rules hint */}
          <div className="flex flex-wrap items-center gap-x-4 gap-y-2 text-sm text-muted-foreground">
            <span className="font-medium text-foreground">{t("rules.title")}:</span>
            <span><code className="bg-muted px-1.5 py-0.5 rounded text-xs">*.gov</code> {t("rules.domain")}</span>
            <span><code className="bg-muted px-1.5 py-0.5 rounded text-xs">*cdn*</code> {t("rules.keyword")}</span>
            <span><code className="bg-muted px-1.5 py-0.5 rounded text-xs">192.168.1.1</code> {t("rules.ip")}</span>
            <span><code className="bg-muted px-1.5 py-0.5 rounded text-xs">10.0.0.0/8</code> {t("rules.cidr")}</span>
          </div>

          {/* Scope hint */}
          <div className="rounded-lg border bg-muted/50 p-3 text-sm">
            <p className="text-muted-foreground">{t("scopeHint")}</p>
          </div>

          {/* Input */}
          <Textarea
            value={blacklistText}
            onChange={handleTextChange}
            placeholder={t("placeholder")}
            className="min-h-[320px] font-mono text-sm"
          />

          {/* Save button */}
          <div className="flex justify-end">
            <Button
              onClick={handleSave}
              disabled={!hasChanges || updateBlacklist.isPending}
            >
              {updateBlacklist.isPending && (
                <Loader2 className="mr-2 h-4 w-4 animate-spin" />
              )}
              {t("save")}
            </Button>
          </div>
        </CardContent>
      </Card>
    </div>
  )
}
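The rules hint above documents four pattern forms (domain suffix, keyword, exact IP, CIDR). The matching itself happens server-side and is not part of this diff; a rough sketch of what a matcher for those forms could look like, with every name here hypothetical and the CIDR case deliberately left out:

// Hypothetical matcher for the four documented blacklist pattern forms.
function matchesPattern(host: string, pattern: string): boolean {
  if (pattern.includes("/")) return false // CIDR needs an IP-range library; omitted
  if (pattern.startsWith("*.")) return host.endsWith(pattern.slice(1)) // "*.gov"
  if (pattern.includes("*")) {
    // "*cdn*" keyword match: strip wildcards, check substring
    return host.includes(pattern.replaceAll("*", ""))
  }
  return host === pattern // exact IP or hostname
}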
@@ -29,6 +29,10 @@ export default function NotificationSettingsPage() {
enabled: z.boolean(),
webhookUrl: z.string().url(t("discord.urlInvalid")).or(z.literal('')),
}),
wecom: z.object({
enabled: z.boolean(),
webhookUrl: z.string().url(t("wecom.urlInvalid")).or(z.literal('')),
}),
categories: z.object({
scan: z.boolean(),
vulnerability: z.boolean(),
@@ -46,6 +50,15 @@ export default function NotificationSettingsPage() {
})
}
}
if (val.wecom.enabled) {
if (!val.wecom.webhookUrl || val.wecom.webhookUrl.trim() === '') {
ctx.addIssue({
code: z.ZodIssueCode.custom,
message: t("wecom.requiredError"),
path: ['wecom', 'webhookUrl'],
})
}
}
})

const NOTIFICATION_CATEGORIES = [
@@ -79,6 +92,7 @@ export default function NotificationSettingsPage() {
resolver: zodResolver(schema),
values: data ?? {
discord: { enabled: false, webhookUrl: '' },
wecom: { enabled: false, webhookUrl: '' },
categories: {
scan: true,
vulnerability: true,
@@ -93,6 +107,7 @@ export default function NotificationSettingsPage() {
}

const discordEnabled = form.watch('discord.enabled')
const wecomEnabled = form.watch('wecom.enabled')

return (
<div className="p-4 md:p-6 space-y-6">
@@ -187,25 +202,59 @@ export default function NotificationSettingsPage() {
</CardHeader>
</Card>

{/* Feishu/DingTalk/WeCom - Coming soon */}
<Card className="opacity-60">
{/* WeCom (Enterprise WeChat) */}
<Card>
<CardHeader className="pb-4">
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<div className="flex h-10 w-10 items-center justify-center rounded-lg bg-muted">
<IconBrandSlack className="h-5 w-5 text-muted-foreground" />
<div className="flex h-10 w-10 items-center justify-center rounded-lg bg-[#07C160]/10">
<IconBrandSlack className="h-5 w-5 text-[#07C160]" />
</div>
<div>
<div className="flex items-center gap-2">
<CardTitle className="text-base">{t("enterprise.title")}</CardTitle>
<Badge variant="secondary" className="text-xs">{t("emailChannel.comingSoon")}</Badge>
</div>
<CardDescription>{t("enterprise.description")}</CardDescription>
<CardTitle className="text-base">{t("wecom.title")}</CardTitle>
<CardDescription>{t("wecom.description")}</CardDescription>
</div>
</div>
<Switch disabled />
<FormField
control={form.control}
name="wecom.enabled"
render={({ field }) => (
<FormControl>
<Switch
checked={field.value}
onCheckedChange={field.onChange}
disabled={isLoading || updateMutation.isPending}
/>
</FormControl>
)}
/>
</div>
</CardHeader>
{wecomEnabled && (
<CardContent className="pt-0">
<Separator className="mb-4" />
<FormField
control={form.control}
name="wecom.webhookUrl"
render={({ field }) => (
<FormItem>
<FormLabel>{t("wecom.webhookLabel")}</FormLabel>
<FormControl>
<Input
placeholder={t("wecom.webhookPlaceholder")}
{...field}
disabled={isLoading || updateMutation.isPending}
/>
</FormControl>
<FormDescription>
{t("wecom.webhookHelp")}
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
</CardContent>
)}
</Card>
</TabsContent>
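The schema hunk above extends the form's superRefine so that an enabled channel must carry a webhook URL, while a disabled channel may leave it empty. The same conditional-required pattern in isolation (a minimal sketch; the channel name is borrowed from the page above, the message string is a placeholder):

import { z } from "zod"

// A URL that is only required when the channel is enabled.
const channel = z.object({
  enabled: z.boolean(),
  webhookUrl: z.string().url().or(z.literal("")),
})

const schema = z.object({ wecom: channel }).superRefine((val, ctx) => {
  if (val.wecom.enabled && val.wecom.webhookUrl.trim() === "") {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "Webhook URL is required when the channel is enabled",
      path: ["wecom", "webhookUrl"],
    })
  }
})

// schema.safeParse({ wecom: { enabled: true, webhookUrl: "" } }).success === false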
@@ -1,17 +1,10 @@
"use client"

import { useTranslations } from "next-intl"
import { SystemLogsView } from "@/components/settings/system-logs"

export default function SystemLogsPage() {
const t = useTranslations("settings.systemLogs")

return (
<div className="flex flex-1 flex-col gap-4 p-4">
<div>
<h1 className="text-2xl font-bold tracking-tight">{t("title")}</h1>
<p className="text-muted-foreground">{t("description")}</p>
</div>
<div className="flex flex-1 flex-col p-4 h-full">
<SystemLogsView />
</div>
)
@@ -5,15 +5,15 @@ import { useEffect } from "react"
|
||||
|
||||
/**
|
||||
* Target detail page (compatible with old routes)
|
||||
* Automatically redirects to websites page
|
||||
* Automatically redirects to overview page
|
||||
*/
|
||||
export default function TargetDetailsPage() {
|
||||
const { id } = useParams<{ id: string }>()
|
||||
const router = useRouter()
|
||||
|
||||
useEffect(() => {
|
||||
// Redirect to websites page
|
||||
router.replace(`/target/${id}/websites/`)
|
||||
// Redirect to overview page
|
||||
router.replace(`/target/${id}/overview/`)
|
||||
}, [id, router])
|
||||
|
||||
return null
|
||||
|
||||
@@ -3,16 +3,23 @@
import React from "react"
import { usePathname, useParams } from "next/navigation"
import Link from "next/link"
import { Target } from "lucide-react"
import { Target, LayoutDashboard, Package, FolderSearch, Image, ShieldAlert, Settings, HelpCircle } from "lucide-react"
import { Skeleton } from "@/components/ui/skeleton"
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
import { Badge } from "@/components/ui/badge"
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip"
import { useTarget } from "@/hooks/use-targets"
import { useTranslations } from "next-intl"

/**
* Target detail layout
* Provides shared target information and navigation for all sub-pages
* Two-level navigation: Overview / Assets / Vulnerabilities
* Assets has secondary navigation for different asset types
*/
export default function TargetLayout({
children,
@@ -30,26 +37,53 @@ export default function TargetLayout({
error
} = useTarget(Number(id))

// Get currently active tab
const getActiveTab = () => {
if (pathname.includes("/subdomain")) return "subdomain"
if (pathname.includes("/endpoints")) return "endpoints"
if (pathname.includes("/websites")) return "websites"
// Get primary navigation active tab
const getPrimaryTab = () => {
if (pathname.includes("/overview")) return "overview"
if (pathname.includes("/directories")) return "directories"
if (pathname.includes("/screenshots")) return "screenshots"
if (pathname.includes("/vulnerabilities")) return "vulnerabilities"
if (pathname.includes("/ip-addresses")) return "ip-addresses"
return ""
if (pathname.includes("/settings")) return "settings"
// All asset pages fall under "assets"
if (
pathname.includes("/websites") ||
pathname.includes("/subdomain") ||
pathname.includes("/ip-addresses") ||
pathname.includes("/endpoints")
) {
return "assets"
}
return "overview"
}

// Get secondary navigation active tab (for assets)
const getSecondaryTab = () => {
if (pathname.includes("/websites")) return "websites"
if (pathname.includes("/subdomain")) return "subdomain"
if (pathname.includes("/ip-addresses")) return "ip-addresses"
if (pathname.includes("/endpoints")) return "endpoints"
return "websites"
}

// Check if we should show secondary navigation
const showSecondaryNav = getPrimaryTab() === "assets"

// Tab path mapping
const basePath = `/target/${id}`
const tabPaths = {
subdomain: `${basePath}/subdomain/`,
endpoints: `${basePath}/endpoints/`,
websites: `${basePath}/websites/`,
const primaryPaths = {
overview: `${basePath}/overview/`,
assets: `${basePath}/websites/`, // Default to websites when clicking assets
directories: `${basePath}/directories/`,
screenshots: `${basePath}/screenshots/`,
vulnerabilities: `${basePath}/vulnerabilities/`,
settings: `${basePath}/settings/`,
}

const secondaryPaths = {
websites: `${basePath}/websites/`,
subdomain: `${basePath}/subdomain/`,
"ip-addresses": `${basePath}/ip-addresses/`,
endpoints: `${basePath}/endpoints/`,
}

// Get counts for each tab from target data
@@ -60,29 +94,27 @@ export default function TargetLayout({
directories: (target as any)?.summary?.directories || 0,
vulnerabilities: (target as any)?.summary?.vulnerabilities?.total || 0,
"ip-addresses": (target as any)?.summary?.ips || 0,
screenshots: (target as any)?.summary?.screenshots || 0,
}

// Calculate total assets count
const totalAssets = counts.websites + counts.subdomain + counts["ip-addresses"] + counts.endpoints

// Loading state
if (isLoading) {
return (
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
{/* Page header skeleton */}
<div className="flex items-center justify-between px-4 lg:px-6">
<div className="w-full max-w-xl space-y-2">
<div className="flex items-center gap-2">
<Skeleton className="h-6 w-6 rounded-md" />
<Skeleton className="h-7 w-48" />
</div>
<Skeleton className="h-4 w-72" />
</div>
{/* Header skeleton */}
<div className="flex items-center gap-2 px-4 lg:px-6">
<Skeleton className="h-4 w-16" />
<span className="text-muted-foreground">/</span>
<Skeleton className="h-4 w-32" />
</div>

{/* Tabs navigation skeleton */}
<div className="flex items-center justify-between px-4 lg:px-6">
<div className="flex gap-2">
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-24" />
</div>
{/* Tabs skeleton */}
<div className="flex gap-1 px-4 lg:px-6">
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-24" />
</div>
</div>
)
@@ -123,84 +155,144 @@ export default function TargetLayout({

return (
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
{/* Page header */}
{/* Header: Page label + Target name */}
<div className="flex items-center gap-2 text-sm px-4 lg:px-6">
<span className="text-muted-foreground">{t("breadcrumb.targetDetail")}</span>
<span className="text-muted-foreground">/</span>
<span className="font-medium flex items-center gap-1.5">
<Target className="h-4 w-4" />
{target.name}
</span>
</div>

{/* Primary navigation */}
<div className="flex items-center justify-between px-4 lg:px-6">
<div>
<h2 className="text-2xl font-bold tracking-tight flex items-center gap-2">
<Target />
{target.name}
</h2>
<p className="text-muted-foreground">{target.description || t("noDescription")}</p>
<div className="flex items-center gap-3">
<Tabs value={getPrimaryTab()}>
<TabsList>
<TabsTrigger value="overview" asChild>
<Link href={primaryPaths.overview} className="flex items-center gap-1.5">
<LayoutDashboard className="h-4 w-4" />
{t("tabs.overview")}
</Link>
</TabsTrigger>
<TabsTrigger value="assets" asChild>
<Link href={primaryPaths.assets} className="flex items-center gap-1.5">
<Package className="h-4 w-4" />
{t("tabs.assets")}
{totalAssets > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{totalAssets}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="directories" asChild>
<Link href={primaryPaths.directories} className="flex items-center gap-1.5">
<FolderSearch className="h-4 w-4" />
{t("tabs.directories")}
{counts.directories > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.directories}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="screenshots" asChild>
<Link href={primaryPaths.screenshots} className="flex items-center gap-1.5">
<Image className="h-4 w-4" />
{t("tabs.screenshots")}
{counts.screenshots > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.screenshots}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="vulnerabilities" asChild>
<Link href={primaryPaths.vulnerabilities} className="flex items-center gap-1.5">
<ShieldAlert className="h-4 w-4" />
{t("tabs.vulnerabilities")}
{counts.vulnerabilities > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.vulnerabilities}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="settings" asChild>
<Link href={primaryPaths.settings} className="flex items-center gap-1.5">
<Settings className="h-4 w-4" />
{t("tabs.settings")}
</Link>
</TabsTrigger>
</TabsList>
</Tabs>

{getPrimaryTab() === "directories" && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<HelpCircle className="h-4 w-4 text-muted-foreground cursor-help" />
</TooltipTrigger>
<TooltipContent side="right" className="max-w-sm">
{t("directoriesHelp")}
</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
</div>
</div>

{/* Tabs navigation - Use Link to ensure progress bar is triggered */}
<div className="flex items-center justify-between px-4 lg:px-6">
<Tabs value={getActiveTab()} className="w-full">
<TabsList>
<TabsTrigger value="websites" asChild>
<Link href={tabPaths.websites} className="flex items-center gap-0.5">
Websites
{counts.websites > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.websites}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="subdomain" asChild>
<Link href={tabPaths.subdomain} className="flex items-center gap-0.5">
Subdomains
{counts.subdomain > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.subdomain}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="ip-addresses" asChild>
<Link href={tabPaths["ip-addresses"]} className="flex items-center gap-0.5">
IP Addresses
{counts["ip-addresses"] > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts["ip-addresses"]}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="endpoints" asChild>
<Link href={tabPaths.endpoints} className="flex items-center gap-0.5">
URLs
{counts.endpoints > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.endpoints}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="directories" asChild>
<Link href={tabPaths.directories} className="flex items-center gap-0.5">
Directories
{counts.directories > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.directories}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="vulnerabilities" asChild>
<Link href={tabPaths.vulnerabilities} className="flex items-center gap-0.5">
Vulnerabilities
{counts.vulnerabilities > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.vulnerabilities}
</Badge>
)}
</Link>
</TabsTrigger>
</TabsList>
</Tabs>
</div>
{/* Secondary navigation (only for assets) */}
{showSecondaryNav && (
<div className="flex items-center px-4 lg:px-6">
<Tabs value={getSecondaryTab()} className="w-full">
<TabsList variant="underline">
<TabsTrigger value="websites" variant="underline" asChild>
<Link href={secondaryPaths.websites} className="flex items-center gap-0.5">
{t("tabs.websites")}
{counts.websites > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.websites}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="subdomain" variant="underline" asChild>
<Link href={secondaryPaths.subdomain} className="flex items-center gap-0.5">
{t("tabs.subdomains")}
{counts.subdomain > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.subdomain}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="ip-addresses" variant="underline" asChild>
<Link href={secondaryPaths["ip-addresses"]} className="flex items-center gap-0.5">
{t("tabs.ips")}
{counts["ip-addresses"] > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts["ip-addresses"]}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="endpoints" variant="underline" asChild>
<Link href={secondaryPaths.endpoints} className="flex items-center gap-0.5">
{t("tabs.urls")}
{counts.endpoints > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.endpoints}
</Badge>
)}
</Link>
</TabsTrigger>
</TabsList>
</Tabs>
</div>
)}

{/* Sub-page content */}
{children}
frontend/app/[locale]/target/[id]/overview/page.tsx (new file, 19 lines)
@@ -0,0 +1,19 @@
"use client"

import { useParams } from "next/navigation"
import { TargetOverview } from "@/components/target/target-overview"

/**
 * Target overview page
 * Displays target statistics and summary information
 */
export default function TargetOverviewPage() {
  const { id } = useParams<{ id: string }>()
  const targetId = Number(id)

  return (
    <div className="px-4 lg:px-6">
      <TargetOverview targetId={targetId} />
    </div>
  )
}
@@ -5,15 +5,15 @@ import { useEffect } from "react"
|
||||
|
||||
/**
|
||||
* Target detail default page
|
||||
* Automatically redirects to websites page
|
||||
* Automatically redirects to overview page
|
||||
*/
|
||||
export default function TargetDetailPage() {
|
||||
const { id } = useParams<{ id: string }>()
|
||||
const router = useRouter()
|
||||
|
||||
useEffect(() => {
|
||||
// Redirect to websites page
|
||||
router.replace(`/target/${id}/websites/`)
|
||||
// Redirect to overview page
|
||||
router.replace(`/target/${id}/overview/`)
|
||||
}, [id, router])
|
||||
|
||||
return null
|
||||
|
||||
frontend/app/[locale]/target/[id]/screenshots/page.tsx (new file, 15 lines)
@@ -0,0 +1,15 @@
"use client"

import { useParams } from "next/navigation"
import { ScreenshotsGallery } from "@/components/screenshots/screenshots-gallery"

export default function ScreenshotsPage() {
  const { id } = useParams<{ id: string }>()
  const targetId = Number(id)

  return (
    <div className="px-4 lg:px-6">
      <ScreenshotsGallery targetId={targetId} />
    </div>
  )
}
frontend/app/[locale]/target/[id]/settings/page.tsx (new file, 19 lines)
@@ -0,0 +1,19 @@
"use client"

import { useParams } from "next/navigation"
import { TargetSettings } from "@/components/target/target-settings"

/**
 * Target settings page
 * Contains blacklist configuration and other settings
 */
export default function TargetSettingsPage() {
  const { id } = useParams<{ id: string }>()
  const targetId = Number(id)

  return (
    <div className="px-4 lg:px-6">
      <TargetSettings targetId={targetId} />
    </div>
  )
}
@@ -1,9 +1,19 @@
|
||||
"use client"
|
||||
|
||||
import { useEffect, useMemo, useState } from "react"
|
||||
import Editor from "@monaco-editor/react"
|
||||
import dynamic from "next/dynamic"
|
||||
import Link from "next/link"
|
||||
import { useParams } from "next/navigation"
|
||||
|
||||
// Dynamic import Monaco Editor to reduce bundle size (~2MB)
|
||||
const Editor = dynamic(() => import("@monaco-editor/react"), {
|
||||
ssr: false,
|
||||
loading: () => (
|
||||
<div className="flex items-center justify-center h-full">
|
||||
<div className="text-sm text-muted-foreground">Loading editor...</div>
|
||||
</div>
|
||||
),
|
||||
})
|
||||
import {
|
||||
ChevronDown,
|
||||
ChevronRight,
|
||||
@@ -160,7 +170,7 @@ export default function NucleiRepoDetailPage() {
     } else {
       setEditorValue("")
     }
-  }, [templateContent?.path])
+  }, [templateContent])

   const toggleFolder = (path: string) => {
     setExpandedPaths((prev) =>
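The dependency change above is worth a note: depending on the whole templateContent object re-runs the effect whenever a fetch returns a new object identity, while templateContent?.path only re-runs when the path string changes. A hedged sketch of the difference:

// Re-runs whenever the query returns a fresh object, even for the same file:
useEffect(() => { /* reset editor */ }, [templateContent])
// Re-runs only when the selected path actually changes:
useEffect(() => { /* reset editor */ }, [templateContent?.path])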
@@ -248,7 +258,7 @@ export default function NucleiRepoDetailPage() {
       }
     }}
     className={cn(
-      "flex w-full items-center gap-1.5 rounded-md px-2 py-1.5 text-left text-sm transition-colors",
+      "tree-node-item flex w-full items-center gap-1.5 rounded-md px-2 py-1.5 text-left text-sm transition-colors",
       isFolder && "font-medium",
       isActive
         ? "bg-primary/10 text-primary"
Binary file not shown.
Before Width: | Height: | Size: 25 KiB
@@ -245,6 +245,12 @@
  /* Chrome, Safari and Opera */
  }

  /* Performance: long-list rendering optimization via content-visibility */
  .tree-node-item {
    content-visibility: auto;
    contain-intrinsic-size: 0 36px;
  }

}

/* Login page background - theme colors adapt to light and dark mode */
@@ -272,6 +278,20 @@
  z-index: 1;
}

/* Terminal cursor blink animation */
@keyframes blink {
  0%, 50% {
    opacity: 1;
  }
  51%, 100% {
    opacity: 0;
  }
}

.animate-blink {
  animation: blink 1s step-end infinite;
}

/* Notification bell wiggle animation */
@keyframes wiggle {
  0%, 100% {
@@ -367,4 +387,206 @@

.animate-border-flow {
  animation: border-flow 2s linear infinite;
}
}

/* Dashboard fade-in - pure CSS so there is no hydration mismatch */
@keyframes dashboard-fade-in {
  from {
    opacity: 0;
    filter: blur(4px);
  }
  to {
    opacity: 1;
    filter: blur(0);
  }
}

.animate-dashboard-fade-in {
  animation: dashboard-fade-in 500ms ease-out forwards;
}

/* Login page - Glitch Reveal (full-screen intro) - enhanced cyberpunk style */
@keyframes orbit-splash-jitter {
  0%,
  100% {
    transform: translate3d(0, 0, 0);
    filter: none;
  }
  10% {
    transform: translate3d(-2px, 0, 0);
  }
  20% {
    transform: translate3d(2px, -1px, 0);
    filter: hue-rotate(10deg);
  }
  30% {
    transform: translate3d(-1px, 1px, 0);
  }
  45% {
    transform: translate3d(1px, 0, 0);
    filter: hue-rotate(-10deg);
  }
  60% {
    transform: translate3d(0, -1px, 0);
  }
  75% {
    transform: translate3d(1px, 1px, 0);
  }
}

@keyframes orbit-splash-noise {
  0% {
    transform: translate3d(-2%, -2%, 0);
    opacity: 0.22;
  }
  25% {
    transform: translate3d(2%, -1%, 0);
    opacity: 0.28;
  }
  50% {
    transform: translate3d(-1%, 2%, 0);
    opacity: 0.24;
  }
  75% {
    transform: translate3d(1%, 1%, 0);
    opacity: 0.30;
  }
  100% {
    transform: translate3d(-2%, -2%, 0);
    opacity: 0.22;
  }
}

@keyframes orbit-splash-sweep {
  0% {
    transform: translate3d(0, -120%, 0);
    opacity: 0;
  }
  18% {
    opacity: 0.35;
  }
  100% {
    transform: translate3d(0, 120%, 0);
    opacity: 0;
  }
}

@keyframes orbit-glitch-clip {
  0% {
    clip-path: inset(0 0 0 0);
    transform: translate3d(0, 0, 0);
  }
  16% {
    clip-path: inset(12% 0 72% 0);
    transform: translate3d(-2px, 0, 0);
  }
  32% {
    clip-path: inset(54% 0 18% 0);
    transform: translate3d(2px, 0, 0);
  }
  48% {
    clip-path: inset(78% 0 6% 0);
    transform: translate3d(-1px, 0, 0);
  }
  64% {
    clip-path: inset(30% 0 48% 0);
    transform: translate3d(1px, 0, 0);
  }
  80% {
    clip-path: inset(6% 0 86% 0);
    transform: translate3d(0, 0, 0);
  }
  100% {
    clip-path: inset(0 0 0 0);
    transform: translate3d(0, 0, 0);
  }
}

.orbit-splash-glitch {
  isolation: isolate;
  animation: orbit-splash-jitter 0.5s steps(2, end) infinite;
}

.orbit-splash-glitch::before {
  content: "";
  position: absolute;
  inset: -20%;
  pointer-events: none;
  z-index: 20;
  mix-blend-mode: screen;
  background-image:
    repeating-linear-gradient(
      0deg,
      rgba(255, 255, 255, 0.08) 0px,
      rgba(255, 255, 255, 0.08) 1px,
      transparent 1px,
      transparent 4px
    ),
    repeating-linear-gradient(
      90deg,
      rgba(255, 16, 240, 0.15) 0px,
      rgba(255, 16, 240, 0.15) 1px,
      transparent 1px,
      transparent 84px
    ),
    repeating-linear-gradient(
      45deg,
      rgba(176, 38, 255, 0.08) 0px,
      rgba(176, 38, 255, 0.08) 1px,
      transparent 1px,
      transparent 9px
    );
  animation: orbit-splash-noise 0.5s steps(2, end) infinite;
}

.orbit-splash-glitch::after {
  content: "";
  position: absolute;
  inset: 0;
  pointer-events: none;
  z-index: 20;
  background: linear-gradient(
    180deg,
    transparent 0%,
    rgba(255, 16, 240, 0.18) 50%,
    transparent 100%
  );
  opacity: 0;
  animation: orbit-splash-sweep 0.5s ease-out both;
}

.orbit-glitch-text {
  position: relative;
  display: inline-block;
  text-shadow: 0 0 20px rgba(255, 16, 240, 0.4), 0 0 40px rgba(255, 16, 240, 0.2);
}

.orbit-glitch-text::before,
.orbit-glitch-text::after {
  content: attr(data-text);
  position: absolute;
  inset: 0;
  pointer-events: none;
}

.orbit-glitch-text::before {
  color: rgba(255, 16, 240, 0.85);
  transform: translate3d(-2px, 0, 0);
  animation: orbit-glitch-clip 0.5s steps(2, end) infinite;
}

.orbit-glitch-text::after {
  color: rgba(176, 38, 255, 0.75);
  transform: translate3d(2px, 0, 0);
  animation: orbit-glitch-clip 0.5s steps(2, end) infinite reverse;
}

@media (prefers-reduced-motion: reduce) {
  .orbit-splash-glitch,
  .orbit-splash-glitch::before,
  .orbit-splash-glitch::after,
  .orbit-glitch-text::before,
  .orbit-glitch-text::after {
    animation: none !important;
  }
}
16  frontend/app/icon.svg  Normal file
@@ -0,0 +1,16 @@
<svg
  xmlns="http://www.w3.org/2000/svg"
  width="256"
  height="256"
  viewBox="0 0 24 24"
  fill="none"
  stroke="#06b6d4"
  stroke-width="2"
  stroke-linecap="round"
  stroke-linejoin="round"
>
  <title>Orbit</title>
  <path d="M21 12h-8a1 1 0 1 0 -1 1v8a9 9 0 0 0 9 -9" />
  <path d="M16 9a5 5 0 1 0 -7 7" />
  <path d="M20.486 9a9 9 0 1 0 -11.482 11.495" />
</svg>
After Width: | Height: | Size: 371 B
@@ -18,5 +18,9 @@
     "lib": "@/lib",
     "hooks": "@/hooks"
   },
-  "registries": {}
+  "registries": {
+    "@animate-ui": "https://animate-ui.com/r/{name}.json",
+    "@magicui": "https://magicui.design/r/{name}.json",
+    "@react-bits": "https://reactbits.dev/r/{name}.json"
+  }
 }
6  frontend/components/FaultyTerminal.css  Normal file
@@ -0,0 +1,6 @@
.faulty-terminal-container {
  width: 100%;
  height: 100%;
  position: relative;
  overflow: hidden;
}
424  frontend/components/FaultyTerminal.tsx  Normal file
@@ -0,0 +1,424 @@
import { Renderer, Program, Mesh, Color, Triangle } from 'ogl';
import { useEffect, useRef, useMemo, useCallback } from 'react';
import './FaultyTerminal.css';

const vertexShader = `
attribute vec2 position;
attribute vec2 uv;
varying vec2 vUv;
void main() {
  vUv = uv;
  gl_Position = vec4(position, 0.0, 1.0);
}
`;

const fragmentShader = `
precision mediump float;

varying vec2 vUv;

uniform float iTime;
uniform vec3 iResolution;
uniform float uScale;

uniform vec2 uGridMul;
uniform float uDigitSize;
uniform float uScanlineIntensity;
uniform float uGlitchAmount;
uniform float uFlickerAmount;
uniform float uNoiseAmp;
uniform float uChromaticAberration;
uniform float uDither;
uniform float uCurvature;
uniform vec3 uTint;
uniform vec2 uMouse;
uniform float uMouseStrength;
uniform float uUseMouse;
uniform float uPageLoadProgress;
uniform float uUsePageLoadAnimation;
uniform float uBrightness;

float time;

float hash21(vec2 p){
  p = fract(p * 234.56);
  p += dot(p, p + 34.56);
  return fract(p.x * p.y);
}

float noise(vec2 p)
{
  return sin(p.x * 10.0) * sin(p.y * (3.0 + sin(time * 0.090909))) + 0.2;
}

mat2 rotate(float angle)
{
  float c = cos(angle);
  float s = sin(angle);
  return mat2(c, -s, s, c);
}

float fbm(vec2 p)
{
  p *= 1.1;
  float f = 0.0;
  float amp = 0.5 * uNoiseAmp;

  mat2 modify0 = rotate(time * 0.02);
  f += amp * noise(p);
  p = modify0 * p * 2.0;
  amp *= 0.454545;

  mat2 modify1 = rotate(time * 0.02);
  f += amp * noise(p);
  p = modify1 * p * 2.0;
  amp *= 0.454545;

  mat2 modify2 = rotate(time * 0.08);
  f += amp * noise(p);

  return f;
}

float pattern(vec2 p, out vec2 q, out vec2 r) {
  vec2 offset1 = vec2(1.0);
  vec2 offset0 = vec2(0.0);
  mat2 rot01 = rotate(0.1 * time);
  mat2 rot1 = rotate(0.1);

  q = vec2(fbm(p + offset1), fbm(rot01 * p + offset1));
  r = vec2(fbm(rot1 * q + offset0), fbm(q + offset0));
  return fbm(p + r);
}

float digit(vec2 p){
  vec2 grid = uGridMul * 15.0;
  vec2 s = floor(p * grid) / grid;
  p = p * grid;
  vec2 q, r;
  float intensity = pattern(s * 0.1, q, r) * 1.3 - 0.03;

  if(uUseMouse > 0.5){
    vec2 mouseWorld = uMouse * uScale;
    float distToMouse = distance(s, mouseWorld);
    float mouseInfluence = exp(-distToMouse * 8.0) * uMouseStrength * 10.0;
    intensity += mouseInfluence;

    float ripple = sin(distToMouse * 20.0 - iTime * 5.0) * 0.1 * mouseInfluence;
    intensity += ripple;
  }

  if(uUsePageLoadAnimation > 0.5){
    float cellRandom = fract(sin(dot(s, vec2(12.9898, 78.233))) * 43758.5453);
    float cellDelay = cellRandom * 0.8;
    float cellProgress = clamp((uPageLoadProgress - cellDelay) / 0.2, 0.0, 1.0);

    float fadeAlpha = smoothstep(0.0, 1.0, cellProgress);
    intensity *= fadeAlpha;
  }

  p = fract(p);
  p *= uDigitSize;

  float px5 = p.x * 5.0;
  float py5 = (1.0 - p.y) * 5.0;
  float x = fract(px5);
  float y = fract(py5);

  float i = floor(py5) - 2.0;
  float j = floor(px5) - 2.0;
  float n = i * i + j * j;
  float f = n * 0.0625;

  float isOn = step(0.1, intensity - f);
  float brightness = isOn * (0.2 + y * 0.8) * (0.75 + x * 0.25);

  return step(0.0, p.x) * step(p.x, 1.0) * step(0.0, p.y) * step(p.y, 1.0) * brightness;
}

float onOff(float a, float b, float c)
{
  return step(c, sin(iTime + a * cos(iTime * b))) * uFlickerAmount;
}

float displace(vec2 look)
{
  float y = look.y - mod(iTime * 0.25, 1.0);
  float window = 1.0 / (1.0 + 50.0 * y * y);
  return sin(look.y * 20.0 + iTime) * 0.0125 * onOff(4.0, 2.0, 0.8) * (1.0 + cos(iTime * 60.0)) * window;
}

vec3 getColor(vec2 p){

  float bar = step(mod(p.y + time * 20.0, 1.0), 0.2) * 0.4 + 1.0;
  bar *= uScanlineIntensity;

  float displacement = displace(p);
  p.x += displacement;

  if (uGlitchAmount != 1.0) {
    float extra = displacement * (uGlitchAmount - 1.0);
    p.x += extra;
  }

  float middle = digit(p);

  const float off = 0.002;
  float sum = digit(p + vec2(-off, -off)) + digit(p + vec2(0.0, -off)) + digit(p + vec2(off, -off)) +
              digit(p + vec2(-off, 0.0)) + digit(p + vec2(0.0, 0.0)) + digit(p + vec2(off, 0.0)) +
              digit(p + vec2(-off, off)) + digit(p + vec2(0.0, off)) + digit(p + vec2(off, off));

  vec3 baseColor = vec3(0.9) * middle + sum * 0.1 * vec3(1.0) * bar;
  return baseColor;
}

vec2 barrel(vec2 uv){
  vec2 c = uv * 2.0 - 1.0;
  float r2 = dot(c, c);
  c *= 1.0 + uCurvature * r2;
  return c * 0.5 + 0.5;
}

void main() {
  time = iTime * 0.333333;
  vec2 uv = vUv;

  if(uCurvature != 0.0){
    uv = barrel(uv);
  }

  vec2 p = uv * uScale;
  vec3 col = getColor(p);

  if(uChromaticAberration != 0.0){
    vec2 ca = vec2(uChromaticAberration) / iResolution.xy;
    col.r = getColor(p + ca).r;
    col.b = getColor(p - ca).b;
  }

  col *= uTint;
  col *= uBrightness;

  if(uDither > 0.0){
    float rnd = hash21(gl_FragCoord.xy);
    col += (rnd - 0.5) * (uDither * 0.003922);
  }

  gl_FragColor = vec4(col, 1.0);
}
`;

function hexToRgb(hex: string) {
  let h = hex.replace('#', '').trim();
  if (h.length === 3)
    h = h
      .split('')
      .map(c => c + c)
      .join('');
  const num = parseInt(h, 16);
  return [((num >> 16) & 255) / 255, ((num >> 8) & 255) / 255, (num & 255) / 255];
}
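A quick sanity check of hexToRgb, which also accepts 3-digit shorthand (values rounded):

hexToRgb('#06b6d4') // [6/255, 182/255, 212/255] ≈ [0.024, 0.714, 0.831]
hexToRgb('#fff')    // 'fff' expands to 'ffffff' → [1, 1, 1]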
interface FaultyTerminalProps {
  scale?: number;
  gridMul?: [number, number];
  digitSize?: number;
  timeScale?: number;
  pause?: boolean;
  scanlineIntensity?: number;
  glitchAmount?: number;
  flickerAmount?: number;
  noiseAmp?: number;
  chromaticAberration?: number;
  dither?: number;
  curvature?: number;
  tint?: string;
  mouseReact?: boolean;
  mouseStrength?: number;
  dpr?: number;
  pageLoadAnimation?: boolean;
  brightness?: number;
  className?: string;
  style?: React.CSSProperties;
  [key: string]: any;
}

export default function FaultyTerminal({
  scale = 1,
  gridMul = [2, 1],
  digitSize = 1.5,
  timeScale = 0.3,
  pause = false,
  scanlineIntensity = 0.3,
  glitchAmount = 1,
  flickerAmount = 1,
  noiseAmp = 0,
  chromaticAberration = 0,
  dither = 0,
  curvature = 0.2,
  tint = '#ffffff',
  mouseReact = true,
  mouseStrength = 0.2,
  // Guard window access so the default is SSR-safe
  dpr = typeof window === 'undefined' ? 1 : Math.min(window.devicePixelRatio || 1, 2),
  pageLoadAnimation = true,
  brightness = 1,
  className,
  style,
  ...rest
}: FaultyTerminalProps) {
  const containerRef = useRef<HTMLDivElement>(null);
  const programRef = useRef<any>(null);
  const rendererRef = useRef<any>(null);
  const mouseRef = useRef({ x: 0.5, y: 0.5 });
  const smoothMouseRef = useRef({ x: 0.5, y: 0.5 });
  const frozenTimeRef = useRef(0);
  const rafRef = useRef(0);
  const loadAnimationStartRef = useRef(0);
  const timeOffsetRef = useRef(Math.random() * 100);

  const tintVec = useMemo(() => hexToRgb(tint), [tint]);

  const ditherValue = useMemo(() => (typeof dither === 'boolean' ? (dither ? 1 : 0) : dither), [dither]);

  const handleMouseMove = useCallback((e: MouseEvent) => {
    const ctn = containerRef.current;
    if (!ctn) return;
    const rect = ctn.getBoundingClientRect();
    const x = (e.clientX - rect.left) / rect.width;
    const y = 1 - (e.clientY - rect.top) / rect.height;
    mouseRef.current = { x, y };
  }, []);

  useEffect(() => {
    const ctn = containerRef.current;
    if (!ctn) return;

    const renderer = new Renderer({ dpr });
    rendererRef.current = renderer;
    const gl = renderer.gl;
    gl.clearColor(0, 0, 0, 1);

    const geometry = new Triangle(gl);

    const program = new Program(gl, {
      vertex: vertexShader,
      fragment: fragmentShader,
      uniforms: {
        iTime: { value: 0 },
        iResolution: {
          value: new Color(gl.canvas.width, gl.canvas.height, gl.canvas.width / gl.canvas.height)
        },
        uScale: { value: scale },

        uGridMul: { value: new Float32Array(gridMul) },
        uDigitSize: { value: digitSize },
        uScanlineIntensity: { value: scanlineIntensity },
        uGlitchAmount: { value: glitchAmount },
        uFlickerAmount: { value: flickerAmount },
        uNoiseAmp: { value: noiseAmp },
        uChromaticAberration: { value: chromaticAberration },
        uDither: { value: ditherValue },
        uCurvature: { value: curvature },
        uTint: { value: new Color(tintVec[0], tintVec[1], tintVec[2]) },
        uMouse: {
          value: new Float32Array([smoothMouseRef.current.x, smoothMouseRef.current.y])
        },
        uMouseStrength: { value: mouseStrength },
        uUseMouse: { value: mouseReact ? 1 : 0 },
        uPageLoadProgress: { value: pageLoadAnimation ? 0 : 1 },
        uUsePageLoadAnimation: { value: pageLoadAnimation ? 1 : 0 },
        uBrightness: { value: brightness }
      }
    });
    programRef.current = program;

    const mesh = new Mesh(gl, { geometry, program });

    function resize() {
      if (!ctn || !renderer) return;
      renderer.setSize(ctn.offsetWidth, ctn.offsetHeight);
      program.uniforms.iResolution.value = new Color(
        gl.canvas.width,
        gl.canvas.height,
        gl.canvas.width / gl.canvas.height
      );
    }

    const resizeObserver = new ResizeObserver(() => resize());
    resizeObserver.observe(ctn);
    resize();

    const update = (t: number) => {
      rafRef.current = requestAnimationFrame(update);

      if (pageLoadAnimation && loadAnimationStartRef.current === 0) {
        loadAnimationStartRef.current = t;
      }

      if (!pause) {
        const elapsed = (t * 0.001 + timeOffsetRef.current) * timeScale;
        program.uniforms.iTime.value = elapsed;
        frozenTimeRef.current = elapsed;
      } else {
        program.uniforms.iTime.value = frozenTimeRef.current;
      }

      if (pageLoadAnimation && loadAnimationStartRef.current > 0) {
        const animationDuration = 2000;
        const animationElapsed = t - loadAnimationStartRef.current;
        const progress = Math.min(animationElapsed / animationDuration, 1);
        program.uniforms.uPageLoadProgress.value = progress;
      }

      if (mouseReact) {
        const dampingFactor = 0.08;
        const smoothMouse = smoothMouseRef.current;
        const mouse = mouseRef.current;
        smoothMouse.x += (mouse.x - smoothMouse.x) * dampingFactor;
        smoothMouse.y += (mouse.y - smoothMouse.y) * dampingFactor;

        const mouseUniform = program.uniforms.uMouse.value;
        mouseUniform[0] = smoothMouse.x;
        mouseUniform[1] = smoothMouse.y;
      }

      renderer.render({ scene: mesh });
    };
    rafRef.current = requestAnimationFrame(update);
    ctn.appendChild(gl.canvas);

    if (mouseReact) window.addEventListener('mousemove', handleMouseMove);

    return () => {
      cancelAnimationFrame(rafRef.current);
      resizeObserver.disconnect();
      if (mouseReact) window.removeEventListener('mousemove', handleMouseMove);
      if (gl.canvas.parentElement === ctn) ctn.removeChild(gl.canvas);
      gl.getExtension('WEBGL_lose_context')?.loseContext();
      loadAnimationStartRef.current = 0;
      timeOffsetRef.current = Math.random() * 100;
    };
  }, [
    dpr,
    pause,
    timeScale,
    scale,
    gridMul,
    digitSize,
    scanlineIntensity,
    glitchAmount,
    flickerAmount,
    noiseAmp,
    chromaticAberration,
    ditherValue,
    curvature,
    tintVec,
    mouseReact,
    mouseStrength,
    pageLoadAnimation,
    brightness,
    handleMouseMove
  ]);

  // Fall back to an empty string so a missing className never renders "undefined"
  return <div ref={containerRef} className={`faulty-terminal-container ${className ?? ''}`} style={style} {...rest} />;
}
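A minimal usage sketch, assuming the component is rendered client-side (the prop values are illustrative, not from this repo):

<FaultyTerminal tint="#06b6d4" scanlineIntensity={0.4} mouseReact pageLoadAnimation />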
6  frontend/components/PixelBlast.css  Normal file
@@ -0,0 +1,6 @@
.pixel-blast-container {
  width: 100%;
  height: 100%;
  position: relative;
  overflow: hidden;
}
782  frontend/components/PixelBlast.tsx  Normal file
@@ -0,0 +1,782 @@
import { useEffect, useRef, useState, useMemo } from 'react';
import * as THREE from 'three';
import { EffectComposer, EffectPass, RenderPass, Effect } from 'postprocessing';
import './PixelBlast.css';

const createTouchTexture = () => {
  const size = 64;
  const canvas = document.createElement('canvas');
  canvas.width = size;
  canvas.height = size;
  const ctx = canvas.getContext('2d');
  if (!ctx) throw new Error('2D context not available');
  ctx.fillStyle = 'black';
  ctx.fillRect(0, 0, canvas.width, canvas.height);
  const texture = new THREE.Texture(canvas);
  texture.minFilter = THREE.LinearFilter;
  texture.magFilter = THREE.LinearFilter;
  texture.generateMipmaps = false;
  const trail: any[] = [];
  let last: any = null;
  const maxAge = 64;
  let radius = 0.1 * size;
  const speed = 1 / maxAge;
  const clear = () => {
    ctx.fillStyle = 'black';
    ctx.fillRect(0, 0, canvas.width, canvas.height);
  };
  const drawPoint = (p: any) => {
    const pos = { x: p.x * size, y: (1 - p.y) * size };
    let intensity = 1;
    const easeOutSine = (t: number) => Math.sin((t * Math.PI) / 2);
    const easeOutQuad = (t: number) => -t * (t - 2);
    if (p.age < maxAge * 0.3) intensity = easeOutSine(p.age / (maxAge * 0.3));
    else intensity = easeOutQuad(1 - (p.age - maxAge * 0.3) / (maxAge * 0.7)) || 0;
    intensity *= p.force;
    const color = `${((p.vx + 1) / 2) * 255}, ${((p.vy + 1) / 2) * 255}, ${intensity * 255}`;
    const offset = size * 5;
    ctx.shadowOffsetX = offset;
    ctx.shadowOffsetY = offset;
    ctx.shadowBlur = radius;
    ctx.shadowColor = `rgba(${color},${0.22 * intensity})`;
    ctx.beginPath();
    ctx.fillStyle = 'rgba(255,0,0,1)';
    ctx.arc(pos.x - offset, pos.y - offset, radius, 0, Math.PI * 2);
    ctx.fill();
  };
  const addTouch = (norm: any) => {
    let force = 0;
    let vx = 0;
    let vy = 0;
    if (last) {
      const dx = norm.x - last.x;
      const dy = norm.y - last.y;
      if (dx === 0 && dy === 0) return;
      const dd = dx * dx + dy * dy;
      const d = Math.sqrt(dd);
      vx = dx / (d || 1);
      vy = dy / (d || 1);
      force = Math.min(dd * 10000, 1);
    }
    last = { x: norm.x, y: norm.y };
    trail.push({ x: norm.x, y: norm.y, age: 0, force, vx, vy });
  };
  const update = () => {
    clear();
    for (let i = trail.length - 1; i >= 0; i--) {
      const point = trail[i];
      const f = point.force * speed * (1 - point.age / maxAge);
      point.x += point.vx * f;
      point.y += point.vy * f;
      point.age++;
      if (point.age > maxAge) trail.splice(i, 1);
    }
    for (let i = 0; i < trail.length; i++) drawPoint(trail[i]);
    texture.needsUpdate = true;
  };
  return {
    canvas,
    texture,
    addTouch,
    update,
    set radiusScale(v) {
      radius = 0.1 * size * v;
    },
    get radiusScale() {
      return radius / (0.1 * size);
    },
    size
  };
};
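createTouchTexture encodes pointer velocity in the red/green channels and a fading intensity in blue of a 64x64 canvas, which the liquid effect later samples to displace UVs. A sketch of driving it by hand, outside the component (hypothetical standalone use):

const touch = createTouchTexture();
touch.radiusScale = 1.5;            // widen the brush
touch.addTouch({ x: 0.5, y: 0.5 }); // normalized coordinates
touch.update();                     // decay the trail, flag texture.needsUpdate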
const createLiquidEffect = (texture: any, opts: any) => {
  const fragment = `
    uniform sampler2D uTexture;
    uniform float uStrength;
    uniform float uTime;
    uniform float uFreq;

    void mainUv(inout vec2 uv) {
      vec4 tex = texture2D(uTexture, uv);
      float vx = tex.r * 2.0 - 1.0;
      float vy = tex.g * 2.0 - 1.0;
      float intensity = tex.b;

      float wave = 0.5 + 0.5 * sin(uTime * uFreq + intensity * 6.2831853);

      float amt = uStrength * intensity * wave;

      uv += vec2(vx, vy) * amt;
    }
  `;
  return new Effect('LiquidEffect', fragment, {
    uniforms: new Map([
      ['uTexture', new THREE.Uniform(texture)],
      ['uStrength', new THREE.Uniform(opts?.strength ?? 0.025)],
      ['uTime', new THREE.Uniform(0)],
      ['uFreq', new THREE.Uniform(opts?.freq ?? 4.5)]
    ])
  });
};

const SHAPE_MAP = {
  square: 0,
  circle: 1,
  triangle: 2,
  diamond: 3
};

const VERTEX_SRC = `
void main() {
  gl_Position = vec4(position, 1.0);
}
`;

const FRAGMENT_SRC = `
precision highp float;

uniform vec3 uColor;
uniform vec2 uResolution;
uniform float uTime;
uniform float uPixelSize;
uniform float uScale;
uniform float uDensity;
uniform float uPixelJitter;
uniform int uEnableRipples;
uniform float uRippleSpeed;
uniform float uRippleThickness;
uniform float uRippleIntensity;
uniform float uEdgeFade;

uniform int uShapeType;
const int SHAPE_SQUARE = 0;
const int SHAPE_CIRCLE = 1;
const int SHAPE_TRIANGLE = 2;
const int SHAPE_DIAMOND = 3;

const int MAX_CLICKS = 10;

uniform vec2 uClickPos [MAX_CLICKS];
uniform float uClickTimes[MAX_CLICKS];

out vec4 fragColor;

float Bayer2(vec2 a) {
  a = floor(a);
  return fract(a.x / 2. + a.y * a.y * .75);
}
#define Bayer4(a) (Bayer2(.5*(a))*0.25 + Bayer2(a))
#define Bayer8(a) (Bayer4(.5*(a))*0.25 + Bayer2(a))

#define FBM_OCTAVES 2
#define FBM_LACUNARITY 1.25
#define FBM_GAIN 1.0

float hash11(float n){ return fract(sin(n)*43758.5453); }

float vnoise(vec3 p){
  vec3 ip = floor(p);
  vec3 fp = fract(p);
  float n000 = hash11(dot(ip + vec3(0.0,0.0,0.0), vec3(1.0,57.0,113.0)));
  float n100 = hash11(dot(ip + vec3(1.0,0.0,0.0), vec3(1.0,57.0,113.0)));
  float n010 = hash11(dot(ip + vec3(0.0,1.0,0.0), vec3(1.0,57.0,113.0)));
  float n110 = hash11(dot(ip + vec3(1.0,1.0,0.0), vec3(1.0,57.0,113.0)));
  float n001 = hash11(dot(ip + vec3(0.0,0.0,1.0), vec3(1.0,57.0,113.0)));
  float n101 = hash11(dot(ip + vec3(1.0,0.0,1.0), vec3(1.0,57.0,113.0)));
  float n011 = hash11(dot(ip + vec3(0.0,1.0,1.0), vec3(1.0,57.0,113.0)));
  float n111 = hash11(dot(ip + vec3(1.0,1.0,1.0), vec3(1.0,57.0,113.0)));
  vec3 w = fp*fp*fp*(fp*(fp*6.0-15.0)+10.0);
  float x00 = mix(n000, n100, w.x);
  float x10 = mix(n010, n110, w.x);
  float x01 = mix(n001, n101, w.x);
  float x11 = mix(n011, n111, w.x);
  float y0 = mix(x00, x10, w.y);
  float y1 = mix(x01, x11, w.y);
  return mix(y0, y1, w.z) * 2.0 - 1.0;
}

float fbm2(vec2 uv, float t){
  vec3 p = vec3(uv * uScale, t);
  float amp = 1.0;
  float freq = 1.0;
  float sum = 1.0;
  for (int i = 0; i < FBM_OCTAVES; ++i){
    sum += amp * vnoise(p * freq);
    freq *= FBM_LACUNARITY;
    amp *= FBM_GAIN;
  }
  return sum * 0.5 + 0.5;
}

float maskCircle(vec2 p, float cov){
  float r = sqrt(cov) * .25;
  float d = length(p - 0.5) - r;
  float aa = 0.5 * fwidth(d);
  return cov * (1.0 - smoothstep(-aa, aa, d * 2.0));
}

float maskTriangle(vec2 p, vec2 id, float cov){
  bool flip = mod(id.x + id.y, 2.0) > 0.5;
  if (flip) p.x = 1.0 - p.x;
  float r = sqrt(cov);
  float d = p.y - r*(1.0 - p.x);
  float aa = fwidth(d);
  return cov * clamp(0.5 - d/aa, 0.0, 1.0);
}

float maskDiamond(vec2 p, float cov){
  float r = sqrt(cov) * 0.564;
  return step(abs(p.x - 0.49) + abs(p.y - 0.49), r);
}

void main(){
  float pixelSize = uPixelSize;
  vec2 fragCoord = gl_FragCoord.xy - uResolution * .5;
  float aspectRatio = uResolution.x / uResolution.y;

  vec2 pixelId = floor(fragCoord / pixelSize);
  vec2 pixelUV = fract(fragCoord / pixelSize);

  float cellPixelSize = 8.0 * pixelSize;
  vec2 cellId = floor(fragCoord / cellPixelSize);
  vec2 cellCoord = cellId * cellPixelSize;
  vec2 uv = cellCoord / uResolution * vec2(aspectRatio, 1.0);

  float base = fbm2(uv, uTime * 0.05);
  base = base * 0.5 - 0.65;

  float feed = base + (uDensity - 0.5) * 0.3;

  float speed = uRippleSpeed;
  float thickness = uRippleThickness;
  const float dampT = 1.0;
  const float dampR = 10.0;

  if (uEnableRipples == 1) {
    for (int i = 0; i < MAX_CLICKS; ++i){
      vec2 pos = uClickPos[i];
      if (pos.x < 0.0) continue;
      float cellPixelSize = 8.0 * pixelSize;
      vec2 cuv = (((pos - uResolution * .5 - cellPixelSize * .5) / (uResolution))) * vec2(aspectRatio, 1.0);
      float t = max(uTime - uClickTimes[i], 0.0);
      float r = distance(uv, cuv);
      float waveR = speed * t;
      float ring = exp(-pow((r - waveR) / thickness, 2.0));
      float atten = exp(-dampT * t) * exp(-dampR * r);
      feed = max(feed, ring * atten * uRippleIntensity);
    }
  }

  float bayer = Bayer8(fragCoord / uPixelSize) - 0.5;
  float bw = step(0.5, feed + bayer);

  float h = fract(sin(dot(floor(fragCoord / uPixelSize), vec2(127.1, 311.7))) * 43758.5453);
  float jitterScale = 1.0 + (h - 0.5) * uPixelJitter;
  float coverage = bw * jitterScale;
  float M;
  if (uShapeType == SHAPE_CIRCLE) M = maskCircle (pixelUV, coverage);
  else if (uShapeType == SHAPE_TRIANGLE) M = maskTriangle(pixelUV, pixelId, coverage);
  else if (uShapeType == SHAPE_DIAMOND) M = maskDiamond(pixelUV, coverage);
  else M = coverage;

  if (uEdgeFade > 0.0) {
    vec2 norm = gl_FragCoord.xy / uResolution;
    float edge = min(min(norm.x, norm.y), min(1.0 - norm.x, 1.0 - norm.y));
    float fade = smoothstep(0.0, uEdgeFade, edge);
    M *= fade;
  }

  vec3 color = uColor;

  // sRGB gamma correction - convert linear to sRGB for accurate color output
  vec3 srgbColor = mix(
    color * 12.92,
    1.055 * pow(color, vec3(1.0 / 2.4)) - 0.055,
    step(0.0031308, color)
  );

  fragColor = vec4(srgbColor, M);
}
`;
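For intuition about the ordered dithering above, a TypeScript mirror of the shader's 2x2 Bayer helper (the 4x and 8x macros recurse on a half-scaled coordinate); this is a reasoning aid, not part of the component:

const fract = (v: number) => v - Math.floor(v);
// Bayer2(a) = fract(floor(a).x / 2 + floor(a).y * floor(a).y * 0.75)
const bayer2 = (x: number, y: number) =>
  fract(Math.floor(x) / 2 + Math.floor(y) * Math.floor(y) * 0.75);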
const MAX_CLICKS = 10;

interface PixelBlastProps {
  variant?: string;
  pixelSize?: number;
  color?: string;
  className?: string;
  style?: React.CSSProperties;
  antialias?: boolean;
  patternScale?: number;
  patternDensity?: number;
  liquid?: boolean;
  liquidStrength?: number;
  liquidRadius?: number;
  pixelSizeJitter?: number;
  enableRipples?: boolean;
  rippleIntensityScale?: number;
  rippleThickness?: number;
  rippleSpeed?: number;
  liquidWobbleSpeed?: number;
  autoPauseOffscreen?: boolean;
  speed?: number;
  transparent?: boolean;
  edgeFade?: number;
  noiseAmount?: number;
  respectReducedMotion?: boolean;
  maxPixelRatio?: number;
  onFirstFrame?: () => void;
}

const PixelBlast = ({
  variant = 'square',
  pixelSize = 3,
  color = '#B19EEF',
  className,
  style,
  antialias = true,
  patternScale = 2,
  patternDensity = 1,
  liquid = false,
  liquidStrength = 0.1,
  liquidRadius = 1,
  pixelSizeJitter = 0,
  enableRipples = true,
  rippleIntensityScale = 1,
  rippleThickness = 0.1,
  rippleSpeed = 0.3,
  liquidWobbleSpeed = 4.5,
  autoPauseOffscreen = true,
  speed = 0.5,
  transparent = true,
  edgeFade = 0.5,
  noiseAmount = 0,
  respectReducedMotion = true,
  maxPixelRatio = 2,
  onFirstFrame
}: PixelBlastProps) => {
  const containerRef = useRef(null);
  const visibilityRef = useRef({ visible: true });
  const speedRef = useRef(speed);
  const threeRef = useRef<any>(null);
  const prevConfigRef = useRef<any>(null);
  const [prefersReducedMotion, setPrefersReducedMotion] = useState(false);

  const onFirstFrameRef = useRef<PixelBlastProps['onFirstFrame']>(onFirstFrame);
  onFirstFrameRef.current = onFirstFrame;

  const firstFrameFiredRef = useRef(false);

  // Limit pixel ratio for performance (lower on mobile)
  const effectivePixelRatio = useMemo(() => {
    if (typeof window === 'undefined') return 1;
    const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent);
    const dpr = window.devicePixelRatio || 1;
    if (isMobile) return Math.min(dpr, 1.5, maxPixelRatio);
    return Math.min(dpr, maxPixelRatio);
  }, [maxPixelRatio]);

  // Check for prefers-reduced-motion
  useEffect(() => {
    if (!respectReducedMotion) return;
    const mq = window.matchMedia('(prefers-reduced-motion: reduce)');
    setPrefersReducedMotion(mq.matches);
    const handler = (e: MediaQueryListEvent) => setPrefersReducedMotion(e.matches);
    mq.addEventListener('change', handler);
    return () => mq.removeEventListener('change', handler);
  }, [respectReducedMotion]);

  // If WebGL rendering is disabled (e.g. reduced motion), still signal readiness so
  // callers don't wait forever.
  useEffect(() => {
    if (!prefersReducedMotion) return;
    if (firstFrameFiredRef.current) return;
    firstFrameFiredRef.current = true;
    onFirstFrameRef.current?.();
  }, [prefersReducedMotion]);

  // Pause animation when page is not visible or element is offscreen
  useEffect(() => {
    if (!autoPauseOffscreen || prefersReducedMotion) return;

    const container = containerRef.current;
    if (!container) return;

    // IntersectionObserver for offscreen detection
    const io = new IntersectionObserver(
      ([entry]) => {
        visibilityRef.current.visible = entry.isIntersecting;
      },
      { threshold: 0 }
    );
    io.observe(container);

    // Page Visibility API
    const handleVisibility = () => {
      if (document.hidden) {
        visibilityRef.current.visible = false;
      }
    };
    document.addEventListener('visibilitychange', handleVisibility);

    return () => {
      io.disconnect();
      document.removeEventListener('visibilitychange', handleVisibility);
    };
  }, [autoPauseOffscreen, prefersReducedMotion]);

  // Main WebGL setup effect
  useEffect(() => {
    // Skip WebGL setup if user prefers reduced motion
    if (prefersReducedMotion) return;

    const container = containerRef.current;
    if (!container) return;
    speedRef.current = speed;
    const needsReinitKeys = ['antialias', 'liquid', 'noiseAmount'];
    const cfg = { antialias, liquid, noiseAmount };
    let mustReinit = false;
    if (!threeRef.current) mustReinit = true;
    else if (prevConfigRef.current) {
      for (const k of needsReinitKeys)
        if ((prevConfigRef.current as any)[k] !== (cfg as any)[k]) {
          mustReinit = true;
          break;
        }
    }
    if (mustReinit) {
      if (threeRef.current) {
        const t = threeRef.current;
        t.resizeObserver?.disconnect();
        cancelAnimationFrame(t.raf);
        t.quad?.geometry.dispose();
        t.material.dispose();
        t.composer?.dispose();
        t.renderer.dispose();
        if (t.renderer.domElement.parentElement === container) (container as HTMLDivElement).removeChild(t.renderer.domElement);
        threeRef.current = null;
      }

      let renderer: THREE.WebGLRenderer | null = null;
      let canvas: HTMLCanvasElement | null = null;

      try {
        canvas = document.createElement('canvas');
        renderer = new THREE.WebGLRenderer({
          canvas,
          antialias,
          alpha: true,
          powerPreference: 'high-performance'
        });
        renderer.domElement.style.width = '100%';
        renderer.domElement.style.height = '100%';
        renderer.setPixelRatio(effectivePixelRatio);
        (container as HTMLDivElement).appendChild(renderer.domElement);
        if (transparent) renderer.setClearAlpha(0);
        else renderer.setClearColor(0x000000, 1);
        const uniforms = {
          uResolution: { value: new THREE.Vector2(0, 0) },
          uTime: { value: 0 },
          uColor: { value: new THREE.Color(color) },
          uClickPos: {
            value: Array.from({ length: MAX_CLICKS }, () => new THREE.Vector2(-1, -1))
          },
          uClickTimes: { value: new Float32Array(MAX_CLICKS) },
          uShapeType: { value: SHAPE_MAP[variant as keyof typeof SHAPE_MAP] ?? 0 },
          uPixelSize: { value: pixelSize * renderer.getPixelRatio() },
          uScale: { value: patternScale },
          uDensity: { value: patternDensity },
          uPixelJitter: { value: pixelSizeJitter },
          uEnableRipples: { value: enableRipples ? 1 : 0 },
          uRippleSpeed: { value: rippleSpeed },
          uRippleThickness: { value: rippleThickness },
          uRippleIntensity: { value: rippleIntensityScale },
          uEdgeFade: { value: edgeFade }
        };
        const scene = new THREE.Scene();
        const camera = new THREE.OrthographicCamera(-1, 1, 1, -1, 0, 1);
        const material = new THREE.ShaderMaterial({
          vertexShader: VERTEX_SRC,
          fragmentShader: FRAGMENT_SRC,
          uniforms,
          transparent: true,
          depthTest: false,
          depthWrite: false,
          glslVersion: THREE.GLSL3
        });
        const quadGeom = new THREE.PlaneGeometry(2, 2);
        const quad = new THREE.Mesh(quadGeom, material);
        scene.add(quad);
        const clock = new THREE.Clock();
        const setSize = () => {
          if (!renderer) return;
          const w = (container as HTMLDivElement).clientWidth || 1;
          const h = (container as HTMLDivElement).clientHeight || 1;
          renderer.setSize(w, h, false);
          uniforms.uResolution.value.set(renderer.domElement.width, renderer.domElement.height);
          if (threeRef.current?.composer)
            threeRef.current.composer.setSize(renderer.domElement.width, renderer.domElement.height);
          uniforms.uPixelSize.value = pixelSize * renderer.getPixelRatio();
        };
        setSize();
        const ro = new ResizeObserver(setSize);
        ro.observe(container);
        const randomFloat = () => {
          if (typeof window !== 'undefined' && window.crypto?.getRandomValues) {
            const u32 = new Uint32Array(1);
            window.crypto.getRandomValues(u32);
            return u32[0] / 0xffffffff;
          }
          return Math.random();
        };
        const timeOffset = randomFloat() * 1000;
        let composer: EffectComposer | undefined;
        let touch: ReturnType<typeof createTouchTexture> | undefined;
        let liquidEffect: Effect | undefined;
        if (liquid) {
          touch = createTouchTexture();
          touch.radiusScale = liquidRadius;
          composer = new EffectComposer(renderer);
          const renderPass = new RenderPass(scene, camera);
          liquidEffect = createLiquidEffect(touch.texture, {
            strength: liquidStrength,
            freq: liquidWobbleSpeed
          });
          const effectPass = new EffectPass(camera, liquidEffect);
          effectPass.renderToScreen = true;
          composer.addPass(renderPass);
          composer.addPass(effectPass);
        }
        if (noiseAmount > 0) {
          if (!composer) {
            composer = new EffectComposer(renderer);
            composer.addPass(new RenderPass(scene, camera));
          }
          const noiseEffect = new Effect(
            'NoiseEffect',
            `uniform float uTime; uniform float uAmount; float hash(vec2 p){ return fract(sin(dot(p, vec2(127.1,311.7))) * 43758.5453);} void mainUv(inout vec2 uv){} void mainImage(const in vec4 inputColor,const in vec2 uv,out vec4 outputColor){ float n=hash(floor(uv*vec2(1920.0,1080.0))+floor(uTime*60.0)); float g=(n-0.5)*uAmount; outputColor=inputColor+vec4(vec3(g),0.0);} `,
            {
              uniforms: new Map([
                ['uTime', new THREE.Uniform(0)],
                ['uAmount', new THREE.Uniform(noiseAmount)]
              ])
            }
          );
          const noisePass = new EffectPass(camera, noiseEffect);
          noisePass.renderToScreen = true;
          if (composer && composer.passes.length > 0) composer.passes.forEach(p => (p.renderToScreen = false));
          composer.addPass(noisePass);
        }
        if (composer && renderer) composer.setSize(renderer.domElement.width, renderer.domElement.height);
        const mapToPixels = (e: MouseEvent | PointerEvent) => {
          if (!renderer) return { fx: 0, fy: 0, w: 0, h: 0 };
          const rect = renderer.domElement.getBoundingClientRect();
          const scaleX = renderer.domElement.width / rect.width;
          const scaleY = renderer.domElement.height / rect.height;
          const fx = (e.clientX - rect.left) * scaleX;
          const fy = (rect.height - (e.clientY - rect.top)) * scaleY;
          return {
            fx,
            fy,
            w: renderer.domElement.width,
            h: renderer.domElement.height
          };
        };
        let lastRippleTime = 0;
        const rippleThrottle = 150; // ms between ripples
        const onPointerMove = (e: MouseEvent | PointerEvent) => {
          const { fx, fy, w, h } = mapToPixels(e);

          // Trigger ripple on mouse move (throttled)
          const now = performance.now();
          if (now - lastRippleTime > rippleThrottle) {
            const ix = threeRef.current?.clickIx ?? 0;
            uniforms.uClickPos.value[ix].set(fx, fy);
            uniforms.uClickTimes.value[ix] = uniforms.uTime.value;
            if (threeRef.current) threeRef.current.clickIx = (ix + 1) % MAX_CLICKS;
            lastRippleTime = now;
          }

          // Liquid touch effect
          if (touch) {
            touch.addTouch({ x: fx / w, y: fy / h });
          }
        };
        renderer.domElement.addEventListener('pointermove', onPointerMove, {
          passive: true
        });

        // Store event handler for cleanup
        const domElement = renderer.domElement;
        let raf = 0;
        let lastFrameTime = 0;
        const targetDelta = 1000 / 10; // throttle to ~10fps (100 ms between frames)
        const animate = (now?: number) => {
          const timeNow = now ?? performance.now();
          if (autoPauseOffscreen && !visibilityRef.current.visible) {
            raf = requestAnimationFrame(animate);
            if (threeRef.current) threeRef.current.raf = raf;
            return;
          }
          if (timeNow - lastFrameTime < targetDelta) {
            raf = requestAnimationFrame(animate);
            if (threeRef.current) threeRef.current.raf = raf;
            return;
          }
          lastFrameTime = timeNow;
          uniforms.uTime.value = timeOffset + clock.getElapsedTime() * speedRef.current;
          if (liquidEffect) liquidEffect.uniforms.get('uTime')!.value = uniforms.uTime.value;
          if (composer) {
            if (touch) touch.update();
            composer.passes.forEach(p => {
              const effs = (p as any).effects;
              if (effs)
                effs.forEach((eff: Effect) => {
                  const u = eff.uniforms?.get('uTime');
                  if (u) u.value = uniforms.uTime.value;
                });
            });
            composer.render();
          } else if (renderer) renderer.render(scene, camera);

          if (!firstFrameFiredRef.current) {
            firstFrameFiredRef.current = true;
            onFirstFrameRef.current?.();
          }

          raf = requestAnimationFrame(animate);
          if (threeRef.current) threeRef.current.raf = raf;
        };
        raf = requestAnimationFrame(animate);
        threeRef.current = {
          renderer,
          scene,
          camera,
          material,
          clock,
          clickIx: 0,
          uniforms,
          resizeObserver: ro,
          raf,
          quad,
          timeOffset,
          composer,
          touch,
          liquidEffect,
          onPointerMove,
          domElement
        };
      } catch (err) {
        console.error('[PixelBlast] WebGL initialization failed', err);
        if (renderer) renderer.dispose();
        if (canvas && canvas.parentElement === container) {
          (container as HTMLDivElement).removeChild(canvas);
        }
        threeRef.current = null;

        if (!firstFrameFiredRef.current) {
          firstFrameFiredRef.current = true;
          onFirstFrameRef.current?.();
        }
      }
    } else {
      const t = threeRef.current;
      t.uniforms.uShapeType.value = SHAPE_MAP[variant as keyof typeof SHAPE_MAP] ?? 0;
      t.uniforms.uPixelSize.value = pixelSize * t.renderer.getPixelRatio();
      t.uniforms.uColor.value.set(color);
      t.uniforms.uScale.value = patternScale;
      t.uniforms.uDensity.value = patternDensity;
      t.uniforms.uPixelJitter.value = pixelSizeJitter;
      t.uniforms.uEnableRipples.value = enableRipples ? 1 : 0;
      t.uniforms.uRippleIntensity.value = rippleIntensityScale;
      t.uniforms.uRippleThickness.value = rippleThickness;
      t.uniforms.uRippleSpeed.value = rippleSpeed;
      t.uniforms.uEdgeFade.value = edgeFade;
      if (transparent) t.renderer.setClearAlpha(0);
      else t.renderer.setClearColor(0x000000, 1);
      if (t.liquidEffect) {
        // Look up the uniform itself; aliasing the effect here would silently do nothing
        const uStrength = t.liquidEffect.uniforms.get('uStrength');
        if (uStrength) uStrength.value = liquidStrength;
        const uFreq = t.liquidEffect.uniforms.get('uFreq');
        if (uFreq) uFreq.value = liquidWobbleSpeed;
      }
      if (t.touch) t.touch.radiusScale = liquidRadius;
    }
    prevConfigRef.current = cfg;
    return () => {
      if (!threeRef.current) return;
      const t = threeRef.current;

      // Remove event listeners
      if (t.domElement && t.onPointerMove) {
        t.domElement.removeEventListener('pointermove', t.onPointerMove);
      }

      t.resizeObserver?.disconnect();
      cancelAnimationFrame(t.raf);

      // Dispose Three.js resources
      t.quad?.geometry.dispose();
      t.material.dispose();
      t.composer?.dispose();

      // Dispose touch texture
      if (t.touch?.texture) {
        t.touch.texture.dispose();
      }

      t.renderer.dispose();
      if (t.renderer.domElement.parentElement === container) {
        (container as HTMLDivElement).removeChild(t.renderer.domElement);
      }
      threeRef.current = null;
    };
  }, [
    antialias,
    liquid,
    noiseAmount,
    pixelSize,
    patternScale,
    patternDensity,
    enableRipples,
    rippleIntensityScale,
    rippleThickness,
    rippleSpeed,
    pixelSizeJitter,
    edgeFade,
    transparent,
    liquidStrength,
    liquidRadius,
    liquidWobbleSpeed,
    autoPauseOffscreen,
    variant,
    color,
    speed,
    prefersReducedMotion,
    effectivePixelRatio
  ]);

  // Render empty container if user prefers reduced motion
  if (prefersReducedMotion) {
    return (
      <div
        ref={containerRef}
        className={`pixel-blast-container ${className ?? ''}`}
        style={{ ...style, backgroundColor: 'transparent' }}
        aria-label="PixelBlast background (disabled for reduced motion)"
      />
    );
  }

  return (
    <div
      ref={containerRef}
      className={`pixel-blast-container ${className ?? ''}`}
      style={style}
      aria-label="PixelBlast interactive background"
    />
  );
};

export default PixelBlast;
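A minimal usage sketch, assuming a client-rendered page; the props shown mirror the component's own defaults above:

<PixelBlast variant="circle" color="#B19EEF" pixelSize={3} liquid enableRipples />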
30  frontend/components/Shuffle.css  Normal file
@@ -0,0 +1,30 @@
.shuffle-parent {
  display: inline-block;
  white-space: normal;
  word-wrap: break-word;
  will-change: transform;
  line-height: 1.2;
  visibility: hidden;
}

.shuffle-parent.is-ready {
  visibility: visible;
}

.shuffle-char-wrapper {
  display: inline-block;
  overflow: hidden;
  vertical-align: baseline;
  position: relative;
}

.shuffle-char-wrapper > span {
  display: inline-flex;
  will-change: transform;
}

.shuffle-char {
  line-height: 1;
  display: inline-block;
  text-align: center;
}
423  frontend/components/Shuffle.tsx  Normal file
@@ -0,0 +1,423 @@
import React, { useRef, useEffect, useState, useMemo } from 'react';
import { gsap } from 'gsap';
import { ScrollTrigger } from 'gsap/ScrollTrigger';
import { SplitText as GSAPSplitText } from 'gsap/SplitText';
import { useGSAP } from '@gsap/react';
import './Shuffle.css';

gsap.registerPlugin(ScrollTrigger, GSAPSplitText, useGSAP);

interface ShuffleProps {
  text: string;
  className?: string;
  style?: React.CSSProperties;
  shuffleDirection?: 'up' | 'down' | 'left' | 'right';
  duration?: number;
  maxDelay?: number;
  ease?: string;
  threshold?: number;
  rootMargin?: string;
  tag?: keyof React.JSX.IntrinsicElements;
  textAlign?: 'left' | 'center' | 'right';
  onShuffleComplete?: () => void;
  shuffleTimes?: number;
  animationMode?: 'evenodd' | 'random';
  loop?: boolean;
  loopDelay?: number;
  stagger?: number;
  scrambleCharset?: string;
  colorFrom?: string;
  colorTo?: string;
  triggerOnce?: boolean;
  respectReducedMotion?: boolean;
  triggerOnHover?: boolean;
  autoPlay?: boolean;
}

const Shuffle: React.FC<ShuffleProps> = ({
  text,
  className = '',
  style = {},
  shuffleDirection = 'right',
  duration = 0.35,
  maxDelay = 0,
  ease = 'power3.out',
  threshold = 0.1,
  rootMargin = '-100px',
  tag = 'p',
  textAlign = 'center',
  onShuffleComplete,
  shuffleTimes = 1,
  animationMode = 'evenodd',
  loop = false,
  loopDelay = 0,
  stagger = 0.03,
  scrambleCharset = '',
  colorFrom,
  colorTo,
  triggerOnce = true,
  respectReducedMotion = true,
  triggerOnHover = true,
  autoPlay = true
}) => {
  const ref = useRef<HTMLElement | null>(null);
  const [fontsLoaded, setFontsLoaded] = useState(false);
  const [ready, setReady] = useState(false);

  const splitRef = useRef<any>(null);
  const wrappersRef = useRef<any[]>([]);
  const tlRef = useRef<gsap.core.Timeline | null>(null);
  const playingRef = useRef(false);
  const hoverHandlerRef = useRef<((e: MouseEvent) => void) | null>(null);

  useEffect(() => {
    if ('fonts' in document) {
      if (document.fonts.status === 'loaded') setFontsLoaded(true);
      else document.fonts.ready.then(() => setFontsLoaded(true));
    } else setFontsLoaded(true);
  }, []);

  const scrollTriggerStart = useMemo(() => {
    const startPct = (1 - threshold) * 100;
    const mm = /^(-?\d+(?:\.\d+)?)(px|em|rem|%)?$/.exec(rootMargin || '');
    const mv = mm ? parseFloat(mm[1]) : 0;
    const mu = mm ? mm[2] || 'px' : 'px';
    const sign = mv === 0 ? '' : mv < 0 ? `-=${Math.abs(mv)}${mu}` : `+=${mv}${mu}`;
    return `top ${startPct}%${sign}`;
  }, [threshold, rootMargin]);
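A worked example of scrollTriggerStart with the default props: threshold = 0.1 gives startPct = (1 - 0.1) * 100 = 90, and rootMargin = '-100px' parses to mv = -100 with mu = 'px', so sign = '-=100px' and the memo returns:

// scrollTriggerStart === 'top 90%-=100px'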
  useGSAP(
    () => {
      if (!ref.current || !text || !fontsLoaded) return;
      if (respectReducedMotion && window.matchMedia && window.matchMedia('(prefers-reduced-motion: reduce)').matches) {
        setReady(true);
        onShuffleComplete?.();
        return;
      }

      const el = ref.current;

      const start = scrollTriggerStart;

      const removeHover = () => {
        if (hoverHandlerRef.current && ref.current) {
          ref.current.removeEventListener('mouseenter', hoverHandlerRef.current);
          hoverHandlerRef.current = null;
        }
      };

      const teardown = () => {
        if (tlRef.current) {
          tlRef.current.kill();
          tlRef.current = null;
        }
        if (wrappersRef.current.length) {
          wrappersRef.current.forEach(wrap => {
            const inner = wrap.firstElementChild;
            const orig = inner?.querySelector('[data-orig="1"]');
            if (orig && wrap.parentNode) wrap.parentNode.replaceChild(orig, wrap);
          });
          wrappersRef.current = [];
        }
        try {
          splitRef.current?.revert();
        } catch {
          /* noop */
        }
        splitRef.current = null;
        playingRef.current = false;
      };

      const build = () => {
        teardown();

        splitRef.current = new GSAPSplitText(el, {
          type: 'chars',
          charsClass: 'shuffle-char',
          wordsClass: 'shuffle-word',
          linesClass: 'shuffle-line',
          smartWrap: true,
          reduceWhiteSpace: false
        });

        const chars = splitRef.current.chars || [];
        wrappersRef.current = [];

        const rolls = Math.max(1, Math.floor(shuffleTimes));
        const rand = (set: string) => set.charAt(Math.floor(Math.random() * set.length)) || '';

        chars.forEach((ch: any) => {
          const parent = ch.parentElement;
          if (!parent) return;

          const w = ch.getBoundingClientRect().width;
          const h = ch.getBoundingClientRect().height;
          if (!w) return;

          const wrap = document.createElement('span');
          Object.assign(wrap.style, {
            display: 'inline-block',
            overflow: 'hidden',
            width: w + 'px',
            height: shuffleDirection === 'up' || shuffleDirection === 'down' ? h + 'px' : 'auto',
            verticalAlign: 'bottom'
          });

          const inner = document.createElement('span');
          Object.assign(inner.style, {
            display: 'inline-block',
            whiteSpace: shuffleDirection === 'up' || shuffleDirection === 'down' ? 'normal' : 'nowrap',
            willChange: 'transform'
          });

          parent.insertBefore(wrap, ch);
          wrap.appendChild(inner);

          const firstOrig = ch.cloneNode(true);
          Object.assign(firstOrig.style, {
            display: shuffleDirection === 'up' || shuffleDirection === 'down' ? 'block' : 'inline-block',
            width: w + 'px',
            textAlign: 'center'
          });

          ch.setAttribute('data-orig', '1');
          Object.assign(ch.style, {
            display: shuffleDirection === 'up' || shuffleDirection === 'down' ? 'block' : 'inline-block',
            width: w + 'px',
            textAlign: 'center'
          });

          inner.appendChild(firstOrig);
          for (let k = 0; k < rolls; k++) {
            const c = ch.cloneNode(true);
            if (scrambleCharset) c.textContent = rand(scrambleCharset);
            Object.assign(c.style, {
              display: shuffleDirection === 'up' || shuffleDirection === 'down' ? 'block' : 'inline-block',
              width: w + 'px',
              textAlign: 'center'
            });
            inner.appendChild(c);
          }
          inner.appendChild(ch);

          const steps = rolls + 1;

          if (shuffleDirection === 'right' || shuffleDirection === 'down') {
            const firstCopy = inner.firstElementChild;
            const real = inner.lastElementChild;
            if (real) inner.insertBefore(real, inner.firstChild);
            if (firstCopy) inner.appendChild(firstCopy);
          }

          let startX = 0;
          let finalX = 0;
          let startY = 0;
          let finalY = 0;

          if (shuffleDirection === 'right') {
            startX = -steps * w;
            finalX = 0;
          } else if (shuffleDirection === 'left') {
            startX = 0;
            finalX = -steps * w;
          } else if (shuffleDirection === 'down') {
            startY = -steps * h;
            finalY = 0;
          } else if (shuffleDirection === 'up') {
            startY = 0;
            finalY = -steps * h;
          }

          if (shuffleDirection === 'left' || shuffleDirection === 'right') {
            gsap.set(inner, { x: startX, y: 0, force3D: true });
            inner.setAttribute('data-start-x', String(startX));
            inner.setAttribute('data-final-x', String(finalX));
          } else {
            gsap.set(inner, { x: 0, y: startY, force3D: true });
            inner.setAttribute('data-start-y', String(startY));
            inner.setAttribute('data-final-y', String(finalY));
          }

          if (colorFrom) inner.style.color = colorFrom;
          wrappersRef.current.push(wrap);
        });
      };

      const inners = () => wrappersRef.current.map(w => w.firstElementChild);

      const randomizeScrambles = () => {
        if (!scrambleCharset) return;
        wrappersRef.current.forEach(w => {
          const strip = w.firstElementChild;
          if (!strip) return;
          const kids = Array.from(strip.children) as Element[];
          for (let i = 1; i < kids.length - 1; i++) {
            kids[i].textContent = scrambleCharset.charAt(Math.floor(Math.random() * scrambleCharset.length));
          }
        });
      };

      const cleanupToStill = () => {
        wrappersRef.current.forEach(w => {
          const strip = w.firstElementChild;
          if (!strip) return;
          const real = strip.querySelector('[data-orig="1"]');
          if (!real) return;
          strip.replaceChildren(real);
          strip.style.transform = 'none';
          strip.style.willChange = 'auto';
        });
      };

      const play = () => {
        const strips = inners();
        if (!strips.length) return;

        playingRef.current = true;
        const isVertical = shuffleDirection === 'up' || shuffleDirection === 'down';

        const tl = gsap.timeline({
          smoothChildTiming: true,
          repeat: loop ? -1 : 0,
          repeatDelay: loop ? loopDelay : 0,
          onRepeat: () => {
            if (scrambleCharset) randomizeScrambles();
            if (isVertical) {
              gsap.set(strips, { y: (i, t) => parseFloat(t.getAttribute('data-start-y') || '0') });
            } else {
              gsap.set(strips, { x: (i, t) => parseFloat(t.getAttribute('data-start-x') || '0') });
            }
            onShuffleComplete?.();
          },
          onComplete: () => {
            playingRef.current = false;
            if (!loop) {
              cleanupToStill();
              if (colorTo) gsap.set(strips, { color: colorTo });
              onShuffleComplete?.();
              armHover();
            }
          }
        });

        const addTween = (targets: any, at: any) => {
          const vars: any = {
            duration,
            ease,
            force3D: true,
            stagger: animationMode === 'evenodd' ? stagger : 0
          };
          if (isVertical) {
            vars.y = (i: number, t: any) => parseFloat(t.getAttribute('data-final-y') || '0');
          } else {
            vars.x = (i: number, t: any) => parseFloat(t.getAttribute('data-final-x') || '0');
          }

          tl.to(targets, vars, at);

          if (colorFrom && colorTo) {
            tl.to(targets, { color: colorTo, duration, ease }, at);
          }
        };

        if (animationMode === 'evenodd') {
          const odd = strips.filter((_, i) => i % 2 === 1);
          const even = strips.filter((_, i) => i % 2 === 0);
          const oddTotal = duration + Math.max(0, odd.length - 1) * stagger;
|
||||
const evenStart = odd.length ? oddTotal * 0.7 : 0;
|
||||
if (odd.length) addTween(odd, 0);
|
||||
if (even.length) addTween(even, evenStart);
|
||||
} else {
|
||||
strips.forEach(strip => {
|
||||
const d = Math.random() * maxDelay;
|
||||
const vars: any = {
|
||||
duration,
|
||||
ease,
|
||||
force3D: true
|
||||
};
|
||||
if (isVertical) {
|
||||
vars.y = parseFloat(strip.getAttribute('data-final-y') || '0');
|
||||
} else {
|
||||
vars.x = parseFloat(strip.getAttribute('data-final-x') || '0');
|
||||
}
|
||||
tl.to(strip, vars, d);
|
||||
if (colorFrom && colorTo) tl.fromTo(strip, { color: colorFrom }, { color: colorTo, duration, ease }, d);
|
||||
});
|
||||
}
|
||||
|
||||
tlRef.current = tl;
|
||||
};
|
||||
|
||||
const armHover = () => {
|
||||
if (!triggerOnHover || !ref.current) return;
|
||||
removeHover();
|
||||
const handler = () => {
|
||||
if (playingRef.current) return;
|
||||
build();
|
||||
if (scrambleCharset) randomizeScrambles();
|
||||
play();
|
||||
};
|
||||
hoverHandlerRef.current = handler;
|
||||
ref.current.addEventListener('mouseenter', handler);
|
||||
};
|
||||
|
||||
const create = () => {
|
||||
build();
|
||||
if (scrambleCharset) randomizeScrambles();
|
||||
if (autoPlay) {
|
||||
play();
|
||||
}
|
||||
armHover();
|
||||
setReady(true);
|
||||
};
|
||||
|
||||
const st = ScrollTrigger.create({
|
||||
trigger: el,
|
||||
start,
|
||||
once: triggerOnce,
|
||||
onEnter: create
|
||||
});
|
||||
|
||||
return () => {
|
||||
st.kill();
|
||||
removeHover();
|
||||
teardown();
|
||||
setReady(false);
|
||||
};
|
||||
},
|
||||
{
|
||||
dependencies: [
|
||||
text,
|
||||
duration,
|
||||
maxDelay,
|
||||
ease,
|
||||
scrollTriggerStart,
|
||||
fontsLoaded,
|
||||
shuffleDirection,
|
||||
shuffleTimes,
|
||||
animationMode,
|
||||
loop,
|
||||
loopDelay,
|
||||
stagger,
|
||||
scrambleCharset,
|
||||
colorFrom,
|
||||
colorTo,
|
||||
triggerOnce,
|
||||
respectReducedMotion,
|
||||
triggerOnHover,
|
||||
onShuffleComplete,
|
||||
autoPlay
|
||||
],
|
||||
scope: ref
|
||||
}
|
||||
);
|
||||
|
||||
const commonStyle = useMemo(() => ({ textAlign, ...style }), [textAlign, style]);
|
||||
|
||||
const classes = useMemo(() => `shuffle-parent ${ready ? 'is-ready' : ''} ${className}`, [ready, className]);
|
||||
|
||||
const Tag = tag || 'p';
|
||||
return React.createElement(Tag, { ref, className: classes, style: commonStyle }, text);
|
||||
};
|
||||
|
||||
export default Shuffle;
|
||||
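A minimal usage sketch for the Shuffle component above. The prop names are taken from this hunk's dependency list (text, shuffleDirection, shuffleTimes, scrambleCharset, loop, triggerOnHover, onShuffleComplete); the import path and the concrete values are assumptions, not confirmed by this diff.

// usage sketch (assumed import path)
import Shuffle from '@/components/shuffle';

export function HeroTitle() {
  return (
    <Shuffle
      text="XINGRIN"
      shuffleDirection="up"        // 'up' | 'down' | 'left' | 'right'
      shuffleTimes={3}             // scrambled copies rolled through per character
      scrambleCharset="ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
      loop={false}
      triggerOnHover               // re-runs build() + play() on mouseenter
      onShuffleComplete={() => console.log('shuffle finished')}
    />
  );
}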
frontend/components/about-dialog.tsx (new file, 189 lines)
@@ -0,0 +1,189 @@
"use client"

import { useState } from 'react'
import { useTranslations } from 'next-intl'
import { useQueryClient } from '@tanstack/react-query'
import {
  IconRadar,
  IconRefresh,
  IconExternalLink,
  IconBrandGithub,
  IconMessageReport,
  IconBook,
  IconFileText,
  IconCheck,
  IconArrowUp,
} from '@tabler/icons-react'

import {
  Dialog,
  DialogContent,
  DialogHeader,
  DialogTitle,
  DialogTrigger,
} from '@/components/ui/dialog'
import { Button } from '@/components/ui/button'
import { Separator } from '@/components/ui/separator'
import { Badge } from '@/components/ui/badge'
import { useVersion } from '@/hooks/use-version'
import { VersionService } from '@/services/version.service'
import type { UpdateCheckResult } from '@/types/version.types'

interface AboutDialogProps {
  children: React.ReactNode
}

export function AboutDialog({ children }: AboutDialogProps) {
  const t = useTranslations('about')
  const { data: versionData } = useVersion()
  const queryClient = useQueryClient()

  const [isChecking, setIsChecking] = useState(false)
  const [updateResult, setUpdateResult] = useState<UpdateCheckResult | null>(null)
  const [checkError, setCheckError] = useState<string | null>(null)

  const handleCheckUpdate = async () => {
    setIsChecking(true)
    setCheckError(null)
    try {
      const result = await VersionService.checkUpdate()
      setUpdateResult(result)
      queryClient.setQueryData(['check-update'], result)
    } catch {
      setCheckError(t('checkFailed'))
    } finally {
      setIsChecking(false)
    }
  }

  const currentVersion = updateResult?.currentVersion || versionData?.version || '-'
  const latestVersion = updateResult?.latestVersion
  const hasUpdate = updateResult?.hasUpdate

  return (
    <Dialog>
      <DialogTrigger asChild>
        {children}
      </DialogTrigger>
      <DialogContent className="sm:max-w-md">
        <DialogHeader>
          <DialogTitle>{t('title')}</DialogTitle>
        </DialogHeader>

        <div className="space-y-6">
          {/* Logo and name */}
          <div className="flex flex-col items-center py-4">
            <div className="flex h-16 w-16 items-center justify-center rounded-2xl bg-primary/10 mb-3">
              <IconRadar className="h-8 w-8 text-primary" />
            </div>
            <h2 className="text-xl font-semibold">{t('productName')}</h2>
            <p className="text-sm text-muted-foreground">{t('description')}</p>
          </div>

          {/* Version info */}
          <div className="rounded-lg border p-4 space-y-3">
            <div className="flex items-center justify-between">
              <span className="text-sm text-muted-foreground">{t('currentVersion')}</span>
              <span className="font-mono text-sm">{currentVersion}</span>
            </div>

            {updateResult && (
              <div className="flex items-center justify-between">
                <span className="text-sm text-muted-foreground">{t('latestVersion')}</span>
                <div className="flex items-center gap-2">
                  <span className="font-mono text-sm">{latestVersion}</span>
                  {hasUpdate ? (
                    <Badge variant="default" className="gap-1">
                      <IconArrowUp className="h-3 w-3" />
                      {t('updateAvailable')}
                    </Badge>
                  ) : (
                    <Badge variant="secondary" className="gap-1">
                      <IconCheck className="h-3 w-3" />
                      {t('upToDate')}
                    </Badge>
                  )}
                </div>
              </div>
            )}

            {checkError && (
              <p className="text-sm text-destructive">{checkError}</p>
            )}

            <div className="flex gap-2">
              <Button
                variant="outline"
                size="sm"
                className="flex-1"
                onClick={handleCheckUpdate}
                disabled={isChecking}
              >
                <IconRefresh className={`h-4 w-4 mr-2 ${isChecking ? 'animate-spin' : ''}`} />
                {isChecking ? t('checking') : t('checkUpdate')}
              </Button>

              {hasUpdate && updateResult?.releaseUrl && (
                <Button
                  variant="default"
                  size="sm"
                  className="flex-1"
                  asChild
                >
                  <a href={updateResult.releaseUrl} target="_blank" rel="noopener noreferrer">
                    <IconExternalLink className="h-4 w-4 mr-2" />
                    {t('viewRelease')}
                  </a>
                </Button>
              )}
            </div>

            {hasUpdate && (
              <div className="rounded-md bg-muted p-3 text-sm text-muted-foreground">
                <p>{t('updateHint')}</p>
                <code className="mt-1 block rounded bg-background px-2 py-1 font-mono text-xs">
                  sudo ./update.sh
                </code>
              </div>
            )}
          </div>

          <Separator />

          {/* Links */}
          <div className="grid grid-cols-2 gap-2">
            <Button variant="ghost" size="sm" className="justify-start" asChild>
              <a href="https://github.com/yyhuni/xingrin" target="_blank" rel="noopener noreferrer">
                <IconBrandGithub className="h-4 w-4 mr-2" />
                GitHub
              </a>
            </Button>
            <Button variant="ghost" size="sm" className="justify-start" asChild>
              <a href="https://github.com/yyhuni/xingrin/releases" target="_blank" rel="noopener noreferrer">
                <IconFileText className="h-4 w-4 mr-2" />
                {t('changelog')}
              </a>
            </Button>
            <Button variant="ghost" size="sm" className="justify-start" asChild>
              <a href="https://github.com/yyhuni/xingrin/issues" target="_blank" rel="noopener noreferrer">
                <IconMessageReport className="h-4 w-4 mr-2" />
                {t('feedback')}
              </a>
            </Button>
            <Button variant="ghost" size="sm" className="justify-start" asChild>
              <a href="https://github.com/yyhuni/xingrin#readme" target="_blank" rel="noopener noreferrer">
                <IconBook className="h-4 w-4 mr-2" />
                {t('docs')}
              </a>
            </Button>
          </div>

          {/* Footer */}
          <p className="text-center text-xs text-muted-foreground">
            © 2026 {t('productName')} · GPL-3.0
          </p>
        </div>
      </DialogContent>
    </Dialog>
  )
}
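AboutDialog renders its children as the DialogTrigger, so any focusable element can open it. A sketch of how it can be mounted (this pairing with a plain Button is illustrative; the sidebar hunk later in this diff wraps a SidebarMenuButton instead):

import { AboutDialog } from '@/components/about-dialog';
import { Button } from '@/components/ui/button';

export function AboutButton() {
  return (
    <AboutDialog>
      <Button variant="ghost" size="sm">About</Button>
    </AboutDialog>
  );
}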
@@ -0,0 +1,360 @@
'use client';

import * as React from 'react';

import { cn } from '@/lib/utils';

type MouseGravity = 'attract' | 'repel';
type GlowAnimation = 'instant' | 'ease' | 'spring';
type StarsInteractionType = 'bounce' | 'merge';

type GravityStarsProps = {
  starsCount?: number;
  starsSize?: number;
  starsOpacity?: number;
  glowIntensity?: number;
  glowAnimation?: GlowAnimation;
  movementSpeed?: number;
  mouseInfluence?: number;
  mouseGravity?: MouseGravity;
  gravityStrength?: number;
  starsInteraction?: boolean;
  starsInteractionType?: StarsInteractionType;
} & React.ComponentProps<'div'>;

type Particle = {
  x: number;
  y: number;
  vx: number;
  vy: number;
  size: number;
  opacity: number;
  baseOpacity: number;
  mass: number;
  glowMultiplier?: number;
  glowVelocity?: number;
};

function GravityStarsBackground({
  starsCount = 75,
  starsSize = 2,
  starsOpacity = 0.75,
  glowIntensity = 15,
  glowAnimation = 'ease',
  movementSpeed = 0.3,
  mouseInfluence = 100,
  mouseGravity = 'attract',
  gravityStrength = 75,
  starsInteraction = false,
  starsInteractionType = 'bounce',
  className,
  ...props
}: GravityStarsProps) {
  const containerRef = React.useRef<HTMLDivElement | null>(null);
  const canvasRef = React.useRef<HTMLCanvasElement | null>(null);
  const animRef = React.useRef<number | null>(null);
  const starsRef = React.useRef<Particle[]>([]);
  const mouseRef = React.useRef<{ x: number; y: number }>({ x: 0, y: 0 });
  const [dpr, setDpr] = React.useState(1);
  const [canvasSize, setCanvasSize] = React.useState({
    width: 800,
    height: 600,
  });

  const readColor = React.useCallback(() => {
    const el = containerRef.current;
    if (!el) return '#ffffff';
    const cs = getComputedStyle(el);
    return cs.color || '#ffffff';
  }, []);

  const initStars = React.useCallback(
    (w: number, h: number) => {
      starsRef.current = Array.from({ length: starsCount }).map(() => {
        const angle = Math.random() * Math.PI * 2;
        const speed = movementSpeed * (0.5 + Math.random() * 0.5);
        return {
          x: Math.random() * w,
          y: Math.random() * h,
          vx: Math.cos(angle) * speed,
          vy: Math.sin(angle) * speed,
          size: Math.random() * starsSize + 1,
          opacity: starsOpacity,
          baseOpacity: starsOpacity,
          mass: Math.random() * 0.5 + 0.5,
          glowMultiplier: 1,
          glowVelocity: 0,
        };
      });
    },
    [starsCount, movementSpeed, starsOpacity, starsSize],
  );

  const redistributeStars = React.useCallback((w: number, h: number) => {
    starsRef.current.forEach((p) => {
      p.x = Math.random() * w;
      p.y = Math.random() * h;
    });
  }, []);

  const resizeCanvas = React.useCallback(() => {
    const canvas = canvasRef.current;
    const container = containerRef.current;
    if (!canvas || !container) return;
    const rect = container.getBoundingClientRect();
    const nextDpr = Math.max(1, Math.min(window.devicePixelRatio || 1, 2));
    setDpr(nextDpr);
    canvas.width = Math.max(1, Math.floor(rect.width * nextDpr));
    canvas.height = Math.max(1, Math.floor(rect.height * nextDpr));
    canvas.style.width = `${rect.width}px`;
    canvas.style.height = `${rect.height}px`;
    setCanvasSize({ width: rect.width, height: rect.height });
    if (starsRef.current.length === 0) {
      initStars(rect.width, rect.height);
    } else {
      redistributeStars(rect.width, rect.height);
    }
  }, [initStars, redistributeStars]);

  const handlePointerMove = React.useCallback(
    (e: React.MouseEvent | React.TouchEvent) => {
      const canvas = canvasRef.current;
      if (!canvas) return;
      const rect = canvas.getBoundingClientRect();
      let clientX = 0;
      let clientY = 0;
      if ('touches' in e) {
        const t = e.touches[0];
        if (!t) return;
        clientX = t.clientX;
        clientY = t.clientY;
      } else {
        clientX = e.clientX;
        clientY = e.clientY;
      }
      mouseRef.current = { x: clientX - rect.left, y: clientY - rect.top };
    },
    [],
  );

  const updateStars = React.useCallback(() => {
    const w = canvasSize.width;
    const h = canvasSize.height;
    const mouse = mouseRef.current;

    for (let i = 0; i < starsRef.current.length; i++) {
      const p = starsRef.current[i];

      const dx = mouse.x - p.x;
      const dy = mouse.y - p.y;
      const dist = Math.hypot(dx, dy);

      if (dist < mouseInfluence && dist > 0) {
        const force = (mouseInfluence - dist) / mouseInfluence;
        const nx = dx / dist;
        const ny = dy / dist;
        const g = force * (gravityStrength * 0.001);

        if (mouseGravity === 'attract') {
          p.vx += nx * g;
          p.vy += ny * g;
        } else if (mouseGravity === 'repel') {
          p.vx -= nx * g;
          p.vy -= ny * g;
        }

        p.opacity = Math.min(1, p.baseOpacity + force * 0.4);

        const targetGlow = 1 + force * 2;
        const currentGlow = p.glowMultiplier || 1;

        if (glowAnimation === 'instant') {
          p.glowMultiplier = targetGlow;
        } else if (glowAnimation === 'ease') {
          const ease = 0.15;
          p.glowMultiplier = currentGlow + (targetGlow - currentGlow) * ease;
        } else {
          const spring = (targetGlow - currentGlow) * 0.2;
          const damping = 0.85;
          p.glowVelocity = (p.glowVelocity || 0) * damping + spring;
          p.glowMultiplier = currentGlow + (p.glowVelocity || 0);
        }
      } else {
        p.opacity = Math.max(p.baseOpacity * 0.3, p.opacity - 0.02);
        const targetGlow = 1;
        const currentGlow = p.glowMultiplier || 1;
        if (glowAnimation === 'instant') {
          p.glowMultiplier = targetGlow;
        } else if (glowAnimation === 'ease') {
          const ease = 0.08;
          p.glowMultiplier = Math.max(
            1,
            currentGlow + (targetGlow - currentGlow) * ease,
          );
        } else {
          const spring = (targetGlow - currentGlow) * 0.15;
          const damping = 0.9;
          p.glowVelocity = (p.glowVelocity || 0) * damping + spring;
          p.glowMultiplier = Math.max(1, currentGlow + (p.glowVelocity || 0));
        }
      }

      if (starsInteraction) {
        for (let j = i + 1; j < starsRef.current.length; j++) {
          const o = starsRef.current[j];
          const dx2 = o.x - p.x;
          const dy2 = o.y - p.y;
          const d = Math.hypot(dx2, dy2);
          const minD = p.size + o.size + 5;
          if (d < minD && d > 0) {
            if (starsInteractionType === 'bounce') {
              const nx = dx2 / d;
              const ny = dy2 / d;
              const rvx = p.vx - o.vx;
              const rvy = p.vy - o.vy;
              const speed = rvx * nx + rvy * ny;
              if (speed < 0) continue;
              const impulse = (2 * speed) / (p.mass + o.mass);
              p.vx -= impulse * o.mass * nx;
              p.vy -= impulse * o.mass * ny;
              o.vx += impulse * p.mass * nx;
              o.vy += impulse * p.mass * ny;
              const overlap = minD - d;
              const sx = nx * overlap * 0.5;
              const sy = ny * overlap * 0.5;
              p.x -= sx;
              p.y -= sy;
              o.x += sx;
              o.y += sy;
            } else {
              const mergeForce = (minD - d) / minD;
              p.glowMultiplier = (p.glowMultiplier || 1) + mergeForce * 0.5;
              o.glowMultiplier = (o.glowMultiplier || 1) + mergeForce * 0.5;
              const af = mergeForce * 0.01;
              p.vx += dx2 * af;
              p.vy += dy2 * af;
              o.vx -= dx2 * af;
              o.vy -= dy2 * af;
            }
          }
        }
      }

      p.x += p.vx;
      p.y += p.vy;

      p.vx += (Math.random() - 0.5) * 0.001;
      p.vy += (Math.random() - 0.5) * 0.001;

      p.vx *= 0.999;
      p.vy *= 0.999;

      if (p.x < 0) p.x = w;
      if (p.x > w) p.x = 0;
      if (p.y < 0) p.y = h;
      if (p.y > h) p.y = 0;
    }
  }, [
    canvasSize.width,
    canvasSize.height,
    mouseInfluence,
    mouseGravity,
    gravityStrength,
    glowAnimation,
    starsInteraction,
    starsInteractionType,
  ]);

  const drawStars = React.useCallback(
    (ctx: CanvasRenderingContext2D) => {
      ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
      const color = readColor();
      for (const p of starsRef.current) {
        ctx.save();
        ctx.shadowColor = color;
        ctx.shadowBlur = glowIntensity * (p.glowMultiplier || 1) * 2;
        ctx.globalAlpha = p.opacity;
        ctx.fillStyle = color;
        ctx.beginPath();
        ctx.arc(p.x * dpr, p.y * dpr, p.size * dpr, 0, Math.PI * 2);
        ctx.fill();
        ctx.restore();
      }
    },
    [dpr, glowIntensity, readColor],
  );

  const animate = React.useCallback(() => {
    const canvas = canvasRef.current;
    if (!canvas) return;
    const ctx = canvas.getContext('2d');
    if (!ctx) return;
    updateStars();
    drawStars(ctx);
    animRef.current = requestAnimationFrame(animate);
  }, [updateStars, drawStars]);

  React.useEffect(() => {
    resizeCanvas();
    const container = containerRef.current;
    const ro =
      typeof ResizeObserver !== 'undefined'
        ? new ResizeObserver(resizeCanvas)
        : null;
    if (container && ro) ro.observe(container);
    const onResize = () => resizeCanvas();
    window.addEventListener('resize', onResize);
    return () => {
      window.removeEventListener('resize', onResize);
      if (ro && container) ro.disconnect();
    };
  }, [resizeCanvas]);

  React.useEffect(() => {
    if (starsRef.current.length === 0) {
      initStars(canvasSize.width, canvasSize.height);
    } else {
      starsRef.current.forEach((p) => {
        p.baseOpacity = starsOpacity;
        p.opacity = starsOpacity;
        const spd = Math.hypot(p.vx, p.vy);
        if (spd > 0) {
          const ratio = movementSpeed / spd;
          p.vx *= ratio;
          p.vy *= ratio;
        }
      });
    }
  }, [
    starsCount,
    starsOpacity,
    movementSpeed,
    canvasSize.width,
    canvasSize.height,
    initStars,
  ]);

  React.useEffect(() => {
    if (animRef.current) cancelAnimationFrame(animRef.current);
    animRef.current = requestAnimationFrame(animate);
    return () => {
      if (animRef.current) cancelAnimationFrame(animRef.current);
      animRef.current = null;
    };
  }, [animate]);

  return (
    <div
      ref={containerRef}
      data-slot="gravity-stars-background"
      className={cn('relative size-full overflow-hidden', className)}
      onMouseMove={(e) => handlePointerMove(e)}
      onTouchMove={(e) => handlePointerMove(e)}
      {...props}
    >
      <canvas ref={canvasRef} className="block w-full h-full" />
    </div>
  );
}

export { GravityStarsBackground, type GravityStarsProps };
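A usage sketch for GravityStarsBackground. The stars are drawn in the container's computed CSS color (readColor falls back to #ffffff), so setting a text-color class on the wrapper is the intended theming mechanism; the import path and page markup below are assumptions for illustration only.

import { GravityStarsBackground } from '@/components/ui/gravity-stars-background'; // assumed path

export function LoginBackdrop() {
  return (
    <div className="h-screen bg-black text-fuchsia-400">
      {/* stars inherit currentColor; 'repel' pushes stars away from the cursor */}
      <GravityStarsBackground
        starsCount={120}
        mouseGravity="repel"
        gravityStrength={60}
        starsInteraction
        starsInteractionType="bounce"
      />
    </div>
  );
}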
@@ -5,7 +5,6 @@ import type * as React from "react"
// Import various icons from Tabler Icons library
import {
  IconDashboard, // Dashboard icon
  IconHelp, // Help icon
  IconListDetails, // List details icon
  IconSettings, // Settings icon
  IconUsers, // Users icon
@@ -15,8 +14,10 @@ import {
  IconServer, // Server icon
  IconTerminal2, // Terminal icon
  IconBug, // Vulnerability icon
  IconMessageReport, // Feedback icon
  IconSearch, // Search icon
  IconKey, // API Key icon
  IconBan, // Blacklist icon
  IconInfoCircle, // About icon
} from "@tabler/icons-react"
// Import internationalization hook
import { useTranslations } from 'next-intl'
@@ -25,8 +26,8 @@ import { Link, usePathname } from '@/i18n/navigation'

// Import custom navigation components
import { NavSystem } from "@/components/nav-system"
import { NavSecondary } from "@/components/nav-secondary"
import { NavUser } from "@/components/nav-user"
import { AboutDialog } from "@/components/about-dialog"
// Import sidebar UI components
import {
  Sidebar,
@@ -137,20 +138,6 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
    },
  ]

  // Secondary navigation menu items
  const navSecondary = [
    {
      title: t('feedback'),
      url: "https://github.com/yyhuni/xingrin/issues",
      icon: IconMessageReport,
    },
    {
      title: t('help'),
      url: "https://github.com/yyhuni/xingrin",
      icon: IconHelp,
    },
  ]

  // System settings related menu items
  const documents = [
    {
@@ -168,6 +155,16 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
      url: "/settings/notifications/",
      icon: IconSettings,
    },
    {
      name: t('apiKeys'),
      url: "/settings/api-keys/",
      icon: IconKey,
    },
    {
      name: t('globalBlacklist'),
      url: "/settings/blacklist/",
      icon: IconBan,
    },
  ]

  return (
@@ -182,8 +179,8 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
            className="data-[slot=sidebar-menu-button]:!p-1.5"
          >
            <Link href="/">
              <IconRadar className="!size-5" />
              <span className="text-base font-semibold">XingRin</span>
              <IconRadar className="!size-5 text-primary" />
              <span className="text-base font-semibold">{t('appName')}</span>
            </Link>
          </SidebarMenuButton>
        </SidebarMenuItem>
@@ -259,8 +256,21 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {

        {/* System settings navigation menu */}
        <NavSystem items={documents} />
        {/* Secondary navigation menu, using mt-auto to push to bottom */}
        <NavSecondary items={navSecondary} className="mt-auto" />
        {/* About system button */}
        <SidebarGroup className="mt-auto">
          <SidebarGroupContent>
            <SidebarMenu>
              <SidebarMenuItem>
                <AboutDialog>
                  <SidebarMenuButton>
                    <IconInfoCircle />
                    <span>{t('about')}</span>
                  </SidebarMenuButton>
                </AboutDialog>
              </SidebarMenuItem>
            </SidebarMenu>
          </SidebarGroupContent>
        </SidebarGroup>
      </SidebarContent>

      {/* Sidebar footer */}

@@ -40,8 +40,8 @@ export function ChangePasswordDialog({ open, onOpenChange }: ChangePasswordDialo
      return
    }

    if (newPassword.length < 4) {
      setError(t("passwordTooShort", { min: 4 }))
    if (newPassword.length < 6) {
      setError(t("passwordTooShort", { min: 6 }))
      return
    }

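The hunk above raises the client-side minimum password length from 4 to 6 and passes the bound to the translator as an ICU argument, so one message covers every locale. The message entry this implies would look roughly like the following (key inferred from the t() call; the locale file itself is not part of this diff):

// messages/en.json (assumed location and wording)
// "passwordTooShort": "Password must be at least {min} characters"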
frontend/components/auth/login-boot-screen.tsx (new file, 151 lines)
@@ -0,0 +1,151 @@
"use client"

import * as React from "react"

import { cn } from "@/lib/utils"

type BootLine = {
  text: string
  className?: string
}

const BOOT_LINES: BootLine[] = [
  { text: "> booting ORBIT...", className: "text-yellow-500" },
  { text: "> initializing secure terminal...", className: "text-zinc-200" },
  { text: "> loading modules: auth, i18n, ui...", className: "text-zinc-200" },
  { text: "> checking session...", className: "text-yellow-500" },
  { text: "> ready.", className: "text-green-500" },
]

const SUCCESS_LINES: BootLine[] = [
  { text: "> authentication successful", className: "text-green-500" },
  { text: "> loading user profile...", className: "text-zinc-200" },
  { text: "> initializing dashboard...", className: "text-zinc-200" },
  { text: "> preparing workspace...", className: "text-yellow-500" },
  { text: "> access granted.", className: "text-green-500" },
]

// Keep the log animation snappy so it can complete within the 0.6s splash.
const STEP_DELAYS_MS = [70, 90, 110, 130, 150]

const GLITCH_MS = 600

export function LoginBootScreen({ className, success = false }: { className?: string; success?: boolean }) {
  const [visible, setVisible] = React.useState(0)
  const [entered, setEntered] = React.useState(false)
  const [glitchOn, setGlitchOn] = React.useState(true)

  // Choose which lines to display based on the success state
  const displayLines = success ? SUCCESS_LINES : BOOT_LINES

  React.useEffect(() => {
    const raf = requestAnimationFrame(() => setEntered(true))
    return () => cancelAnimationFrame(raf)
  }, [])

  React.useEffect(() => {
    setGlitchOn(true)
    const timer = setTimeout(() => setGlitchOn(false), GLITCH_MS)
    return () => clearTimeout(timer)
  }, [])

  React.useEffect(() => {
    setVisible(0)

    const timers: Array<ReturnType<typeof setTimeout>> = []
    let acc = 0

    for (let i = 0; i < displayLines.length; i++) {
      acc += STEP_DELAYS_MS[i] ?? 160
      timers.push(
        setTimeout(() => {
          setVisible((prev) => Math.max(prev, i + 1))
        }, acc)
      )
    }

    return () => {
      timers.forEach(clearTimeout)
    }
  }, [displayLines])

  const progress = Math.round((Math.min(visible, displayLines.length) / displayLines.length) * 100)

  return (
    <div className={cn("relative flex min-h-svh flex-col bg-black", glitchOn && "orbit-splash-glitch", className)}>
      {/* Main content area */}
      <div className="relative z-10 flex-1 flex items-center justify-center p-6">
        <div
          className={cn(
            "border-zinc-700 bg-zinc-900/80 backdrop-blur-sm z-0 w-full max-w-xl rounded-xl border transition-opacity duration-200 ease-out motion-reduce:transition-none",
            entered ? "opacity-100" : "opacity-0"
          )}
        >
          {/* Terminal header */}
          <div className="border-zinc-700 flex items-center gap-x-2 border-b px-4 py-3">
            <div className="flex flex-row gap-x-2">
              <div className="h-3 w-3 rounded-full bg-red-500" />
              <div className="h-3 w-3 rounded-full bg-yellow-500" />
              <div className="h-3 w-3 rounded-full bg-green-500" />
            </div>
            <span className="ml-2 text-xs text-zinc-400 font-mono">ORBIT · boot</span>
            <span className="ml-auto text-xs text-zinc-500 font-mono">{progress}%</span>
          </div>

          {/* Terminal body */}
          <div className="p-4 font-mono text-sm min-h-[280px]">
            <div className="mb-6 text-center">
              <div
                className={cn(
                  "text-3xl sm:text-4xl !font-bold tracking-wide",
                  "bg-gradient-to-r from-[#FF10F0] via-[#B026FF] to-[#FF10F0] bg-clip-text text-transparent",
                  glitchOn && "orbit-glitch-text"
                )}
                data-text="ORBIT"
                style={{
                  filter: "drop-shadow(0 0 20px rgba(255, 16, 240, 0.5)) drop-shadow(0 0 40px rgba(176, 38, 255, 0.3))"
                }}
              >
                ORBIT
              </div>
              <div className="mt-3 flex items-center gap-3 text-zinc-400 text-xs">
                <span className="h-px flex-1 bg-gradient-to-r from-transparent via-[#B026FF] to-transparent" />
                <span className="whitespace-nowrap">system bootstrap</span>
                <span className="h-px flex-1 bg-gradient-to-r from-transparent via-[#B026FF] to-transparent" />
              </div>
            </div>

            <div className="space-y-1">
              {displayLines.slice(0, visible).map((line, idx) => (
                <div key={idx} className={cn("whitespace-pre-wrap", line.className)}>
                  {line.text}
                </div>
              ))}

              {/* Cursor */}
              <div className="text-green-500">
                <span className="inline-block h-4 w-2 align-middle bg-green-500 animate-pulse" />
              </div>
            </div>

            {/* Progress bar */}
            <div className="mt-6">
              <div className="h-1.5 w-full rounded bg-zinc-800 overflow-hidden">
                <div
                  className="h-full bg-gradient-to-r from-[#FF10F0] to-[#B026FF]"
                  style={{
                    width: `${progress}%`,
                    boxShadow: "0 0 10px rgba(255, 16, 240, 0.5), 0 0 20px rgba(176, 38, 255, 0.3)"
                  }}
                />
              </div>
              <div className="mt-2 text-xs text-zinc-500">
                Checking session…
              </div>
            </div>
          </div>
        </div>
      </div>
    </div>
  )
}
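A usage sketch: the `success` prop switches the log from BOOT_LINES to SUCCESS_LINES, so the same component can cover both the initial splash and the post-login transition. Because displayLines is an effect dependency, the replay restarts whenever the flag flips; the page wiring below is illustrative.

import { LoginBootScreen } from '@/components/auth/login-boot-screen';

export function LoginSplash({ authenticated }: { authenticated: boolean }) {
  // false -> boot log; true -> "access granted" log, replayed from line one
  return <LoginBootScreen success={authenticated} />;
}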
@@ -34,36 +34,6 @@ interface BulkAddUrlsDialogProps {
  onSuccess?: () => void
}

const ASSET_TYPE_LABELS: Record<AssetType, { title: string; description: string; placeholder: string }> = {
  endpoint: {
    title: 'Bulk Add Endpoints',
    description: 'Enter endpoint URL list, one per line.',
    placeholder: `Please enter endpoint URLs, one per line
Example:
https://example.com/api/v1
https://example.com/api/v2
https://example.com/login`,
  },
  website: {
    title: 'Bulk Add Websites',
    description: 'Enter website URL list, one per line.',
    placeholder: `Please enter website URLs, one per line
Example:
https://example.com
https://www.example.com
https://api.example.com`,
  },
  directory: {
    title: 'Bulk Add Directories',
    description: 'Enter directory URL list, one per line.',
    placeholder: `Please enter directory URLs, one per line
Example:
https://example.com/admin
https://example.com/api
https://example.com/uploads`,
  },
}

/**
 * Bulk add URLs dialog component
 *
@@ -80,6 +50,14 @@ export function BulkAddUrlsDialog({
  onSuccess,
}: BulkAddUrlsDialogProps) {
  const tBulkAdd = useTranslations("bulkAdd.common")
  const tUrl = useTranslations("bulkAdd.url")

  // Get translated labels based on asset type
  const labels = {
    title: tUrl(`${assetType}.title`),
    description: tUrl(`${assetType}.description`),
    placeholder: tUrl(`${assetType}.placeholder`),
  }

  // Dialog open/close state
  const [internalOpen, setInternalOpen] = useState(false)
@@ -121,7 +99,6 @@ export function BulkAddUrlsDialog({
  }

  const mutation = getMutation()
  const labels = ASSET_TYPE_LABELS[assetType]

  // Handle input changes
  const handleInputChange = (value: string) => {
@@ -222,7 +199,7 @@ export function BulkAddUrlsDialog({
        <DialogTrigger asChild>
          <Button size="sm" variant="outline">
            <Plus className="h-4 w-4" />
            Bulk Add
            {tBulkAdd("bulkAdd")}
          </Button>
        </DialogTrigger>
      )}
@@ -242,7 +219,7 @@ export function BulkAddUrlsDialog({
        <div className="grid gap-4 py-4">
          <div className="grid gap-2">
            <Label htmlFor="urls">
              URL List <span className="text-destructive">*</span>
              {tUrl("label")} <span className="text-destructive">*</span>
            </Label>
            <div className="flex border rounded-md overflow-hidden h-[220px]">
              {/* Line number column */}
@@ -278,39 +255,43 @@ export function BulkAddUrlsDialog({
            {validationResult && (
              <div className="text-xs space-y-1">
                <div className="text-muted-foreground">
                  Valid: {validationResult.validCount} items
                  {tUrl("valid", { count: validationResult.validCount })}
                  {validationResult.duplicateCount > 0 && (
                    <span className="text-yellow-600 ml-2">
                      Duplicate: {validationResult.duplicateCount} items
                      {tUrl("duplicate", { count: validationResult.duplicateCount })}
                    </span>
                  )}
                  {validationResult.invalidCount > 0 && (
                    <span className="text-destructive ml-2">
                      Invalid: {validationResult.invalidCount} items
                      {tUrl("invalid", { count: validationResult.invalidCount })}
                    </span>
                  )}
                  {validationResult.mismatchedCount > 0 && (
                    <span className="text-destructive ml-2">
                      Mismatched: {validationResult.mismatchedCount} items
                      {tUrl("mismatched", { count: validationResult.mismatchedCount })}
                    </span>
                  )}
                </div>
                {validationResult.firstError && (
                  <div className="text-destructive">
                    Line {validationResult.firstError.index + 1}: "
                    {validationResult.firstError.url.length > 50
                      ? validationResult.firstError.url.substring(0, 50) + '...'
                      : validationResult.firstError.url}" -{" "}
                    {validationResult.firstError.error}
                    {tUrl("lineError", {
                      line: validationResult.firstError.index + 1,
                      value: validationResult.firstError.url.length > 50
                        ? validationResult.firstError.url.substring(0, 50) + '...'
                        : validationResult.firstError.url,
                      error: validationResult.firstError.error,
                    })}
                  </div>
                )}
                {validationResult.firstMismatch && !validationResult.firstError && (
                  <div className="text-destructive">
                    Line {validationResult.firstMismatch.index + 1}: "
                    {validationResult.firstMismatch.url.length > 50
                      ? validationResult.firstMismatch.url.substring(0, 50) + '...'
                      : validationResult.firstMismatch.url}" -
                    URL does not belong to target {targetName}, please remove before submitting
                    {tUrl("mismatchError", {
                      line: validationResult.firstMismatch.index + 1,
                      value: validationResult.firstMismatch.url.length > 50
                        ? validationResult.firstMismatch.url.substring(0, 50) + '...'
                        : validationResult.firstMismatch.url,
                      target: targetName || '',
                    })}
                  </div>
                )}
              </div>
@@ -325,7 +306,7 @@ export function BulkAddUrlsDialog({
            onClick={() => handleOpenChange(false)}
            disabled={mutation.isPending}
          >
            Cancel
            {tBulkAdd("cancel")}
          </Button>
          <Button
            type="submit"
@@ -334,12 +315,12 @@ export function BulkAddUrlsDialog({
            {mutation.isPending ? (
              <>
                <LoadingSpinner />
                Creating...
                {tUrl("creating")}
              </>
            ) : (
              <>
                <Plus className="h-4 w-4" />
                Bulk Add
                {tBulkAdd("bulkAdd")}
              </>
            )}
          </Button>
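These hunks replace the hardcoded ASSET_TYPE_LABELS table with next-intl lookups keyed by asset type. A sketch of the message structure the tUrl/tBulkAdd calls imply, written as a TypeScript object for readability (the namespace and key names are inferred from the calls above; the English strings come from the removed hardcoded labels; the actual locale files are not part of this diff):

// messages/en.json equivalent, sketched (assumed; not shown in this diff)
const bulkAddMessages = {
  bulkAdd: {
    common: { bulkAdd: "Bulk Add", cancel: "Cancel" },
    url: {
      label: "URL List",
      creating: "Creating...",
      valid: "Valid: {count} items",
      duplicate: "Duplicate: {count} items",
      invalid: "Invalid: {count} items",
      mismatched: "Mismatched: {count} items",
      lineError: 'Line {line}: "{value}" - {error}',
      mismatchError: 'Line {line}: "{value}" - URL does not belong to target {target}, please remove before submitting',
      endpoint: { title: "Bulk Add Endpoints", description: "Enter endpoint URL list, one per line.", placeholder: "..." },
      website: { title: "Bulk Add Websites", description: "Enter website URL list, one per line.", placeholder: "..." },
      directory: { title: "Bulk Add Directories", description: "Enter directory URL list, one per line.", placeholder: "..." },
    },
  },
}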
@@ -67,6 +67,45 @@ const DEFAULT_FIELDS: FilterField[] = [
  PREDEFINED_FIELDS.host,
]

// History storage key
const FILTER_HISTORY_KEY = 'smart_filter_history'
const MAX_HISTORY_PER_FIELD = 10

// Get history values for a field
function getFieldHistory(field: string): string[] {
  if (typeof window === 'undefined') return []
  try {
    const history = JSON.parse(localStorage.getItem(FILTER_HISTORY_KEY) || '{}')
    return history[field] || []
  } catch {
    return []
  }
}

// Save a value to field history
function saveFieldHistory(field: string, value: string) {
  if (typeof window === 'undefined' || !value.trim()) return
  try {
    const history = JSON.parse(localStorage.getItem(FILTER_HISTORY_KEY) || '{}')
    const fieldHistory = (history[field] || []).filter((v: string) => v !== value)
    fieldHistory.unshift(value)
    history[field] = fieldHistory.slice(0, MAX_HISTORY_PER_FIELD)
    localStorage.setItem(FILTER_HISTORY_KEY, JSON.stringify(history))
  } catch {
    // ignore
  }
}

// Extract field-value pairs from query and save to history
function saveQueryHistory(query: string) {
  const regex = /(\w+)(==|!=|=)"([^"]+)"/g
  let match
  while ((match = regex.exec(query)) !== null) {
    const [, field, , value] = match
    saveFieldHistory(field, value)
  }
}
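A worked example of what saveQueryHistory extracts. The regex captures field, operator, and quoted value, so one query can seed several per-field histories; the query string is illustrative.

// saveQueryHistory('host="example.com" && port!="22" && title=="admin"')
// writes to localStorage under 'smart_filter_history':
//   { "host": ["example.com"], "port": ["22"], "title": ["admin"] }
// Re-saving an existing value moves it to the front of its field's list,
// and each field keeps at most MAX_HISTORY_PER_FIELD (10) entries.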

// Parse filter expression (FOFA style)
interface ParsedFilter {
  field: string
@@ -115,10 +154,114 @@ export function SmartFilterInput({
  const [open, setOpen] = React.useState(false)
  const [inputValue, setInputValue] = React.useState(value ?? "")
  const inputRef = React.useRef<HTMLInputElement>(null)
  const ghostRef = React.useRef<HTMLSpanElement>(null)
  const listRef = React.useRef<HTMLDivElement>(null)
  const savedScrollTop = React.useRef<number | null>(null)
  const hasInitialized = React.useRef(false)

  // Calculate ghost text suggestion
  const ghostText = React.useMemo(() => {
    if (!inputValue) return ""

    // Get the last word/token being typed
    const lastSpaceIndex = inputValue.lastIndexOf(' ')
    const currentToken = lastSpaceIndex === -1 ? inputValue : inputValue.slice(lastSpaceIndex + 1)
    const lowerToken = currentToken.toLowerCase()

    // If empty token after space, check if previous expression is complete
    if (!currentToken && inputValue.trim()) {
      // Check if last expression is complete (ends with ")
      if (inputValue.trimEnd().endsWith('"')) {
        return '&& '
      }
      return ""
    }

    if (!currentToken) return ""

    // Priority 1: Field name completion (no = in token)
    if (!currentToken.includes('=') && !currentToken.includes('!')) {
      // Find matching field first
      const matchingField = fields.find(f =>
        f.key.toLowerCase().startsWith(lowerToken) &&
        f.key.toLowerCase() !== lowerToken
      )
      if (matchingField) {
        return matchingField.key.slice(currentToken.length) + '="'
      }

      // If exact match of field name, suggest ="
      const exactField = fields.find(f => f.key.toLowerCase() === lowerToken)
      if (exactField) {
        return '="'
      }

      // Priority 2: Logical operators (only if no field matches)
      if ('&&'.startsWith(currentToken) && currentToken.startsWith('&')) {
        return '&&'.slice(currentToken.length) + ' '
      }
      if ('||'.startsWith(currentToken) && currentToken.startsWith('|')) {
        return '||'.slice(currentToken.length) + ' '
      }
      // 'and' / 'or' only if no field name starts with these
      if (!matchingField) {
        if ('and'.startsWith(lowerToken) && lowerToken.length > 0 && !fields.some(f => f.key.toLowerCase().startsWith(lowerToken))) {
          return 'and'.slice(lowerToken.length) + ' '
        }
        if ('or'.startsWith(lowerToken) && lowerToken.length > 0 && !fields.some(f => f.key.toLowerCase().startsWith(lowerToken))) {
          return 'or'.slice(lowerToken.length) + ' '
        }
      }

      return ""
    }

    // Check if typing ! for != operator
    if (currentToken.match(/^(\w+)!$/)) {
      return '="'
    }

    // Check if typing = and might want ==
    const singleEqMatch = currentToken.match(/^(\w+)=$/)
    if (singleEqMatch) {
      // Suggest " for fuzzy match (most common)
      return '"'
    }

    // Check if typed == or != (no opening quote yet)
    const doubleOpMatch = currentToken.match(/^(\w+)(==|!=)$/)
    if (doubleOpMatch) {
      return '"'
    }

    // Check if typing a value (has = and opening quote)
    const eqMatch = currentToken.match(/^(\w+)(==|!=|=)"([^"]*)$/)
    if (eqMatch) {
      const [, field, , partialValue] = eqMatch
      // Get history for this field
      const history = getFieldHistory(field)
      // Find matching history value
      const matchingValue = history.find(v =>
        v.toLowerCase().startsWith(partialValue.toLowerCase()) &&
        v.toLowerCase() !== partialValue.toLowerCase()
      )
      if (matchingValue) {
        return matchingValue.slice(partialValue.length) + '"'
      }
      // If value has content but no closing quote, suggest closing quote
      if (partialValue.length > 0) {
        return '"'
      }
    }

    // Check if a complete expression just finished (ends with ")
    if (currentToken.match(/^\w+(==|!=|=)"[^"]+"$/)) {
      return ' && '
    }

    return ""
  }, [inputValue, fields])
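Traced examples of the ghost-text priorities above, assuming a field set that includes host and a previously saved history value "example.com" for it (both assumptions; the inputs are illustrative):

// 'ho'          -> ghost 'st="'        (field completion plus opening quote)
// 'host'        -> ghost '="'          (exact field name)
// 'host="exa'   -> ghost 'mple.com"'   (history completion, closing quote appended)
// 'host="x'     -> ghost '"'           (no history hit; just close the quote)
// 'host="x" '   -> ghost '&& '         (previous expression complete, suggest conjunction)
// Tab, or ArrowRight with the caret at the end of the input, accepts the suggestion
// via acceptGhostText() in the handlers below.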

  // Synchronize external value changes
  React.useEffect(() => {
    if (value !== undefined) {
@@ -189,12 +332,27 @@ export function SmartFilterInput({

  // Handle search
  const handleSearch = () => {
    // Save query values to history
    saveQueryHistory(inputValue)
    onSearch?.(parsedFilters, inputValue)
    setOpen(false)
  }

  // Accept ghost text suggestion
  const acceptGhostText = () => {
    if (ghostText) {
      setInputValue(inputValue + ghostText)
      return true
    }
    return false
  }

  // Handle keyboard events
  const handleKeyDown = (e: React.KeyboardEvent) => {
    if (e.key === "Tab" && ghostText) {
      e.preventDefault()
      acceptGhostText()
    }
    if (e.key === "Enter" && !e.shiftKey) {
      e.preventDefault()
      handleSearch()
@@ -202,6 +360,14 @@ export function SmartFilterInput({
    if (e.key === "Escape") {
      setOpen(false)
    }
    // Right arrow at end of input accepts ghost text
    if (e.key === "ArrowRight" && ghostText) {
      const input = inputRef.current
      if (input && input.selectionStart === input.value.length) {
        e.preventDefault()
        acceptGhostText()
      }
    }
  }

  // Append example to input box (not overwrite), then close popover
@@ -215,36 +381,46 @@ export function SmartFilterInput({

  return (
    <div className={className}>
      <Popover open={open} onOpenChange={setOpen} modal={false}>
        <PopoverAnchor asChild>
          <div className="flex items-center gap-2">
            <Input
              ref={inputRef}
              type="text"
              value={inputValue}
              onChange={(e) => {
                setInputValue(e.target.value)
                if (!open) setOpen(true)
              }}
              onFocus={() => setOpen(true)}
              onBlur={(e) => {
                // If focus moves to inside Popover or input itself, don't close
                const relatedTarget = e.relatedTarget as HTMLElement | null
                if (relatedTarget?.closest('[data-radix-popper-content-wrapper]')) {
                  return
                }
                // Delay close to let CommandItem's onSelect execute first
                setTimeout(() => setOpen(false), 150)
              }}
              onKeyDown={handleKeyDown}
              placeholder={placeholder || defaultPlaceholder}
              className="h-8 w-full"
            />
            <Button variant="outline" size="sm" onClick={handleSearch}>
              <IconSearch className="h-4 w-4" />
            </Button>
          </div>
        </PopoverAnchor>
      <div className="flex items-center gap-2">
        <Popover open={open} onOpenChange={setOpen} modal={false}>
          <PopoverAnchor asChild>
            <div className="relative flex-1">
              <Input
                ref={inputRef}
                type="text"
                value={inputValue}
                onChange={(e) => {
                  setInputValue(e.target.value)
                  if (!open) setOpen(true)
                }}
                onFocus={() => setOpen(true)}
                onBlur={(e) => {
                  // If focus moves to inside Popover or input itself, don't close
                  const relatedTarget = e.relatedTarget as HTMLElement | null
                  if (relatedTarget?.closest('[data-radix-popper-content-wrapper]')) {
                    return
                  }
                  // Delay close to let CommandItem's onSelect execute first
                  setTimeout(() => setOpen(false), 150)
                }}
                onKeyDown={handleKeyDown}
                placeholder={placeholder || defaultPlaceholder}
                className="h-8 w-full font-mono text-sm"
              />
              {/* Ghost text overlay */}
              {ghostText && (
                <div
                  className="absolute inset-0 flex items-center pointer-events-none overflow-hidden px-3"
                  aria-hidden="true"
                >
                  <span className="font-mono text-sm">
                    <span className="invisible">{inputValue}</span>
                    <span ref={ghostRef} className="text-muted-foreground/40">{ghostText}</span>
                  </span>
                </div>
              )}
            </div>
          </PopoverAnchor>
          <PopoverContent
            className="w-[var(--radix-popover-trigger-width)] p-0"
            align="start"
@@ -343,6 +519,10 @@ export function SmartFilterInput({
          </Command>
        </PopoverContent>
      </Popover>
        <Button variant="outline" size="sm" onClick={handleSearch}>
          <IconSearch className="h-4 w-4" />
        </Button>
      </div>
    </div>
  )
}

@@ -94,7 +94,7 @@ export function AssetTrendChart() {
  } satisfies ChartConfig), [t])

  // Visible series state (show all by default)
  const [visibleSeries, setVisibleSeries] = useState<Set<SeriesKey>>(new Set(ALL_SERIES))
  const [visibleSeries, setVisibleSeries] = useState<Set<SeriesKey>>(() => new Set(ALL_SERIES))

  // Currently hovered line
  const [hoveredLine, setHoveredLine] = useState<SeriesKey | null>(null)
@@ -136,10 +136,13 @@ export function AssetTrendChart() {
  }

  // Get latest data (use latest value from raw data)
  const latest = rawData && rawData.length > 0 ? rawData[rawData.length - 1] : null

  const latest = useMemo(() =>
    rawData && rawData.length > 0 ? rawData[rawData.length - 1] : null,
    [rawData]
  )

  // Display data: show hovered data when hovering, otherwise show latest data
  const displayData = activeData || latest
  const displayData = useMemo(() => activeData || latest, [activeData, latest])

  return (
    <Card>
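The first change above swaps an eager initial value for a lazy initializer, a standard React pattern: with useState(new Set(ALL_SERIES)) a fresh Set is constructed on every render and discarded after the first, while useState(() => new Set(ALL_SERIES)) invokes the factory only once, on mount. A side-by-side sketch:

// eager: the Set is built on every render, even though React
// only uses it for the initial state
const [visibleEager] = useState(new Set(ALL_SERIES))

// lazy: the factory runs exactly once, on the initial render
const [visibleLazy] = useState(() => new Set(ALL_SERIES))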
@@ -129,6 +129,8 @@ export function DashboardDataTable() {
    },
    tooltips: {
      vulnDetails: t('tooltips.vulnDetails'),
      reviewed: t('tooltips.reviewed'),
      pending: t('tooltips.pending'),
    },
    severity: {
      critical: t('severity.critical'),
@@ -230,7 +232,7 @@ export function DashboardDataTable() {
      cancelled: t('common.status.cancelled'),
      completed: t('common.status.completed'),
      failed: t('common.status.failed'),
      initiated: t('common.status.pending'),
      pending: t('common.status.pending'),
      running: t('common.status.running'),
    },
    summary: {
@@ -365,6 +367,7 @@ export function DashboardDataTable() {
            columns={scanColumns}
            getRowId={(row) => String(row.id)}
            enableRowSelection={false}
            enableAutoColumnSizing
            pagination={scanPagination}
            onPaginationChange={setScanPagination}
            paginationInfo={scanPaginationInfo}

@@ -49,7 +49,7 @@ export function DashboardScanHistory() {
      cancelled: tCommon("status.cancelled"),
      completed: tCommon("status.completed"),
      failed: tCommon("status.failed"),
      initiated: tCommon("status.pending"),
      pending: tCommon("status.pending"),
      running: tCommon("status.running"),
    },
    summary: {

@@ -1,5 +1,6 @@
"use client"

import { memo } from "react"
import { useAssetStatistics } from "@/hooks/use-dashboard"
import { Card, CardAction, CardDescription, CardFooter, CardHeader, CardTitle } from "@/components/ui/card"
import { Badge } from "@/components/ui/badge"
@@ -8,7 +9,7 @@ import { IconTarget, IconStack2, IconBug, IconPlayerPlay, IconTrendingUp, IconTr
import { useTranslations } from "next-intl"
import { useLocale } from "next-intl"

function TrendBadge({ change }: { change: number }) {
const TrendBadge = memo(function TrendBadge({ change }: { change: number }) {
  if (change === 0) return null

  const isPositive = change > 0
@@ -24,9 +25,9 @@ function TrendBadge({ change }: { change: number }) {
      {isPositive ? '+' : ''}{change}
    </Badge>
  )
}
})

function StatCard({
const StatCard = memo(function StatCard({
  title,
  value,
  change,
@@ -66,7 +67,7 @@ function StatCard({
      </CardFooter>
    </Card>
  )
}
})

function formatUpdateTime(dateStr: string | null, locale: string, noDataText: string) {
  if (!dateStr) return noDataText

@@ -23,24 +23,18 @@ import {
import { Badge } from "@/components/ui/badge"
import { Skeleton } from "@/components/ui/skeleton"
import { IconExternalLink } from "@tabler/icons-react"
import { Circle, CheckCircle2 } from "lucide-react"
import type { VulnerabilitySeverity } from "@/types/vulnerability.types"
import { useTranslations } from "next-intl"
import { useLocale } from "next-intl"

// Unified vulnerability severity color configuration (consistent with charts)
const severityStyles: Record<VulnerabilitySeverity, string> = {
  critical: "bg-[#da3633]/10 text-[#da3633] border border-[#da3633]/20 dark:text-[#f85149]",
  high: "bg-[#d29922]/10 text-[#d29922] border border-[#d29922]/20",
  medium: "bg-[#d4a72c]/10 text-[#d4a72c] border border-[#d4a72c]/20",
  low: "bg-[#238636]/10 text-[#238636] border border-[#238636]/20 dark:text-[#3fb950]",
  info: "bg-[#848d97]/10 text-[#848d97] border border-[#848d97]/20",
}
import { SEVERITY_STYLES } from "@/lib/severity-config"

export function RecentVulnerabilities() {
  const router = useRouter()
  const t = useTranslations("dashboard.recentVulns")
  const tSeverity = useTranslations("severity")
  const tColumns = useTranslations("columns")
  const tTooltips = useTranslations("tooltips")
  const locale = useLocale()

  const formatTime = (dateStr: string) => {
@@ -54,11 +48,11 @@ export function RecentVulnerabilities() {
  }

  const severityConfig = useMemo(() => ({
    critical: { label: tSeverity("critical"), className: severityStyles.critical },
    high: { label: tSeverity("high"), className: severityStyles.high },
    medium: { label: tSeverity("medium"), className: severityStyles.medium },
    low: { label: tSeverity("low"), className: severityStyles.low },
    info: { label: tSeverity("info"), className: severityStyles.info },
    critical: { label: tSeverity("critical"), className: SEVERITY_STYLES.critical.className },
    high: { label: tSeverity("high"), className: SEVERITY_STYLES.high.className },
    medium: { label: tSeverity("medium"), className: SEVERITY_STYLES.medium.className },
    low: { label: tSeverity("low"), className: SEVERITY_STYLES.low.className },
    info: { label: tSeverity("info"), className: SEVERITY_STYLES.info.className },
  }), [tSeverity])

  const { data, isLoading } = useQuery({
@@ -100,6 +94,7 @@ export function RecentVulnerabilities() {
        <TableHeader>
          <TableRow>
            <TableHead>{tColumns("common.status")}</TableHead>
            <TableHead>{tColumns("vulnerability.severity")}</TableHead>
            <TableHead>{tColumns("vulnerability.source")}</TableHead>
            <TableHead>{tColumns("common.type")}</TableHead>
            <TableHead>{tColumns("common.url")}</TableHead>
@@ -107,31 +102,52 @@ export function RecentVulnerabilities() {
          </TableRow>
        </TableHeader>
        <TableBody>
          {vulnerabilities.map((vuln: any) => (
            <TableRow
              key={vuln.id}
              className="cursor-pointer hover:bg-muted/50"
              onClick={() => router.push(`/vulnerabilities/?id=${vuln.id}`)}
            >
              <TableCell>
                <Badge className={severityConfig[vuln.severity as VulnerabilitySeverity]?.className}>
                  {severityConfig[vuln.severity as VulnerabilitySeverity]?.label ?? vuln.severity}
                </Badge>
              </TableCell>
              <TableCell>
                <Badge variant="outline">{vuln.source}</Badge>
              </TableCell>
              <TableCell className="font-medium max-w-[120px] truncate">
                {vuln.vulnType}
              </TableCell>
              <TableCell className="text-muted-foreground text-xs max-w-[200px] truncate">
                {vuln.url}
              </TableCell>
              <TableCell className="text-muted-foreground text-xs whitespace-nowrap">
                {formatTime(vuln.createdAt)}
              </TableCell>
            </TableRow>
          ))}
          {vulnerabilities.map((vuln: any) => {
            const isReviewed = vuln.isReviewed
            const isPending = !isReviewed

            return (
              <TableRow
                key={vuln.id}
                className="cursor-pointer hover:bg-muted/50"
                onClick={() => router.push(`/vulnerabilities/?id=${vuln.id}`)}
              >
                <TableCell>
                  <Badge
                    variant="outline"
                    className={`transition-all gap-1.5 cursor-default ${isPending
                      ? "bg-blue-500/10 text-blue-600 border-blue-500/30 dark:text-blue-400 dark:border-blue-400/30"
                      : "bg-muted/50 text-muted-foreground border-muted-foreground/20"
                    }`}
                  >
                    {isPending ? (
                      <Circle className="h-3 w-3" />
                    ) : (
                      <CheckCircle2 className="h-3 w-3" />
                    )}
                    {isPending ? tTooltips("pending") : tTooltips("reviewed")}
                  </Badge>
                </TableCell>
                <TableCell>
                  <Badge className={severityConfig[vuln.severity as VulnerabilitySeverity]?.className}>
                    {severityConfig[vuln.severity as VulnerabilitySeverity]?.label ?? vuln.severity}
                  </Badge>
                </TableCell>
                <TableCell>
                  <Badge variant="outline">{vuln.source}</Badge>
                </TableCell>
                <TableCell className="font-medium max-w-[120px] truncate">
                  {vuln.vulnType}
                </TableCell>
                <TableCell className="text-muted-foreground text-xs max-w-[200px] truncate">
                  {vuln.url}
                </TableCell>
                <TableCell className="text-muted-foreground text-xs whitespace-nowrap">
                  {formatTime(vuln.createdAt)}
                </TableCell>
              </TableRow>
            )
          })}
        </TableBody>
      </Table>
    </div>

@@ -18,15 +18,7 @@ import {
} from "@/components/ui/chart"
import { Skeleton } from "@/components/ui/skeleton"
import { useTranslations } from "next-intl"

// Vulnerability severities use fixed semantic colors
const SEVERITY_COLORS = {
  critical: "#dc2626", // red
  high: "#f97316",     // orange
  medium: "#eab308",   // yellow
  low: "#3b82f6",      // blue
  info: "#6b7280",     // gray
}
import { SEVERITY_COLORS } from "@/lib/severity-config"

export function VulnSeverityChart() {
  const { data, isLoading } = useAssetStatistics()

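Note: this hunk and several others in the diff replace per-component severity palettes with imports from @/lib/severity-config, a module this compare view never shows. A minimal sketch of what it plausibly exports, inferred from the call sites (SEVERITY_STYLES.critical.className, SEVERITY_COLORS, getSeverityStyle) — the module shape is an assumption; the class strings and hex values are copied from the blocks these hunks delete:

// lib/severity-config.ts — hypothetical reconstruction from call sites; not the actual source.
export type SeverityLevel = "critical" | "high" | "medium" | "low" | "info"

// Badge styles: each entry carries a className so call sites can read
// SEVERITY_STYLES.critical.className (as in recent-vulnerabilities above).
export const SEVERITY_STYLES: Record<SeverityLevel, { className: string }> = {
  critical: { className: "bg-[#da3633]/10 text-[#da3633] border border-[#da3633]/20 dark:text-[#f85149]" },
  high: { className: "bg-[#d29922]/10 text-[#d29922] border border-[#d29922]/20" },
  medium: { className: "bg-[#d4a72c]/10 text-[#d4a72c] border border-[#d4a72c]/20" },
  low: { className: "bg-[#238636]/10 text-[#238636] border border-[#238636]/20 dark:text-[#3fb950]" },
  info: { className: "bg-[#848d97]/10 text-[#848d97] border border-[#848d97]/20" },
}

// Chart colors (hex values copied from the block the chart hunk deletes).
export const SEVERITY_COLORS: Record<SeverityLevel, string> = {
  critical: "#dc2626",
  high: "#f97316",
  medium: "#eab308",
  low: "#3b82f6",
  info: "#6b7280",
}

// Lookup used by the fingerprint SeverityBadge further below:
// case-insensitive, falling back to "info" for unknown values.
export function getSeverityStyle(severity: string): { className: string } {
  return SEVERITY_STYLES[severity?.toLowerCase() as SeverityLevel] ?? SEVERITY_STYLES.info
}
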
@@ -14,10 +14,7 @@ export interface DirectoryTranslations {
    url: string
    status: string
    length: string
    words: string
    lines: string
    contentType: string
    duration: string
    createdAt: string
  }
  actions: {
@@ -56,15 +53,6 @@ function StatusBadge({ status }: { status: number | null }) {
  )
}

/**
 * Format duration (nanoseconds to milliseconds)
 */
function formatDuration(nanoseconds: number | null): string {
  if (nanoseconds === null) return "-"
  const milliseconds = nanoseconds / 1000000
  return `${milliseconds.toFixed(2)} ms`
}

/**
 * Create directory table column definitions
 */
@@ -138,34 +126,6 @@ export function createDirectoryColumns({
      return <span>{length !== null ? length.toLocaleString() : "-"}</span>
    },
  },
  {
    accessorKey: "words",
    size: 80,
    minSize: 60,
    maxSize: 120,
    meta: { title: t.columns.words },
    header: ({ column }) => (
      <DataTableColumnHeader column={column} title={t.columns.words} />
    ),
    cell: ({ row }) => {
      const words = row.getValue("words") as number | null
      return <span>{words !== null ? words.toLocaleString() : "-"}</span>
    },
  },
  {
    accessorKey: "lines",
    size: 80,
    minSize: 60,
    maxSize: 120,
    meta: { title: t.columns.lines },
    header: ({ column }) => (
      <DataTableColumnHeader column={column} title={t.columns.lines} />
    ),
    cell: ({ row }) => {
      const lines = row.getValue("lines") as number | null
      return <span>{lines !== null ? lines.toLocaleString() : "-"}</span>
    },
  },
  {
    accessorKey: "contentType",
    size: 120,
@@ -185,20 +145,6 @@ export function createDirectoryColumns({
      )
    },
  },
  {
    accessorKey: "duration",
    size: 100,
    minSize: 80,
    maxSize: 150,
    meta: { title: t.columns.duration },
    header: ({ column }) => (
      <DataTableColumnHeader column={column} title={t.columns.duration} />
    ),
    cell: ({ row }) => {
      const duration = row.getValue("duration") as number | null
      return <span className="text-muted-foreground">{formatDuration(duration)}</span>
    },
  },
  {
    accessorKey: "createdAt",
    size: 150,

@@ -18,7 +18,7 @@ const DIRECTORY_FILTER_FIELDS: FilterField[] = [
// Directory page filter examples
const DIRECTORY_FILTER_EXAMPLES = [
  'url="/admin" && status="200"',
  'url="/api/*" || url="/config/*"',
  'url="/api/" || url="/config/"',
  'status="200" && url!="/index.html"',
]

@@ -114,7 +114,7 @@ export function DirectoriesDataTable({
      onSelectionChange={handleSelectionChange}
      // Bulk operations
      onBulkDelete={onBulkDelete}
      bulkDeleteLabel="Delete"
      bulkDeleteLabel={tActions("delete")}
      showAddButton={false}
      // Bulk add button
      onBulkAdd={onBulkAdd}

@@ -11,6 +11,7 @@ import { useTargetDirectories, useScanDirectories } from "@/hooks/use-directorie
import { useTarget } from "@/hooks/use-targets"
import { DirectoryService } from "@/services/directory.service"
import { BulkAddUrlsDialog } from "@/components/common/bulk-add-urls-dialog"
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
import { getDateLocale } from "@/lib/date-utils"
import type { TargetType } from "@/lib/url-validator"
import type { Directory } from "@/types/directory.types"
@@ -29,11 +30,14 @@ export function DirectoriesView({
  })
  const [selectedDirectories, setSelectedDirectories] = useState<Directory[]>([])
  const [bulkAddDialogOpen, setBulkAddDialogOpen] = useState(false)
  const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
  const [isDeleting, setIsDeleting] = useState(false)

  const [filterQuery, setFilterQuery] = useState("")
  const [isSearching, setIsSearching] = useState(false)

  // Internationalization
  const t = useTranslations("pages.targetDetail")
  const tColumns = useTranslations("columns")
  const tCommon = useTranslations("common")
  const tToast = useTranslations("toast")
@@ -240,6 +244,26 @@ export function DirectoriesView({
    URL.revokeObjectURL(url)
  }

  // Handle bulk delete
  const handleBulkDelete = async () => {
    if (selectedDirectories.length === 0) return

    setIsDeleting(true)
    try {
      const ids = selectedDirectories.map(d => d.id)
      const result = await DirectoryService.bulkDelete(ids)
      toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
      setSelectedDirectories([])
      setDeleteDialogOpen(false)
      refetch()
    } catch (error) {
      console.error("Failed to delete directories", error)
      toast.error(tToast("deleteFailed"))
    } finally {
      setIsDeleting(false)
    }
  }

  if (error) {
    return (
      <div className="flex flex-col items-center justify-center py-12">
@@ -280,10 +304,13 @@ export function DirectoriesView({
        onSelectionChange={handleSelectionChange}
        onDownloadAll={handleDownloadAll}
        onDownloadSelected={handleDownloadSelected}
        onBulkDelete={targetId ? () => setDeleteDialogOpen(true) : undefined}
        onBulkAdd={targetId ? () => setBulkAddDialogOpen(true) : undefined}
      />

      {/* Bulk add dialog */}
      {targetId && (
        <BulkAddUrlsDialog
          targetId={targetId}
@@ -295,6 +322,17 @@ export function DirectoriesView({
          onSuccess={() => refetch()}
        />
      )}

      {/* Delete confirmation dialog */}
      <ConfirmDialog
        open={deleteDialogOpen}
        onOpenChange={setDeleteDialogOpen}
        title={tCommon("actions.confirmDelete")}
        description={tCommon("actions.deleteConfirmMessage", { count: selectedDirectories.length })}
        onConfirm={handleBulkDelete}
        loading={isDeleting}
        variant="destructive"
      />
    </>
  )
}

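Note: this handler, like its endpoint and IP counterparts later in the diff, assumes a static service method that takes the selected identifiers and resolves to a { deletedCount } payload. The service itself is outside this compare view; a hedged sketch of the assumed contract (the REST path and fetch transport are invented for illustration — only the ids-in / deletedCount-out shape is confirmed by the call sites):

// services/directory.service.ts — hypothetical sketch, not the actual source.
export interface BulkDeleteResult {
  deletedCount: number
}

export class DirectoryService {
  // Assumed endpoint; the real path is not shown in this compare view.
  static async bulkDelete(ids: Array<number | string>): Promise<BulkDeleteResult> {
    const res = await fetch("/api/directories/bulk-delete", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ ids }),
    })
    if (!res.ok) throw new Error(`Bulk delete failed: ${res.status}`)
    return res.json() as Promise<BulkDeleteResult>
  }
}
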
@@ -20,7 +20,7 @@ const ENDPOINT_FILTER_FIELDS: FilterField[] = [

// Endpoint page filter examples
const ENDPOINT_FILTER_EXAMPLES = [
  'url="/api/*" && status="200"',
  'url="/api/" && status="200"',
  'host="api.example.com" || host="admin.example.com"',
  'title="Dashboard" && status!="404"',
  'tech="php" || tech="wordpress"',

@@ -36,6 +36,7 @@ interface EndpointsDataTableProps<TData extends { id: number | string }, TValue>
  onAddNew?: () => void
  addButtonText?: string
  onSelectionChange?: (selectedRows: TData[]) => void
  onBulkDelete?: () => void
  pagination?: { pageIndex: number; pageSize: number }
  onPaginationChange?: (pagination: { pageIndex: number; pageSize: number }) => void
  totalCount?: number
@@ -54,6 +55,7 @@ export function EndpointsDataTable<TData extends { id: number | string }, TValue
  onAddNew,
  addButtonText = "Add",
  onSelectionChange,
  onBulkDelete,
  pagination: externalPagination,
  onPaginationChange,
  totalCount,
@@ -135,7 +137,8 @@ export function EndpointsDataTable<TData extends { id: number | string }, TValue
      // Selection
      onSelectionChange={onSelectionChange}
      // Bulk operations
      showBulkDelete={false}
      onBulkDelete={onBulkDelete}
      bulkDeleteLabel={tActions("delete")}
      onAddNew={onAddNew}
      addButtonLabel={addButtonText}
      // Bulk add button

@@ -10,6 +10,7 @@ import { createEndpointColumns } from "./endpoints-columns"
import { LoadingSpinner } from "@/components/loading-spinner"
import { DataTableSkeleton } from "@/components/ui/data-table-skeleton"
import { BulkAddUrlsDialog } from "@/components/common/bulk-add-urls-dialog"
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
import { getDateLocale } from "@/lib/date-utils"
import type { TargetType } from "@/lib/url-validator"
import {
@@ -41,6 +42,8 @@ export function EndpointsDetailView({
  const [endpointToDelete, setEndpointToDelete] = useState<Endpoint | null>(null)
  const [selectedEndpoints, setSelectedEndpoints] = useState<Endpoint[]>([])
  const [bulkAddDialogOpen, setBulkAddDialogOpen] = useState(false)
  const [bulkDeleteDialogOpen, setBulkDeleteDialogOpen] = useState(false)
  const [isDeleting, setIsDeleting] = useState(false)

  // Pagination state management
  const [pagination, setPagination] = useState({
@@ -280,6 +283,26 @@ export function EndpointsDetailView({
    URL.revokeObjectURL(url)
  }

  // Handle bulk delete
  const handleBulkDelete = async () => {
    if (selectedEndpoints.length === 0) return

    setIsDeleting(true)
    try {
      const ids = selectedEndpoints.map(e => e.id)
      const result = await EndpointService.bulkDelete(ids)
      toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
      setSelectedEndpoints([])
      setBulkDeleteDialogOpen(false)
      refetch()
    } catch (error) {
      console.error("Failed to delete endpoints", error)
      toast.error(tToast("deleteFailed"))
    } finally {
      setIsDeleting(false)
    }
  }

  // Error state
  if (error) {
    return (
@@ -327,6 +350,7 @@ export function EndpointsDetailView({
        onSelectionChange={handleSelectionChange}
        onDownloadAll={handleDownloadAll}
        onDownloadSelected={handleDownloadSelected}
        onBulkDelete={targetId ? () => setBulkDeleteDialogOpen(true) : undefined}
        onBulkAdd={targetId ? () => setBulkAddDialogOpen(true) : undefined}
      />

@@ -343,7 +367,18 @@ export function EndpointsDetailView({
        />
      )}

      {/* Delete confirmation dialog */}
      {/* Bulk delete confirmation dialog */}
      <ConfirmDialog
        open={bulkDeleteDialogOpen}
        onOpenChange={setBulkDeleteDialogOpen}
        title={tConfirm("deleteTitle")}
        description={tCommon("actions.deleteConfirmMessage", { count: selectedEndpoints.length })}
        onConfirm={handleBulkDelete}
        loading={isDeleting}
        variant="destructive"
      />

      {/* Single delete confirmation dialog */}
      <AlertDialog open={deleteDialogOpen} onOpenChange={setDeleteDialogOpen}>
        <AlertDialogContent>
          <AlertDialogHeader>

@@ -9,6 +9,7 @@ import { ExpandableCell, ExpandableMonoCell } from "@/components/ui/data-table/e
import { ChevronDown, ChevronUp } from "lucide-react"
import { useTranslations } from "next-intl"
import type { FingerPrintHubFingerprint } from "@/types/fingerprint.types"
import { getSeverityStyle } from "@/lib/severity-config"

interface ColumnOptions {
  formatDate: (date: string) => string
@@ -18,15 +19,7 @@ interface ColumnOptions {
 * Severity badge with color coding (matching Vulnerabilities style)
 */
function SeverityBadge({ severity }: { severity: string }) {
  const severityConfig: Record<string, { className: string }> = {
    critical: { className: "bg-[#da3633]/10 text-[#da3633] border border-[#da3633]/20 dark:text-[#f85149]" },
    high: { className: "bg-[#d29922]/10 text-[#d29922] border border-[#d29922]/20" },
    medium: { className: "bg-[#d4a72c]/10 text-[#d4a72c] border border-[#d4a72c]/20" },
    low: { className: "bg-[#238636]/10 text-[#238636] border border-[#238636]/20 dark:text-[#3fb950]" },
    info: { className: "bg-[#848d97]/10 text-[#848d97] border border-[#848d97]/20" },
  }

  const config = severityConfig[severity?.toLowerCase()] || severityConfig.info
  const config = getSeverityStyle(severity)

  return (
    <Badge className={config.className}>

@@ -238,15 +238,39 @@ export function ImportFingerprintDialog({
    // Frontend basic validation for JSON files
    try {
      const text = await file.text()
      const json = JSON.parse(text)
      let json: any

      // Try standard JSON first
      try {
        json = JSON.parse(text)
      } catch {
        // If standard JSON fails, try JSONL format (for goby)
        if (fingerprintType === "goby") {
          const lines = text.trim().split('\n').filter(line => line.trim())
          if (lines.length === 0) {
            toast.error(t("import.emptyData"))
            return
          }
          // Parse each line as JSON
          json = lines.map((line, index) => {
            try {
              return JSON.parse(line)
            } catch {
              throw new Error(`Line ${index + 1}: Invalid JSON`)
            }
          })
        } else {
          throw new Error("Invalid JSON")
        }
      }

      const validation = config.validate(json)
      if (!validation.valid) {
        toast.error(validation.error)
        return
      }
    } catch (e) {
      toast.error(tToast("invalidJsonFile"))
    } catch (e: any) {
      toast.error(e.message || tToast("invalidJsonFile"))
      return
    }
  }

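Note: the JSON-then-JSONL fallback above is self-contained enough to extract. A sketch of the same logic as a standalone helper (the parseJsonOrJsonl name and its standalone form are illustrative, not part of the commit):

// Hypothetical extraction of the parsing fallback shown above.
// Tries strict JSON first, then line-delimited JSON (JSONL), as goby exports use.
export function parseJsonOrJsonl(text: string, allowJsonl: boolean): unknown {
  try {
    return JSON.parse(text)
  } catch {
    if (!allowJsonl) throw new Error("Invalid JSON")
    const lines = text.trim().split("\n").filter(line => line.trim())
    if (lines.length === 0) throw new Error("Empty data")
    return lines.map((line, index) => {
      try {
        return JSON.parse(line)
      } catch {
        throw new Error(`Line ${index + 1}: Invalid JSON`)
      }
    })
  }
}

// Usage mirroring the dialog:
// const json = parseJsonOrJsonl(await file.text(), fingerprintType === "goby")
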
@@ -91,41 +91,8 @@ export function createIPAddressColumns({
    ),
    cell: ({ getValue }) => {
      const hosts = getValue<string[]>()
      if (!hosts || hosts.length === 0) {
        return <span className="text-muted-foreground">-</span>
      }

      const displayHosts = hosts.slice(0, 3)
      const hasMore = hosts.length > 3

      return (
        <div className="flex flex-col gap-1">
          {displayHosts.map((host, index) => (
            <ExpandableCell key={index} value={host} maxLines={1} />
          ))}
          {hasMore && (
            <Popover>
              <PopoverTrigger asChild>
                <Badge variant="secondary" className="text-xs w-fit cursor-pointer hover:bg-muted">
                  +{hosts.length - 3} more
                </Badge>
              </PopoverTrigger>
              <PopoverContent className="w-80 p-3">
                <div className="space-y-2">
                  <h4 className="font-medium text-sm">{t.tooltips.allHosts} ({hosts.length})</h4>
                  <div className="flex flex-col gap-1 max-h-48 overflow-y-auto">
                    {hosts.map((host, index) => (
                      <span key={index} className="text-sm break-all">
                        {host}
                      </span>
                    ))}
                  </div>
                </div>
              </PopoverContent>
            </Popover>
          )}
        </div>
      )
      const value = hosts?.length ? hosts.join("\n") : null
      return <ExpandableCell value={value} maxLines={3} />
    },
  },
  {

@@ -18,7 +18,7 @@ const IP_ADDRESS_FILTER_FIELDS: FilterField[] = [

// IP address page filter examples
const IP_ADDRESS_FILTER_EXAMPLES = [
  'ip="192.168.1.*" && port="80"',
  'ip="192.168.1." && port="80"',
  'port="443" || port="8443"',
  'host="api.example.com" && port!="22"',
]

@@ -54,6 +54,7 @@ export function IPAddressesDataTable({
}: IPAddressesDataTableProps) {
  const t = useTranslations("common.status")
  const tDownload = useTranslations("common.download")
  const tActions = useTranslations("common.actions")

  // Smart search handler
  const handleSmartSearch = (rawQuery: string) => {
@@ -98,7 +99,7 @@ export function IPAddressesDataTable({
      onSelectionChange={onSelectionChange}
      // Bulk operations
      onBulkDelete={onBulkDelete}
      bulkDeleteLabel="Delete"
      bulkDeleteLabel={tActions("delete")}
      showAddButton={false}
      // Download
      downloadOptions={downloadOptions.length > 0 ? downloadOptions : undefined}

@@ -8,6 +8,7 @@ import { createIPAddressColumns } from "./ip-addresses-columns"
import { DataTableSkeleton } from "@/components/ui/data-table-skeleton"
import { Button } from "@/components/ui/button"
import { useTargetIPAddresses, useScanIPAddresses } from "@/hooks/use-ip-addresses"
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
import { getDateLocale } from "@/lib/date-utils"
import type { IPAddress } from "@/types/ip-address.types"
import { IPAddressService } from "@/services/ip-address.service"
@@ -26,6 +27,8 @@ export function IPAddressesView({
  })
  const [selectedIPAddresses, setSelectedIPAddresses] = useState<IPAddress[]>([])
  const [filterQuery, setFilterQuery] = useState("")
  const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
  const [isDeleting, setIsDeleting] = useState(false)

  // Internationalization
  const tColumns = useTranslations("columns")
@@ -197,22 +200,63 @@ export function IPAddressesView({
  }

  // Handle download selected IP addresses
  const handleDownloadSelected = () => {
  const handleDownloadSelected = async () => {
    if (selectedIPAddresses.length === 0) {
      return
    }

    const csvContent = generateCSV(selectedIPAddresses)
    const blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
    const url = URL.createObjectURL(blob)
    const a = document.createElement("a")
    const prefix = scanId ? `scan-${scanId}` : targetId ? `target-${targetId}` : "ip-addresses"
    a.href = url
    a.download = `${prefix}-ip-addresses-selected-${Date.now()}.csv`
    document.body.appendChild(a)
    a.click()
    document.body.removeChild(a)
    URL.revokeObjectURL(url)
    try {
      // Get selected IPs and call backend export API
      const ips = selectedIPAddresses.map(ip => ip.ip)
      let blob: Blob | null = null

      if (targetId) {
        blob = await IPAddressService.exportIPAddressesByTargetId(targetId, ips)
      } else if (scanId) {
        // For scan, use frontend CSV generation as fallback (scan export doesn't support IP filter yet)
        const csvContent = generateCSV(selectedIPAddresses)
        blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
      } else {
        const csvContent = generateCSV(selectedIPAddresses)
        blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
      }

      if (!blob) return

      const url = URL.createObjectURL(blob)
      const a = document.createElement("a")
      const prefix = scanId ? `scan-${scanId}` : targetId ? `target-${targetId}` : "ip-addresses"
      a.href = url
      a.download = `${prefix}-ip-addresses-selected-${Date.now()}.csv`
      document.body.appendChild(a)
      a.click()
      document.body.removeChild(a)
      URL.revokeObjectURL(url)
    } catch (error) {
      console.error("Failed to download selected IP addresses", error)
      toast.error(tToast("downloadFailed"))
    }
  }

  // Handle bulk delete
  const handleBulkDelete = async () => {
    if (selectedIPAddresses.length === 0) return

    setIsDeleting(true)
    try {
      // IP addresses are aggregated, pass IP strings instead of IDs
      const ips = selectedIPAddresses.map(ip => ip.ip)
      const result = await IPAddressService.bulkDelete(ips)
      toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
      setSelectedIPAddresses([])
      setDeleteDialogOpen(false)
      refetch()
    } catch (error) {
      console.error("Failed to delete IP addresses", error)
      toast.error(tToast("deleteFailed"))
    } finally {
      setIsDeleting(false)
    }
  }

  if (error) {
@@ -253,6 +297,18 @@ export function IPAddressesView({
        onSelectionChange={handleSelectionChange}
        onDownloadAll={handleDownloadAll}
        onDownloadSelected={handleDownloadSelected}
        onBulkDelete={targetId ? () => setDeleteDialogOpen(true) : undefined}
      />

      {/* Delete confirmation dialog */}
      <ConfirmDialog
        open={deleteDialogOpen}
        onOpenChange={setDeleteDialogOpen}
        title={tCommon("actions.confirmDelete")}
        description={tCommon("actions.deleteConfirmMessage", { count: selectedIPAddresses.length })}
        onConfirm={handleBulkDelete}
        loading={isDeleting}
        variant="destructive"
      />
    </>
  )

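Note: both download paths above repeat the same object-URL routine (create a temporary URL, click a hidden anchor, clean up). A sketch of the obvious extraction (the downloadBlob helper is illustrative, not in the repo):

// Hypothetical helper distilling the repeated download code above.
// Creates a temporary object URL for the blob, clicks a hidden anchor,
// then removes the anchor and revokes the URL so nothing leaks.
export function downloadBlob(blob: Blob, filename: string): void {
  const url = URL.createObjectURL(blob)
  const a = document.createElement("a")
  a.href = url
  a.download = filename
  document.body.appendChild(a)
  a.click()
  document.body.removeChild(a)
  URL.revokeObjectURL(url)
}

// Usage, mirroring handleDownloadSelected:
// downloadBlob(blob, `${prefix}-ip-addresses-selected-${Date.now()}.csv`)
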
@@ -18,6 +18,7 @@ import { cn } from "@/lib/utils"
import { transformBackendNotification, useNotificationSSE } from "@/hooks/use-notification-sse"
import { useMarkAllAsRead, useNotifications } from "@/hooks/use-notifications"
import type { Notification, NotificationType, NotificationSeverity } from "@/types/notification.types"
import { SEVERITY_CARD_STYLES, SEVERITY_ICON_BG } from "@/lib/severity-config"

/**
 * Notification drawer component
@@ -71,12 +72,52 @@ function getTimeGroup(dateStr?: string): 'today' | 'yesterday' | 'earlier' {
  const now = new Date()
  const today = new Date(now.getFullYear(), now.getMonth(), now.getDate())
  const yesterday = new Date(today.getTime() - 24 * 60 * 60 * 1000)

  if (date >= today) return 'today'
  if (date >= yesterday) return 'yesterday'
  return 'earlier'
}

/** Severity icon class mapping */
const SEVERITY_ICON_CLASS_MAP: Record<NotificationSeverity, string> = {
  critical: "text-[#da3633] dark:text-[#f85149]",
  high: "text-[#d29922]",
  medium: "text-[#d4a72c]",
  low: "text-[#848d97]",
}

/** Severity card class mapping */
const SEVERITY_CARD_CLASS_MAP: Record<NotificationSeverity, string> = {
  critical: SEVERITY_CARD_STYLES.critical,
  high: SEVERITY_CARD_STYLES.high,
  medium: SEVERITY_CARD_STYLES.medium,
  low: SEVERITY_CARD_STYLES.low,
}

/** Get notification icon based on type and severity */
function getNotificationIcon(type: NotificationType, severity?: NotificationSeverity) {
  const severityClass = severity ? SEVERITY_ICON_CLASS_MAP[severity] : "text-gray-500"

  if (type === "vulnerability") {
    return <AlertTriangle className={cn("h-5 w-5", severityClass)} />
  }
  if (type === "scan") {
    return <Activity className={cn("h-5 w-5", severityClass)} />
  }
  if (type === "asset") {
    return <Server className={cn("h-5 w-5", severityClass)} />
  }
  return <Info className={cn("h-5 w-5", severityClass)} />
}

/** Get notification card classes based on severity */
function getNotificationCardClasses(severity?: NotificationSeverity) {
  if (!severity) {
    return "border-border bg-card hover:bg-accent/50"
  }
  return cn("border-border", SEVERITY_CARD_CLASS_MAP[severity] ?? "")
}

export function NotificationDrawer() {
  const t = useTranslations("notificationDrawer")
  const [open, setOpen] = React.useState(false)
@@ -94,20 +135,20 @@ export function NotificationDrawer() {
    { value: 'system', label: t("filters.system"), icon: <Info className="h-3 w-3" /> },
  ]

  // Category title mapping
  const categoryTitleMap: Record<NotificationType, string> = {
  // Category title mapping (memoized to avoid recreation)
  const categoryTitleMap = React.useMemo<Record<NotificationType, string>>(() => ({
    scan: t("categories.scan"),
    vulnerability: t("categories.vulnerability"),
    asset: t("categories.asset"),
    system: t("categories.system"),
  }
  }), [t])

  // Time group labels
  const timeGroupLabels = {
  // Time group labels (memoized to avoid recreation)
  const timeGroupLabels = React.useMemo(() => ({
    today: t("timeGroups.today"),
    yesterday: t("timeGroups.yesterday"),
    earlier: t("timeGroups.earlier"),
  }
  }), [t])

  // SSE real-time notifications
  const { notifications: sseNotifications, isConnected, markNotificationsAsRead } = useNotificationSSE()
@@ -139,7 +180,7 @@ export function NotificationDrawer() {
      }
    }

    return merged.sort((a, b) => {
    return merged.toSorted((a, b) => {
      const aTime = a.createdAt ? new Date(a.createdAt).getTime() : 0
      const bTime = b.createdAt ? new Date(b.createdAt).getTime() : 0
      return bTime - aTime
@@ -175,43 +216,6 @@ export function NotificationDrawer() {
    return allNotifications.filter(n => n.type === activeFilter)
  }, [allNotifications, activeFilter])

  // Get notification icon
  const severityIconClassMap: Record<NotificationSeverity, string> = {
    critical: "text-[#da3633] dark:text-[#f85149]",
    high: "text-[#d29922]",
    medium: "text-[#d4a72c]",
    low: "text-[#848d97]",
  }

  const getNotificationIcon = (type: NotificationType, severity?: NotificationSeverity) => {
    const severityClass = severity ? severityIconClassMap[severity] : "text-gray-500"

    if (type === "vulnerability") {
      return <AlertTriangle className={cn("h-5 w-5", severityClass)} />
    }
    if (type === "scan") {
      return <Activity className={cn("h-5 w-5", severityClass)} />
    }
    if (type === "asset") {
      return <Server className={cn("h-5 w-5", severityClass)} />
    }
    return <Info className={cn("h-5 w-5", severityClass)} />
  }

  const severityCardClassMap: Record<NotificationSeverity, string> = {
    critical: "border-[#da3633]/30 bg-[#da3633]/5 hover:bg-[#da3633]/10 dark:border-[#f85149]/30 dark:bg-[#f85149]/5 dark:hover:bg-[#f85149]/10",
    high: "border-[#d29922]/30 bg-[#d29922]/5 hover:bg-[#d29922]/10 dark:border-[#d29922]/30 dark:bg-[#d29922]/5 dark:hover:bg-[#d29922]/10",
    medium: "border-[#d4a72c]/30 bg-[#d4a72c]/5 hover:bg-[#d4a72c]/10 dark:border-[#d4a72c]/30 dark:bg-[#d4a72c]/5 dark:hover:bg-[#d4a72c]/10",
    low: "border-[#848d97]/30 bg-[#848d97]/5 hover:bg-[#848d97]/10 dark:border-[#848d97]/30 dark:bg-[#848d97]/5 dark:hover:bg-[#848d97]/10",
  }

  const getNotificationCardClasses = (severity?: NotificationSeverity) => {
    if (!severity) {
      return "border-border bg-card hover:bg-accent/50"
    }
    return cn("border-border", severityCardClassMap[severity] ?? "")
  }

  const handleMarkAll = React.useCallback(() => {
    if (allNotifications.length === 0 || isMarkingAll) return
    markAllAsRead(undefined, {
@@ -240,8 +244,8 @@ export function NotificationDrawer() {
    return groups
  }, [filteredNotifications])

  // Render single notification card
  const renderNotificationCard = (notification: Notification) => (
  // Render single notification card (memoized to avoid recreation)
  const renderNotificationCard = React.useCallback((notification: Notification) => (
    <div
      key={notification.id}
      className={cn(
@@ -256,10 +260,10 @@ export function NotificationDrawer() {
      <div className="flex items-start gap-3">
        <div className={cn(
          "mt-0.5 p-1.5 rounded-full shrink-0",
          notification.severity === 'critical' && "bg-[#da3633]/10 dark:bg-[#f85149]/10",
          notification.severity === 'high' && "bg-[#d29922]/10",
          notification.severity === 'medium' && "bg-[#d4a72c]/10",
          (!notification.severity || notification.severity === 'low') && "bg-muted"
          notification.severity === 'critical' && SEVERITY_ICON_BG.critical,
          notification.severity === 'high' && SEVERITY_ICON_BG.high,
          notification.severity === 'medium' && SEVERITY_ICON_BG.medium,
          (!notification.severity || notification.severity === 'low') && SEVERITY_ICON_BG.info
        )}>
          {getNotificationIcon(notification.type, notification.severity)}
        </div>
@@ -284,12 +288,12 @@ export function NotificationDrawer() {
        </div>
      </div>
    </div>
  )
  ), [categoryTitleMap])

  // Render notification list (with time grouping)
  const renderNotificationList = () => {
  // Render notification list (with time grouping, memoized to avoid recreation)
  const renderNotificationList = React.useCallback(() => {
    const hasAny = filteredNotifications.length > 0

    if (!hasAny) {
      return (
        <div className="flex flex-col items-center justify-center h-40 text-muted-foreground">
@@ -304,7 +308,7 @@ export function NotificationDrawer() {
        {(['today', 'yesterday', 'earlier'] as const).map(group => {
          const items = groupedNotifications[group]
          if (items.length === 0) return null

          return (
            <div key={group}>
              <h3 className="sticky top-0 z-10 text-xs font-medium text-muted-foreground mb-2 px-1 py-1 backdrop-blur bg-background/90">
@@ -318,7 +322,7 @@ export function NotificationDrawer() {
        })}
      </div>
    )
  }
  }, [filteredNotifications, groupedNotifications, timeGroupLabels, renderNotificationCard, t])

  return (
    <Sheet open={open} onOpenChange={setOpen}>

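Note: one subtle fix in this file is merged.sort(...) becoming merged.toSorted(...). Array.prototype.toSorted (ES2023) returns a new sorted array instead of reordering in place, so the array built inside the memo is no longer mutated on every re-read. A minimal illustration:

const merged = [3, 1, 2]

const copy = merged.toSorted((a, b) => a - b)
console.log(copy)   // [1, 2, 3]
console.log(merged) // [3, 1, 2] — original untouched, safe for React state/memos

const same = merged.sort((a, b) => a - b)
console.log(same === merged) // true — sort() reorders the array in place
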
@@ -32,7 +32,7 @@ import {
} from "@/components/ui/form"

import { useCreateOrganization } from "@/hooks/use-organizations"
import { useBatchCreateTargets } from "@/hooks/use-targets"
import { batchCreateTargets } from "@/services/target.service"

import type { Organization } from "@/types/organization.types"

@@ -68,7 +68,7 @@ export function AddOrganizationDialog({
  const textareaRef = useRef<HTMLTextAreaElement | null>(null)

  const createOrganization = useCreateOrganization()
  const batchCreateTargets = useBatchCreateTargets()
  const [isCreatingTargets, setIsCreatingTargets] = useState(false)

  const form = useForm<FormValues>({
    resolver: zodResolver(formSchema),
@@ -105,7 +105,7 @@ export function AddOrganizationDialog({
    }
  }

  const onSubmit = (values: FormValues) => {
  const onSubmit = async (values: FormValues) => {
    if (targetValidation.invalid.length > 0) return

    createOrganization.mutate(
@@ -114,7 +114,7 @@ export function AddOrganizationDialog({
        description: values.description?.trim() || "",
      },
      {
        onSuccess: (newOrganization) => {
        onSuccess: async (newOrganization) => {
          if (values.targets && values.targets.trim()) {
            const targetList = values.targets
              .split("\n")
@@ -123,40 +123,32 @@ export function AddOrganizationDialog({
              .map(name => ({ name }))

            if (targetList.length > 0) {
              batchCreateTargets.mutate(
                { targets: targetList, organizationId: newOrganization.id },
                {
                  onSuccess: () => {
                    form.reset()
                    setOpen(false)
                    if (onAdd) onAdd(newOrganization)
                  }
                }
              )
            } else {
              form.reset()
              setOpen(false)
              if (onAdd) onAdd(newOrganization)
              setIsCreatingTargets(true)
              try {
                // Call service directly to avoid double toast
                await batchCreateTargets({ targets: targetList, organizationId: newOrganization.id })
              } finally {
                setIsCreatingTargets(false)
              }
            }
          } else {
            form.reset()
            setOpen(false)
            if (onAdd) onAdd(newOrganization)
          }
          form.reset()
          setOpen(false)
          if (onAdd) onAdd(newOrganization)
        }
      }
    )
  }

  const handleOpenChange = (newOpen: boolean) => {
    if (!createOrganization.isPending && !batchCreateTargets.isPending) {
    if (!createOrganization.isPending && !isCreatingTargets) {
      setOpen(newOpen)
      if (!newOpen) form.reset()
    }
  }

  const isFormValid = form.formState.isValid && targetValidation.invalid.length === 0
  const isSubmitting = createOrganization.isPending || batchCreateTargets.isPending
  const isSubmitting = createOrganization.isPending || isCreatingTargets

  return (
    <Dialog open={open} onOpenChange={handleOpenChange}>

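Note: the rewrite above drops the nested useBatchCreateTargets mutation in favor of calling the service function directly (per the in-diff comment, to avoid a double toast) and mirrors the pending state in local component state. The pattern, distilled (names taken from the diff; the helper itself is illustrative, not part of the commit):

// Hypothetical distillation of the hunk above: when a mutation hook's
// built-in side effects (toasts) are unwanted, call the service directly
// and track pending state yourself so the dialog can still disable itself.
const [isCreatingTargets, setIsCreatingTargets] = useState(false)

async function createTargetsFor(orgId: number, names: string[]) {
  setIsCreatingTargets(true)
  try {
    await batchCreateTargets({ targets: names.map(name => ({ name })), organizationId: orgId })
  } finally {
    setIsCreatingTargets(false)
  }
}
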
@@ -41,7 +41,7 @@ export interface OrganizationTranslations {
    selectRow: string
  }
  tooltips: {
    targetSummary: string
    organizationDetails: string
    initiateScan: string
  }
}
@@ -240,7 +240,7 @@ export const createOrganizationColumns = ({
        </Button>
      </TooltipTrigger>
      <TooltipContent side="top">
        <p className="text-xs">{t.tooltips.targetSummary}</p>
        <p className="text-xs">{t.tooltips.organizationDetails}</p>
      </TooltipContent>
    </Tooltip>
  </TooltipProvider>

@@ -82,12 +82,20 @@ export function OrganizationDetailView({
  const [searchQuery, setSearchQuery] = useState("")
  const [isSearching, setIsSearching] = useState(false)

  // Type filter state
  const [typeFilter, setTypeFilter] = useState<string>("")

  const handleSearchChange = (value: string) => {
    setIsSearching(true)
    setSearchQuery(value)
    setPagination((prev) => ({ ...prev, pageIndex: 0 }))
  }

  const handleTypeFilterChange = (value: string) => {
    setTypeFilter(value)
    setPagination((prev) => ({ ...prev, pageIndex: 0 }))
  }

  // Use unlink targets mutation
  const unlinkTargets = useUnlinkTargetsFromOrganization()

@@ -111,6 +119,7 @@ export function OrganizationDetailView({
      page: pagination.pageIndex + 1,
      pageSize: pagination.pageSize,
      search: searchQuery || undefined,
      type: typeFilter || undefined,
    }
  )

@@ -306,7 +315,6 @@ export function OrganizationDetailView({
        searchValue={searchQuery}
        onSearch={handleSearchChange}
        isSearching={isSearching}
        addButtonText={tCommon("actions.add")}
        pagination={pagination}
        setPagination={setPagination}
        paginationInfo={targetsData ? {
@@ -316,6 +324,8 @@ export function OrganizationDetailView({
          totalPages: targetsData.totalPages,
        } : undefined}
        onPaginationChange={handlePaginationChange}
        typeFilter={typeFilter}
        onTypeFilterChange={handleTypeFilterChange}
      />
    </div>

@@ -58,6 +58,7 @@ export function OrganizationList() {
  const tCommon = useTranslations("common")
  const tTooltips = useTranslations("tooltips")
  const tConfirm = useTranslations("common.confirm")
  const tOrg = useTranslations("organization")
  const locale = useLocale()

  // Build the translations object
@@ -77,7 +78,7 @@ export function OrganizationList() {
      selectRow: tCommon("actions.selectRow"),
    },
    tooltips: {
      targetSummary: tTooltips("targetSummary"),
      organizationDetails: tTooltips("organizationDetails"),
      initiateScan: tTooltips("initiateScan"),
    },
  }), [tColumns, tCommon, tTooltips])
@@ -120,7 +121,7 @@ export function OrganizationList() {
  } = useOrganizations({
    page: pagination.pageIndex + 1, // Convert to 1-based
    pageSize: pagination.pageSize,
    search: searchQuery || undefined,
    filter: searchQuery || undefined,
  }, { enabled: true })

  useEffect(() => {
@@ -272,7 +273,7 @@ export function OrganizationList() {
      onAddNew={() => setAddDialogOpen(true)}
      onBulkDelete={handleBulkDelete}
      onSelectionChange={setSelectedOrganizations}
      searchPlaceholder={tColumns("organization.organization")}
      searchPlaceholder={tOrg("name")}
      searchColumn="name"
      searchValue={searchQuery}
      onSearch={handleSearchChange}

@@ -132,7 +132,7 @@ function TargetNameCell({
  return (
    <div className="group flex items-start gap-1 flex-1 min-w-0">
      <button
        onClick={() => navigate(`/target/${targetId}/website/`)}
        onClick={() => navigate(`/target/${targetId}/overview/`)}
        className="text-sm font-medium hover:text-primary hover:underline underline-offset-2 transition-colors cursor-pointer text-left break-all leading-relaxed whitespace-normal"
      >
        {name}
@@ -251,7 +251,7 @@ export const createTargetColumns = ({
  cell: ({ row }) => (
    <TargetRowActions
      target={row.original}
      onView={() => navigate(`/target/${row.original.id}/website/`)}
      onView={() => navigate(`/target/${row.original.id}/overview/`)}
      onDelete={() => handleDelete(row.original)}
      t={t}
    />

@@ -2,10 +2,18 @@

import * as React from "react"
import { IconSearch, IconLoader2 } from "@tabler/icons-react"
import { Filter } from "lucide-react"
import { useTranslations } from "next-intl"
import { Button } from "@/components/ui/button"
import { Input } from "@/components/ui/input"
import { UnifiedDataTable } from "@/components/ui/data-table"
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@/components/ui/select"
import type { ColumnDef } from "@tanstack/react-table"
import type { Target } from "@/types/target.types"
import type { PaginationInfo } from "@/types/common.types"
@@ -26,6 +34,8 @@ interface TargetsDataTableProps {
  setPagination?: React.Dispatch<React.SetStateAction<{ pageIndex: number; pageSize: number }>>
  paginationInfo?: PaginationInfo
  onPaginationChange?: (pagination: { pageIndex: number; pageSize: number }) => void
  typeFilter?: string
  onTypeFilterChange?: (value: string) => void
}

/**
@@ -48,9 +58,13 @@ export function TargetsDataTable({
  setPagination: setExternalPagination,
  paginationInfo,
  onPaginationChange,
  typeFilter,
  onTypeFilterChange,
}: TargetsDataTableProps) {
  const t = useTranslations("common.status")
  const tTarget = useTranslations("target")
  const tTooltips = useTranslations("tooltips")
  const tCommon = useTranslations("common")

  // Local search input state
  const [localSearchValue, setLocalSearchValue] = React.useState(searchValue || "")
@@ -71,13 +85,6 @@ export function TargetsDataTable({
    }
  }

  // Custom add button (supports onAddHover)
  const addButton = onAddNew ? (
    <Button onClick={onAddNew} onMouseEnter={onAddHover} size="sm">
      {addButtonText || tTarget("createTarget")}
    </Button>
  ) : undefined

  return (
    <UnifiedDataTable
      data={data}
@@ -91,8 +98,14 @@ export function TargetsDataTable({
      // Selection
      onSelectionChange={onSelectionChange}
      // Bulk operations
      showBulkDelete={false}
      showAddButton={false}
      showBulkDelete={!!onBulkDelete}
      onBulkDelete={onBulkDelete}
      bulkDeleteLabel={tTooltips("unlinkTarget")}
      // Add button (placed after the unlink button)
      showAddButton={!!onAddNew}
      onAddNew={onAddNew}
      onAddHover={onAddHover}
      addButtonLabel={addButtonText || tTarget("addTarget")}
      // Empty state
      emptyMessage={t("noData")}
      // Custom toolbar
@@ -112,9 +125,22 @@ export function TargetsDataTable({
              <IconSearch className="h-4 w-4" />
            )}
          </Button>
          {onTypeFilterChange && (
            <Select value={typeFilter || "all"} onValueChange={(value) => onTypeFilterChange(value === "all" ? "" : value)}>
              <SelectTrigger size="sm" className="w-auto">
                <Filter className="h-4 w-4" />
                <SelectValue placeholder={tCommon("actions.filter")} />
              </SelectTrigger>
              <SelectContent>
                <SelectItem value="all">{tCommon("actions.all")}</SelectItem>
                <SelectItem value="domain">{tTarget("types.domain")}</SelectItem>
                <SelectItem value="ip">{tTarget("types.ip")}</SelectItem>
                <SelectItem value="cidr">{tTarget("types.cidr")}</SelectItem>
              </SelectContent>
            </Select>
          )}
        </div>
      }
      toolbarRight={addButton}
    />
  )
}

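Note: the type filter above maps the sentinel item "all" to an empty string before calling back (value === "all" ? "" : value). Empty-string item values are reserved for clearing the selection in the Radix-based Select that shadcn/ui wraps, so the UI keeps a non-empty sentinel while the data layer stores "" (later sent as undefined) for "no filter". The pattern, condensed into an illustrative wrapper (assumes the Select imports shown in the hunk above; the component itself is not part of the commit):

// Illustrative wrapper around the sentinel mapping used above.
// UI state: "all" | "domain" | "ip" | "cidr" — never "".
// Data state: "" means "no type filter".
function TypeFilterSelect({ value, onChange }: { value: string; onChange: (v: string) => void }) {
  return (
    <Select
      value={value || "all"} // "" from the data layer renders as the "all" item
      onValueChange={(v) => onChange(v === "all" ? "" : v)}
    >
      <SelectTrigger size="sm" className="w-auto">
        <SelectValue />
      </SelectTrigger>
      <SelectContent>
        <SelectItem value="all">All</SelectItem>
        <SelectItem value="domain">Domain</SelectItem>
        <SelectItem value="ip">IP</SelectItem>
        <SelectItem value="cidr">CIDR</SelectItem>
      </SelectContent>
    </Select>
  )
}
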
@@ -248,7 +248,7 @@ export function OrganizationTargetsDetailView({
      onBulkDelete={handleBulkDelete}
      onSelectionChange={setSelectedTargets}
      searchPlaceholder={tColumns("target.target")}
      addButtonText={tCommon("actions.add")}
      addButtonText={tTarget("addTarget")}
      pagination={pagination}
      setPagination={setPagination}
      paginationInfo={targetsData ? {

frontend/components/scan/engine-preset-selector.tsx (new file, 318 lines)
@@ -0,0 +1,318 @@
"use client"

import React, { useMemo, useCallback } from "react"
import { Play, Server, Settings, Zap } from "lucide-react"
import { useTranslations } from "next-intl"

import { Badge } from "@/components/ui/badge"
import { Checkbox } from "@/components/ui/checkbox"
import { cn } from "@/lib/utils"
import { CAPABILITY_CONFIG, parseEngineCapabilities, mergeEngineConfigurations } from "@/lib/engine-config"

import type { ScanEngine } from "@/types/engine.types"

export interface EnginePreset {
  id: string
  label: string
  description: string
  icon: React.ComponentType<{ className?: string }>
  engineIds: number[]
}

interface EnginePresetSelectorProps {
  engines: ScanEngine[]
  selectedEngineIds: number[]
  selectedPresetId: string | null
  onPresetChange: (presetId: string | null) => void
  onEngineIdsChange: (engineIds: number[]) => void
  onConfigurationChange: (config: string) => void
  disabled?: boolean
  className?: string
}

export function EnginePresetSelector({
  engines,
  selectedEngineIds,
  selectedPresetId,
  onPresetChange,
  onEngineIdsChange,
  onConfigurationChange,
  disabled = false,
  className,
}: EnginePresetSelectorProps) {
  const t = useTranslations("scan.initiate")
  const tStages = useTranslations("scan.progress.stages")

  // Preset definitions with precise engine filtering
  const enginePresets = useMemo(() => {
    if (!engines?.length) return []

    // Categorize engines by their capabilities
    const fullScanEngines: number[] = []
    const reconEngines: number[] = []
    const vulnEngines: number[] = []

    engines.forEach(e => {
      const caps = parseEngineCapabilities(e.configuration || "")
      const hasRecon = caps.includes("subdomain_discovery") || caps.includes("port_scan") || caps.includes("site_scan") || caps.includes("fingerprint_detect") || caps.includes("directory_scan") || caps.includes("url_fetch") || caps.includes("screenshot")
      const hasVuln = caps.includes("vuln_scan")

      if (hasRecon && hasVuln) {
        // Full capability engine - only for full scan
        fullScanEngines.push(e.id)
      } else if (hasRecon && !hasVuln) {
        // Recon only engine
        reconEngines.push(e.id)
      } else if (hasVuln && !hasRecon) {
        // Vuln only engine
        vulnEngines.push(e.id)
      }
    })

    return [
      {
        id: "full",
        label: t("presets.fullScan"),
        description: t("presets.fullScanDesc"),
        icon: Zap,
        engineIds: fullScanEngines,
      },
      {
        id: "recon",
        label: t("presets.recon"),
        description: t("presets.reconDesc"),
        icon: Server,
        engineIds: reconEngines,
      },
      {
        id: "vuln",
        label: t("presets.vulnScan"),
        description: t("presets.vulnScanDesc"),
        icon: Play,
        engineIds: vulnEngines,
      },
      {
        id: "custom",
        label: t("presets.custom"),
        description: t("presets.customDesc"),
        icon: Settings,
        engineIds: [],
      },
    ]
  }, [engines, t])

  const selectedEngines = useMemo(() => {
    if (!selectedEngineIds.length || !engines) return []
    return engines.filter((e) => selectedEngineIds.includes(e.id))
  }, [selectedEngineIds, engines])

  const selectedCapabilities = useMemo(() => {
    if (!selectedEngines.length) return []
    const allCaps = new Set<string>()
    selectedEngines.forEach((engine) => {
      parseEngineCapabilities(engine.configuration || "").forEach((cap) => allCaps.add(cap))
    })
    return Array.from(allCaps)
  }, [selectedEngines])

  // Get currently selected preset details
  const selectedPreset = useMemo(() => {
    return enginePresets.find(p => p.id === selectedPresetId)
  }, [enginePresets, selectedPresetId])

  // Get engines for the selected preset
  const presetEngines = useMemo(() => {
    if (!selectedPreset || selectedPreset.id === "custom") return []
    return engines?.filter(e => selectedPreset.engineIds.includes(e.id)) || []
  }, [selectedPreset, engines])

  // Update configuration when engines change
  const updateConfigurationFromEngines = useCallback((engineIds: number[]) => {
    if (!engines) return
    const selectedEngs = engines.filter(e => engineIds.includes(e.id))
    const mergedConfig = mergeEngineConfigurations(selectedEngs.map(e => e.configuration || ""))
    onConfigurationChange(mergedConfig)
  }, [engines, onConfigurationChange])

  const handlePresetSelect = useCallback((preset: EnginePreset) => {
    onPresetChange(preset.id)
    if (preset.id !== "custom") {
      onEngineIdsChange(preset.engineIds)
      updateConfigurationFromEngines(preset.engineIds)
    } else {
      // Custom mode - keep current selection or clear
      if (selectedEngineIds.length === 0) {
        onConfigurationChange("")
      }
    }
  }, [onPresetChange, onEngineIdsChange, updateConfigurationFromEngines, selectedEngineIds.length, onConfigurationChange])

  const handleEngineToggle = useCallback((engineId: number, checked: boolean) => {
    let newEngineIds: number[]
    if (checked) {
      newEngineIds = [...selectedEngineIds, engineId]
    } else {
      newEngineIds = selectedEngineIds.filter((id) => id !== engineId)
    }
    onEngineIdsChange(newEngineIds)
    updateConfigurationFromEngines(newEngineIds)
  }, [selectedEngineIds, onEngineIdsChange, updateConfigurationFromEngines])

  return (
    <div className={cn("flex flex-col h-full", className)}>
      <div className="flex-1 overflow-y-auto p-6">
        {/* Compact preset cards */}
        <div className="grid grid-cols-4 gap-3 mb-4">
          {enginePresets.map((preset) => {
            const isActive = selectedPresetId === preset.id
            const PresetIcon = preset.icon
            const matchedEngines = preset.id === "custom"
              ? []
              : engines?.filter(e => preset.engineIds.includes(e.id)) || []

            return (
              <button
                key={preset.id}
                type="button"
                onClick={() => handlePresetSelect(preset)}
                disabled={disabled}
                className={cn(
                  "flex flex-col items-center p-3 rounded-lg border-2 text-center transition-all",
                  isActive
                    ? "border-primary bg-primary/5"
                    : "border-border hover:border-primary/50 hover:bg-muted/30",
                  disabled && "opacity-50 cursor-not-allowed"
                )}
              >
                <div className={cn(
                  "flex h-10 w-10 items-center justify-center rounded-lg mb-2",
                  isActive ? "bg-primary text-primary-foreground" : "bg-muted"
                )}>
                  <PresetIcon className="h-5 w-5" />
                </div>
                <span className="text-sm font-medium">{preset.label}</span>
                {preset.id !== "custom" && (
                  <span className="text-xs text-muted-foreground mt-1">
                    {matchedEngines.length} {t("presets.enginesCount")}
                  </span>
                )}
              </button>
            )
          })}
        </div>

        {/* Selected preset details */}
        {selectedPresetId && selectedPresetId !== "custom" && (
          <div className="border rounded-lg p-4 bg-muted/10">
            <div className="flex items-start justify-between mb-3">
              <div>
                <h3 className="font-medium">{selectedPreset?.label}</h3>
                <p className="text-sm text-muted-foreground mt-1">{selectedPreset?.description}</p>
              </div>
            </div>

            {/* Capabilities */}
            <div className="mb-4">
              <h4 className="text-xs font-medium text-muted-foreground mb-2">{t("presets.capabilities")}</h4>
              <div className="flex flex-wrap gap-1.5">
                {selectedCapabilities.map((capKey) => {
                  const config = CAPABILITY_CONFIG[capKey]
                  return (
                    <Badge key={capKey} variant="outline" className={cn("text-xs", config?.color)}>
                      {tStages(capKey)}
                    </Badge>
                  )
                })}
              </div>
            </div>

            {/* Engines list */}
            <div>
              <h4 className="text-xs font-medium text-muted-foreground mb-2">{t("presets.usedEngines")}</h4>
              <div className="flex flex-wrap gap-2">
                {presetEngines.map((engine) => (
                  <span key={engine.id} className="text-sm px-3 py-1.5 bg-background rounded-md border">
                    {engine.name}
                  </span>
                ))}
              </div>
            </div>
          </div>
        )}

        {/* Custom mode engine selection */}
        {selectedPresetId === "custom" && (
          <div className="border rounded-lg p-4 bg-muted/10">
            <div className="flex items-start justify-between mb-3">
              <div>
                <h3 className="font-medium">{selectedPreset?.label}</h3>
                <p className="text-sm text-muted-foreground mt-1">{selectedPreset?.description}</p>
              </div>
            </div>

            {/* Capabilities - dynamically calculated from selected engines */}
            <div className="mb-4">
              <h4 className="text-xs font-medium text-muted-foreground mb-2">{t("presets.capabilities")}</h4>
              <div className="flex flex-wrap gap-1.5">
                {selectedCapabilities.length > 0 ? (
                  selectedCapabilities.map((capKey) => {
                    const config = CAPABILITY_CONFIG[capKey]
                    return (
                      <Badge key={capKey} variant="outline" className={cn("text-xs", config?.color)}>
                        {tStages(capKey)}
                      </Badge>
                    )
                  })
                ) : (
                  <span className="text-xs text-muted-foreground">{t("presets.noCapabilities")}</span>
                )}
              </div>
            </div>

            {/* Engines list - selectable */}
            <div>
              <h4 className="text-xs font-medium text-muted-foreground mb-2">{t("presets.usedEngines")}</h4>
              <div className="flex flex-wrap gap-2">
                {engines?.map((engine) => {
                  const isSelected = selectedEngineIds.includes(engine.id)
                  return (
                    <label
                      key={engine.id}
                      htmlFor={`preset-engine-${engine.id}`}
                      className={cn(
                        "flex items-center gap-2 px-3 py-1.5 rounded-md cursor-pointer transition-all border",
                        isSelected
                          ? "bg-primary/10 border-primary/30"
                          : "hover:bg-muted/50 border-border",
                        disabled && "opacity-50 cursor-not-allowed"
                      )}
                    >
                      <Checkbox
                        id={`preset-engine-${engine.id}`}
                        checked={isSelected}
                        onCheckedChange={(checked) => {
                          handleEngineToggle(engine.id, checked as boolean)
                        }}
                        disabled={disabled}
                        className="h-4 w-4"
                      />
                      <span className="text-sm">{engine.name}</span>
                    </label>
                  )
                })}
              </div>
            </div>
          </div>
        )}

        {/* Empty state */}
        {!selectedPresetId && (
          <div className="flex flex-col items-center justify-center py-12 text-muted-foreground">
            <Server className="h-12 w-12 mb-4 opacity-50" />
            <p className="text-sm">{t("presets.selectHint")}</p>
          </div>
        )}
      </div>
    </div>
  )
}

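Note: the selector leans on parseEngineCapabilities and mergeEngineConfigurations from @/lib/engine-config, which this compare view does not include. A hedged sketch matching the call sites — the capability keys are exactly those the component checks, but the real implementation is presumably YAML-aware rather than regex- and concatenation-based:

// lib/engine-config.ts — hypothetical reconstruction, not the actual source.
const KNOWN_CAPABILITIES = [
  "subdomain_discovery", "port_scan", "site_scan", "fingerprint_detect",
  "directory_scan", "url_fetch", "screenshot", "vuln_scan",
]

// Capability = a top-level section key present in the engine's YAML config string.
export function parseEngineCapabilities(configuration: string): string[] {
  return KNOWN_CAPABILITIES.filter((cap) =>
    new RegExp(`^${cap}\\s*:`, "m").test(configuration)
  )
}

// Naive merge: concatenate the non-empty configs so the result enables the
// union of all selected capabilities. A production version would parse the
// YAML and deep-merge overlapping sections instead of joining text.
export function mergeEngineConfigurations(configurations: string[]): string {
  return configurations
    .map((c) => c.trim())
    .filter(Boolean)
    .join("\n\n")
}
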
@@ -58,14 +58,6 @@ subdomain_discovery:
    enabled: true
    timeout: 600  # 10 minutes (required)

  amass_passive:
    enabled: true
    timeout: 600  # 10 minutes (required)

  amass_active:
    enabled: true
    timeout: 1800  # 30 minutes (required)

  sublist3r:
    enabled: true
    timeout: 900  # 15 minutes (required)
@@ -96,6 +88,7 @@ site_scan:
  httpx:
    enabled: true
    timeout: auto  # Auto calculate
    # screenshot: true  # Enable site screenshot (requires Chromium)

# ==================== Directory Scan ====================

# ==================== Directory Scan ====================
|
||||
@@ -212,16 +205,11 @@ url_fetch:
|
||||
await new Promise(resolve => setTimeout(resolve, 1000))
|
||||
}
|
||||
|
||||
toast.success(tToast("configSaveSuccess"), {
|
||||
description: tToast("configSaveSuccessDesc", { name: engine.name }),
|
||||
})
|
||||
setHasChanges(false)
|
||||
onOpenChange(false)
|
||||
} catch (error) {
|
||||
console.error("Failed to save YAML config:", error)
|
||||
toast.error(tToast("configSaveFailed"), {
|
||||
description: error instanceof Error ? error.message : tToast("unknownError"),
|
||||
})
|
||||
// Error toast is handled by useUpdateEngine hook
|
||||
} finally {
|
||||
setIsSubmitting(false)
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.