Compare commits

..

39 Commits

Author SHA1 Message Date
yyhuni
53ba03d1e5 Support Kali 2026-01-08 10:14:12 +08:00
github-actions[bot]
89c44ebd05 chore: bump version to v1.5.2 2026-01-08 00:20:11 +00:00
yyhuni
e0e3419edb chore(docker): improve worker dockerfile reliability with retry mechanism
- Add retry mechanism for apt-get install to handle ARM64 mirror sync delays
- Use --no-install-recommends flag to reduce image size and installation time
- Split apt-get update and install commands for better layer caching
- Add fallback installation logic for packages in case of initial failure
- Include explanatory comment about ARM64 ports.ubuntu.com potential delays
- Maintain compatibility with both ARM64 and AMD64 architectures
2026-01-08 08:14:24 +08:00
yyhuni
52ee4684a7 chore(docker): add apt-get update before playwright dependencies
- Add apt-get update before installing playwright chromium dependencies
- Ensures package lists are refreshed before installing system dependencies
- Prevents potential package installation failures in Docker builds
2026-01-08 08:09:21 +08:00
yyhuni
ce8cebf11d chore(frontend): update pnpm-lock.yaml with @radix-ui/react-hover-card
- Add @radix-ui/react-hover-card@1.1.15 package resolution entry
- Add package snapshot with all required dependencies and peer dependencies
- Update lock file to reflect new hover card component dependency
- Ensures consistent dependency management across the frontend environment
2026-01-08 07:57:58 +08:00
yyhuni
ec006d8f54 chore(frontend): add @radix-ui/react-hover-card dependency
- Add @radix-ui/react-hover-card v1.1.6 to project dependencies
- Enables hover card UI component functionality for improved user interactions
- Maintains consistency with existing Radix UI component library usage
2026-01-08 07:56:07 +08:00
yyhuni
48976a570f docs: update README with screenshot feature and sponsorship info
- Add screenshot feature documentation to features section with Playwright details
- Include WebP format compression benefits and multi-source URL support
- Add screenshot stage to scan flow architecture diagram with styling
- Add fingerprint library table with counts for public distribution
- Add sponsorship section with WeChat Pay and Alipay QR codes
- Add sponsor appreciation table
- Update frontend dependencies with @radix-ui/react-visually-hidden package
- Remove redundant installation speed note from mirror parameter documentation
- Clean up demo link formatting in online demo section
2026-01-08 07:54:31 +08:00
yyhuni
5da7229873 feat(scan-overview): add yaml configuration tab and improve logs layout
- Add yaml_configuration field to ScanHistorySerializer for backend exposure
- Implement tabbed interface with Logs and Configuration tabs in scan overview
- Add YamlEditor component to display scan configuration in read-only mode
- Refactor logs section to show status bar only when logs tab is active
- Move auto-refresh toggle to logs tab header for better UX
- Add padding to stage progress items for improved visual alignment
- Add internationalization strings for new UI elements (en and zh)
- Update ScanHistory type to include yamlConfiguration field
- Improve tab switching state management with activeTab state
2026-01-08 07:31:54 +08:00
yyhuni
8bb737a9fa feat(scan-history): add auto-refresh toggle and improve layout
- Add auto-refresh toggle switch to scan logs section for manual control
- Implement flexible polling based on auto-refresh state and scan status
- Restructure scan overview layout to use left-right split (stages + logs)
- Move stage progress to left column with vulnerability statistics
- Implement scrollable logs panel on right side with proper height constraints
- Update component imports to use Switch and Label instead of Button
- Add full-height flex layout to parent containers for proper scrolling
- Refactor grid layout from 2-column to fixed-width left + flexible right
- Update translations for new UI elements and labels
- Improve responsive design with better flex constraints and min-height handling
2026-01-07 23:30:27 +08:00
yyhuni
2d018d33f3 Optimize scan history detail page 2026-01-07 22:44:46 +08:00
yyhuni
0c07cc8497 refactor(scan-flows): simplify logger calls by splitting multiline strings
- Split multiline logger.info() calls into separate single-line calls in initiate_scan_flow.py
- Improved log readability by removing string concatenation with newlines and separators
- Refactored 6 logger.info() calls across sequential, parallel, and completion stages
- Updated subdomain_discovery_flow.py to use consistent single-line logger pattern
- Maintains same log output while improving code maintainability and consistency
2026-01-07 22:21:50 +08:00
yyhuni
225b039985 style(system-logs): adjust log level filter dropdown width
- Increase SelectTrigger width from 100px to 130px for better label visibility
- Improve UI consistency in log toolbar component
- Prevent text truncation in log level filter dropdown
2026-01-07 22:17:07 +08:00
yyhuni
d1624627bc Add icons to top-level tabs 2026-01-07 22:14:42 +08:00
yyhuni
7bb15e4ae4 Add screenshot feature 2026-01-07 22:10:51 +08:00
github-actions[bot]
8e8cc29669 chore: bump version to v1.4.1 2026-01-07 01:33:29 +00:00
yyhuni
d6d5338acb Add asset deletion feature 2026-01-07 09:29:31 +08:00
yyhuni
c521bdb511 Refactor fallback logic 2026-01-07 08:45:27 +08:00
yyhuni
abf2d95f6f feat(targets): increase max batch size for target creation from 1000 to 5000
- Update MAX_BATCH_SIZE constant in BatchCreateTargetSerializer from 1000 to 5000
- Increase batch creation limit to support larger bulk operations
- Update documentation comment to reflect new limit
- Allows users to create up to 5000 targets in a single batch operation
2026-01-06 20:39:31 +08:00
github-actions[bot]
ab58cf0d85 chore: bump version to v1.4.0 2026-01-06 09:31:29 +00:00
yyhuni
fb0111adf2 Merge branch 'dev' 2026-01-06 17:27:35 +08:00
yyhuni
161ee9a2b1 Merge branch 'dev' 2026-01-06 17:27:16 +08:00
yyhuni
0cf75585d5 docs: add blacklist filtering documentation to README 2026-01-06 17:25:31 +08:00
yyhuni
1d8d5f51d9 feat(blacklist): add mock data and service integration for blacklist management
- Create new blacklist mock data module with global and target-specific patterns
- Add mock functions for getting and updating global blacklist rules
- Add mock functions for getting and updating target-specific blacklist rules
- Integrate mock blacklist endpoints into global-blacklist.service.ts
- Integrate mock blacklist endpoints into target.service.ts
- Export blacklist mock functions from main mock index
- Enable testing of blacklist management UI without backend API
2026-01-06 17:08:51 +08:00
github-actions[bot]
3f8de07c8c chore: bump version to v1.4.0-dev 2026-01-06 09:02:31 +00:00
yyhuni
cd5c2b9f11 chore(notifications): remove test notification endpoint
- Remove test notification route from URL patterns
- Delete notifications_test view function and associated logic
- Clean up unused test endpoint that was used for development purposes
- Simplify notification API surface by removing non-production code
2026-01-06 16:57:29 +08:00
yyhuni
54786c22dd feat(scan): increase max batch size for quick scan operations
- Increase MAX_BATCH_SIZE from 1000 to 5000 in QuickScanSerializer
- Allows processing of larger batch scans in a single operation
- Improves throughput for bulk scanning workflows
2026-01-06 16:55:28 +08:00
yyhuni
d468f975ab feat(scan): implement fallback chain for endpoint URL export
- Add fallback chain for URL data sources: Endpoint → WebSite → default generation
- Import WebSite model and Path utility for enhanced file handling
- Create output directory automatically if it doesn't exist
- Add "source" field to return value indicating data origin (endpoint/website/default)
- Update docstring to document the three-tier fallback priority system
- Implement sequential export attempts with logging at each fallback stage
- Improve error handling and data source transparency for endpoint exports
2026-01-06 16:30:42 +08:00
yyhuni
a85a12b8ad feat(asset): create incremental materialized views for asset search
- Add pg_ivm extension for incremental materialized view maintenance
- Create asset_search_view for Website model with optimized columns for full-text search
- Create endpoint_search_view for Endpoint model with matching search schema
- Add database indexes on host, url, title, status_code, and created_at columns for both views
- Enable high-performance asset search queries with automatic view maintenance
2026-01-06 16:22:24 +08:00
yyhuni
a8b0d97b7b feat(targets): update navigation routes and enhance add button UI
- Change target detail navigation route from `/website/` to `/overview/`
- Update TargetNameCell click handler to use new overview route
- Update TargetRowActions onView handler to use new overview route
- Add IconPlus icon import from @tabler/icons-react
- Add icon to create target button for improved visual clarity
- Improves navigation consistency and button affordance in targets table
2026-01-06 16:14:54 +08:00
yyhuni
b8504921c2 feat(fingerprints): add JSONL format support for Goby fingerprint imports
- Add support for JSONL format parsing in addition to standard JSON for Goby fingerprints
- Update GobyFingerprintService to validate both standard format (name/logic/rule) and JSONL format (product/rule)
- Implement _parse_json_content() method to handle both JSON and JSONL file formats with proper error handling
- Add JSONL parsing logic in frontend import dialog with per-line validation and error reporting
- Update file import endpoint documentation to indicate JSONL format support
- Improve error messages for encoding and parsing failures to aid user debugging
- Enable seamless import of Goby fingerprint data from multiple source formats
2026-01-06 16:10:14 +08:00
yyhuni
ecfc1822fb style(target): update vulnerability icon color to muted foreground
- Change ShieldAlert icon color from red-500 to muted-foreground in target overview
- Improves visual consistency with design system color palette
- Reduces visual emphasis on vulnerability section for better UI balance
2026-01-06 12:01:59 +08:00
github-actions[bot]
81633642e6 chore: bump version to v1.3.16-dev 2026-01-06 03:55:16 +00:00
yyhuni
d1ec9b7f27 feat(settings): add global blacklist management page and UI integration
- Add new global blacklist settings page with pattern management interface
- Create useGlobalBlacklist and useUpdateGlobalBlacklist React Query hooks for data fetching and mutations
- Implement global-blacklist.service.ts with API integration for blacklist operations
- Add Global Blacklist navigation item to app sidebar with Ban icon
- Add internationalization support for blacklist UI with English and Chinese translations
- Include pattern matching rules documentation (domain wildcards, keywords, IP addresses, CIDR ranges)
- Add loading states, error handling, and success/error toast notifications
- Implement textarea input with change tracking and save button state management
2026-01-06 11:50:31 +08:00
yyhuni
2a3d9b4446 feat(target): add initiate scan button and improve overview layout
- Add "Initiate Scan" button to target overview header with Play icon
- Implement InitiateScanDialog component integration for quick scan initiation
- Improve scheduled scans card layout with flexbox for better vertical spacing
- Reduce displayed scheduled scans from 3 to 2 items for better UI balance
- Enhance vulnerability statistics card styling with proper flex layout
- Add state management for scan dialog open/close functionality
- Update i18n translations (en.json, zh.json) with "initiateScan" label
- Refactor target info section to accommodate new action button with justify-between layout
- Improve empty state centering in scheduled scans card using flex layout
2026-01-06 11:10:47 +08:00
yyhuni
9b63203b5a refactor(migrations,frontend,backend): reorganize app structure and enhance target management UI
- Consolidate common migrations into dedicated common app module
- Remove asset search materialized view migration (0002) and simplify migration structure
- Reorganize target detail page with new overview and settings sub-routes
- Add target overview component displaying key asset information
- Add target settings component for configuration management
- Enhance scan history UI with improved data table and column definitions
- Update scheduled scan dialog with better form handling
- Refactor target service with improved API integration
- Update scan hooks (use-scans, use-scheduled-scans) with better state management
- Add internationalization strings for new target management features
- Update Docker initialization and startup scripts for new app structure
- Bump Django to 5.2.7 and update dependencies in requirements.txt
- Add WeChat group contact information to README
- Improve UI tabs component with better accessibility and styling
2026-01-06 10:42:38 +08:00
yyhuni
6ff86e14ec Update README.md 2026-01-06 09:59:55 +08:00
yyhuni
4c1282e9bb Complete blacklist backend logic 2026-01-05 23:26:50 +08:00
yyhuni
ba3a9b709d feat(system-logs): enhance ANSI log viewer with log level colorization
- Add LOG_LEVEL_COLORS configuration mapping for DEBUG, INFO, WARNING, WARN, ERROR, and CRITICAL levels
- Implement hasAnsiCodes() function to detect presence of ANSI escape sequences in log content
- Add colorizeLogContent() function to parse plain text logs and apply color styling based on log levels
- Support dual-mode log parsing: ANSI color codes and plain text log level detection
- Rename converter to ansiConverter for clarity and consistency
- Change newline handling from true to false for manual line break control
- Apply color-coded styling to timestamps (gray), log levels (level-specific colors), and messages
- Add bold font-weight styling for CRITICAL level logs for better visibility
2026-01-05 16:27:31 +08:00
github-actions[bot]
283b28b46a chore: bump version to v1.3.15-dev 2026-01-05 02:05:29 +00:00
145 changed files with 6959 additions and 1142 deletions

View File

@@ -27,7 +27,7 @@
## 🌐 Online Demo
👉 **[https://xingrin.vercel.app/](https://xingrin.vercel.app/)**
**[https://xingrin.vercel.app/](https://xingrin.vercel.app/)**
> ⚠️ For UI demonstration only; no backend database is connected
@@ -69,11 +69,21 @@
- **Custom flows** - Configure scan flows in YAML for flexible orchestration
- **Scheduled scans** - Cron-expression configuration for automated periodic scanning
### 🚫 Blacklist Filtering
- **Two-tier blacklist** - Global blacklist plus target-level blacklist for flexible scan-scope control
- **Smart rule detection** - Automatically recognizes domain wildcards (`*.gov`), IPs, and CIDR ranges
### 🔖 Fingerprinting
- **Multi-source fingerprint library** - Ships with 27,000+ fingerprint rules from EHole, Goby, Wappalyzer, Fingers, FingerPrintHub, ARL, and more
- **Automatic detection** - Runs automatically during the scan flow to identify web application tech stacks
- **Fingerprint management** - Query, import, and export fingerprint rules
### 📸 Site Screenshots
- **Automatic screenshots** - Uses Playwright to capture discovered websites automatically
- **WebP format** - High-compression storage; a 500 KB image compresses down to just tens of KB
- **Multi-source support** - Screenshots URLs from different sources such as Websites and Endpoints
- **Asset linkage** - Screenshots are automatically synced to the asset tables for easy viewing
#### Scan Flow Architecture
The full scan flow includes subdomain discovery, port scanning, site discovery, fingerprinting, URL collection, directory scanning, and vulnerability scanning stages
@@ -95,6 +105,7 @@ flowchart LR
direction TB
URL["URL 收集<br/>waymore, katana"]
DIR["目录扫描<br/>ffuf"]
SCREENSHOT["站点截图<br/>playwright"]
end
subgraph STAGE3["Stage 3: Vulnerability Detection"]
@@ -119,6 +130,7 @@ flowchart LR
style FINGER fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
style URL fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
style DIR fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
style SCREENSHOT fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
style VULN fill:#f0b27a,stroke:#e67e22,stroke-width:1px,color:#fff
```
@@ -225,7 +237,6 @@ sudo ./install.sh --mirror
> **💡 About the --mirror flag**
> - Automatically configures Docker registry mirrors (mainland-China sources)
> - Speeds up Git repository cloning (Nuclei templates, etc.)
> - Greatly speeds up installation and avoids network timeouts
### Accessing the Services
@@ -254,9 +265,36 @@ sudo ./uninstall.sh
## 📧 Contact
- WeChat Official Account: **塔罗安全学苑**
- WeChat group: open the menu at the bottom of the Official Account to find the discussion-group entry; if the link has expired, DM me and I'll add you
<img src="docs/wechat-qrcode.png" alt="WeChat Official Account QR code" width="200">
### 🎁 Follow the Official Account to Get Fingerprint Libraries for Free
| Fingerprint library | Count |
|--------|------|
| ehole.json | 21,977 |
| ARL.yaml | 9,264 |
| goby.json | 7,086 |
| FingerprintHub.json | 3,147 |
> 💡 Follow the Official Account and reply 「指纹」 ("fingerprint") to receive them
## ☕ Sponsorship
If this project helps you, consider buying me a Mixue Bingcheng (thank you!); your stars and sponsorship are what keep the free updates coming
<p>
<img src="docs/wx_pay.jpg" alt="微信支付" width="200">
<img src="docs/zfb_pay.jpg" alt="支付宝" width="200">
</p>
### 🙏 Thanks to the Following Sponsors
| Nickname | Amount |
|------|------|
| X闭关中 | ¥88 |
## ⚠️ Disclaimer

View File

@@ -1 +1 @@
v1.3.14
v1.5.2

View File

@@ -1,4 +1,4 @@
# Generated by Django 5.2.7 on 2026-01-02 04:45
# Generated by Django 5.2.7 on 2026-01-06 00:55
import django.contrib.postgres.fields
import django.contrib.postgres.indexes

View File

@@ -1,196 +0,0 @@
"""
Create asset-search IMMVs (incrementally maintained materialized views)
Uses the pg_ivm extension to create IMMVs, which update incrementally as data changes, with no manual refresh needed.
Contains:
1. asset_search_view - search view for the Website model
2. endpoint_search_view - search view for the Endpoint model
Important limitation:
⚠️ pg_ivm does not support array-typed fields (ArrayField), because it compares them via the anyarray pseudo-type,
and PostgreSQL cannot determine the element type of an empty array, raising:
"cannot determine element type of \"anyarray\" argument"
All ArrayField columns (tech, matched_gf_patterns, etc.) have therefore been removed from the IMMVs;
searches fetch them by JOINing the source tables.
To add a new array field:
1. Do not include it in the IMMV
2. Fetch it in the search service by JOINing the source table
"""
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('asset', '0001_initial'),
]
operations = [
# 1. Ensure the pg_trgm extension is enabled (for fuzzy text-search indexes)
migrations.RunSQL(
sql="CREATE EXTENSION IF NOT EXISTS pg_trgm;",
reverse_sql="-- pg_trgm extension kept for other uses"
),
# 2. Ensure the pg_ivm extension is enabled (for IMMV incremental maintenance)
migrations.RunSQL(
sql="CREATE EXTENSION IF NOT EXISTS pg_ivm;",
reverse_sql="-- pg_ivm extension kept for other uses"
),
# ==================== Website IMMV ====================
# 2. Create the asset_search_view IMMV
# ⚠️ Note: excludes the w.tech array column (pg_ivm does not support ArrayField)
# Array columns are fetched in search_service.py by JOINing the website table
migrations.RunSQL(
sql="""
SELECT pgivm.create_immv('asset_search_view', $$
SELECT
w.id,
w.url,
w.host,
w.title,
w.status_code,
w.response_headers,
w.response_body,
w.content_type,
w.content_length,
w.webserver,
w.location,
w.vhost,
w.created_at,
w.target_id
FROM website w
$$);
""",
reverse_sql="SELECT pgivm.drop_immv('asset_search_view');"
),
# 3. Create indexes on asset_search_view
migrations.RunSQL(
sql="""
-- Unique index
CREATE UNIQUE INDEX IF NOT EXISTS asset_search_view_id_idx
ON asset_search_view (id);
-- Fuzzy-search index on host
CREATE INDEX IF NOT EXISTS asset_search_view_host_trgm_idx
ON asset_search_view USING gin (host gin_trgm_ops);
-- Fuzzy-search index on title
CREATE INDEX IF NOT EXISTS asset_search_view_title_trgm_idx
ON asset_search_view USING gin (title gin_trgm_ops);
-- Fuzzy-search index on url
CREATE INDEX IF NOT EXISTS asset_search_view_url_trgm_idx
ON asset_search_view USING gin (url gin_trgm_ops);
-- Fuzzy-search index on response_headers
CREATE INDEX IF NOT EXISTS asset_search_view_headers_trgm_idx
ON asset_search_view USING gin (response_headers gin_trgm_ops);
-- Fuzzy-search index on response_body
CREATE INDEX IF NOT EXISTS asset_search_view_body_trgm_idx
ON asset_search_view USING gin (response_body gin_trgm_ops);
-- Index on status_code
CREATE INDEX IF NOT EXISTS asset_search_view_status_idx
ON asset_search_view (status_code);
-- Sort index on created_at
CREATE INDEX IF NOT EXISTS asset_search_view_created_idx
ON asset_search_view (created_at DESC);
""",
reverse_sql="""
DROP INDEX IF EXISTS asset_search_view_id_idx;
DROP INDEX IF EXISTS asset_search_view_host_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_title_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_url_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_headers_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_body_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_status_idx;
DROP INDEX IF EXISTS asset_search_view_created_idx;
"""
),
# ==================== Endpoint IMMV ====================
# 4. Create the endpoint_search_view IMMV
# ⚠️ Note: excludes the e.tech and e.matched_gf_patterns array columns (pg_ivm does not support ArrayField)
# Array columns are fetched in search_service.py by JOINing the endpoint table
migrations.RunSQL(
sql="""
SELECT pgivm.create_immv('endpoint_search_view', $$
SELECT
e.id,
e.url,
e.host,
e.title,
e.status_code,
e.response_headers,
e.response_body,
e.content_type,
e.content_length,
e.webserver,
e.location,
e.vhost,
e.created_at,
e.target_id
FROM endpoint e
$$);
""",
reverse_sql="SELECT pgivm.drop_immv('endpoint_search_view');"
),
# 5. Create indexes on endpoint_search_view
migrations.RunSQL(
sql="""
-- Unique index
CREATE UNIQUE INDEX IF NOT EXISTS endpoint_search_view_id_idx
ON endpoint_search_view (id);
-- Fuzzy-search index on host
CREATE INDEX IF NOT EXISTS endpoint_search_view_host_trgm_idx
ON endpoint_search_view USING gin (host gin_trgm_ops);
-- Fuzzy-search index on title
CREATE INDEX IF NOT EXISTS endpoint_search_view_title_trgm_idx
ON endpoint_search_view USING gin (title gin_trgm_ops);
-- Fuzzy-search index on url
CREATE INDEX IF NOT EXISTS endpoint_search_view_url_trgm_idx
ON endpoint_search_view USING gin (url gin_trgm_ops);
-- Fuzzy-search index on response_headers
CREATE INDEX IF NOT EXISTS endpoint_search_view_headers_trgm_idx
ON endpoint_search_view USING gin (response_headers gin_trgm_ops);
-- Fuzzy-search index on response_body
CREATE INDEX IF NOT EXISTS endpoint_search_view_body_trgm_idx
ON endpoint_search_view USING gin (response_body gin_trgm_ops);
-- Index on status_code
CREATE INDEX IF NOT EXISTS endpoint_search_view_status_idx
ON endpoint_search_view (status_code);
-- Sort index on created_at
CREATE INDEX IF NOT EXISTS endpoint_search_view_created_idx
ON endpoint_search_view (created_at DESC);
""",
reverse_sql="""
DROP INDEX IF EXISTS endpoint_search_view_id_idx;
DROP INDEX IF EXISTS endpoint_search_view_host_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_title_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_url_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_headers_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_body_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_status_idx;
DROP INDEX IF EXISTS endpoint_search_view_created_idx;
"""
),
]

View File

@@ -0,0 +1,104 @@
"""
Create asset-search materialized views (incrementally maintained via pg_ivm)
These views back the asset-search feature, providing high-performance full-text search.
"""
from django.db import migrations
class Migration(migrations.Migration):
"""创建资产搜索所需的增量物化视图"""
dependencies = [
('asset', '0001_initial'),
]
operations = [
# 1. Ensure the pg_ivm extension is installed
migrations.RunSQL(
sql="CREATE EXTENSION IF NOT EXISTS pg_ivm;",
reverse_sql="DROP EXTENSION IF EXISTS pg_ivm;",
),
# 2. Create the Website search view
# Note: pg_ivm does not support ArrayField, so the tech column is fetched by JOINing the source table
migrations.RunSQL(
sql="""
SELECT pgivm.create_immv('asset_search_view', $$
SELECT
w.id,
w.url,
w.host,
w.title,
w.status_code,
w.response_headers,
w.response_body,
w.content_type,
w.content_length,
w.webserver,
w.location,
w.vhost,
w.created_at,
w.target_id
FROM website w
$$);
""",
reverse_sql="DROP TABLE IF EXISTS asset_search_view CASCADE;",
),
# 3. Create the Endpoint search view
migrations.RunSQL(
sql="""
SELECT pgivm.create_immv('endpoint_search_view', $$
SELECT
e.id,
e.url,
e.host,
e.title,
e.status_code,
e.response_headers,
e.response_body,
e.content_type,
e.content_length,
e.webserver,
e.location,
e.vhost,
e.created_at,
e.target_id
FROM endpoint e
$$);
""",
reverse_sql="DROP TABLE IF EXISTS endpoint_search_view CASCADE;",
),
# 4. Create indexes on the search views (to speed up queries)
migrations.RunSQL(
sql=[
# Website search view indexes
"CREATE INDEX IF NOT EXISTS asset_search_view_host_idx ON asset_search_view (host);",
"CREATE INDEX IF NOT EXISTS asset_search_view_url_idx ON asset_search_view (url);",
"CREATE INDEX IF NOT EXISTS asset_search_view_title_idx ON asset_search_view (title);",
"CREATE INDEX IF NOT EXISTS asset_search_view_status_idx ON asset_search_view (status_code);",
"CREATE INDEX IF NOT EXISTS asset_search_view_created_idx ON asset_search_view (created_at DESC);",
# Endpoint search view indexes
"CREATE INDEX IF NOT EXISTS endpoint_search_view_host_idx ON endpoint_search_view (host);",
"CREATE INDEX IF NOT EXISTS endpoint_search_view_url_idx ON endpoint_search_view (url);",
"CREATE INDEX IF NOT EXISTS endpoint_search_view_title_idx ON endpoint_search_view (title);",
"CREATE INDEX IF NOT EXISTS endpoint_search_view_status_idx ON endpoint_search_view (status_code);",
"CREATE INDEX IF NOT EXISTS endpoint_search_view_created_idx ON endpoint_search_view (created_at DESC);",
],
reverse_sql=[
"DROP INDEX IF EXISTS asset_search_view_host_idx;",
"DROP INDEX IF EXISTS asset_search_view_url_idx;",
"DROP INDEX IF EXISTS asset_search_view_title_idx;",
"DROP INDEX IF EXISTS asset_search_view_status_idx;",
"DROP INDEX IF EXISTS asset_search_view_created_idx;",
"DROP INDEX IF EXISTS endpoint_search_view_host_idx;",
"DROP INDEX IF EXISTS endpoint_search_view_url_idx;",
"DROP INDEX IF EXISTS endpoint_search_view_title_idx;",
"DROP INDEX IF EXISTS endpoint_search_view_status_idx;",
"DROP INDEX IF EXISTS endpoint_search_view_created_idx;",
],
),
]
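For reference, a minimal sketch of how a search query might consume these views; the actual search_service.py is not part of this diff, so the helper name and shape below are hypothetical, and a configured Django connection is assumed. The JOIN back to the website table recovers the array-typed tech column that pg_ivm cannot maintain, per the notes above:

```python
# Hypothetical search helper (not from this changeset): query the IMMV, then
# JOIN the source table for array-typed columns that pg_ivm cannot maintain.
from django.db import connection

def search_websites(term: str, limit: int = 50) -> list[dict]:
    """Fuzzy-match host/title/url against asset_search_view."""
    sql = """
        SELECT v.id, v.url, v.host, v.title, v.status_code, w.tech
        FROM asset_search_view v
        JOIN website w ON w.id = v.id  -- ArrayField columns come from the source table
        WHERE v.host ILIKE %(p)s OR v.title ILIKE %(p)s OR v.url ILIKE %(p)s
        ORDER BY v.created_at DESC
        LIMIT %(limit)s
    """
    with connection.cursor() as cur:
        cur.execute(sql, {'p': f'%{term}%', 'limit': limit})
        columns = [col[0] for col in cur.description]
        return [dict(zip(columns, row)) for row in cur.fetchall()]
```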

View File

@@ -0,0 +1,53 @@
# Generated by Django 5.2.7 on 2026-01-07 02:21
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('asset', '0002_create_search_views'),
('scan', '0001_initial'),
('targets', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Screenshot',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.TextField(help_text='URL the screenshot corresponds to')),
('image', models.BinaryField(help_text='Screenshot WebP binary data (compressed)')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Creation time')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Update time')),
('target', models.ForeignKey(help_text='Owning target', on_delete=django.db.models.deletion.CASCADE, related_name='screenshots', to='targets.target')),
],
options={
'verbose_name': 'Screenshot',
'verbose_name_plural': 'Screenshots',
'db_table': 'screenshot',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['target'], name='screenshot_target__2f01f6_idx'), models.Index(fields=['-created_at'], name='screenshot_created_c0ad4b_idx')],
'constraints': [models.UniqueConstraint(fields=('target', 'url'), name='unique_screenshot_per_target')],
},
),
migrations.CreateModel(
name='ScreenshotSnapshot',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.TextField(help_text='URL the screenshot corresponds to')),
('image', models.BinaryField(help_text='Screenshot WebP binary data (compressed)')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Creation time')),
('scan', models.ForeignKey(help_text='Owning scan task', on_delete=django.db.models.deletion.CASCADE, related_name='screenshot_snapshots', to='scan.scan')),
],
options={
'verbose_name': 'Screenshot snapshot',
'verbose_name_plural': 'Screenshot snapshots',
'db_table': 'screenshot_snapshot',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['scan'], name='screenshot__scan_id_fb8c4d_idx'), models.Index(fields=['-created_at'], name='screenshot__created_804117_idx')],
'constraints': [models.UniqueConstraint(fields=('scan', 'url'), name='unique_screenshot_per_scan_snapshot')],
},
),
]

View File

@@ -0,0 +1,23 @@
# Generated by Django 5.2.7 on 2026-01-07 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('asset', '0003_add_screenshot_models'),
]
operations = [
migrations.AddField(
model_name='screenshot',
name='status_code',
field=models.SmallIntegerField(blank=True, help_text='HTTP response status code', null=True),
),
migrations.AddField(
model_name='screenshotsnapshot',
name='status_code',
field=models.SmallIntegerField(blank=True, help_text='HTTP response status code', null=True),
),
]

View File

@@ -20,6 +20,12 @@ from .snapshot_models import (
VulnerabilitySnapshot,
)
# Screenshot models
from .screenshot_models import (
Screenshot,
ScreenshotSnapshot,
)
# Statistics models
from .statistics_models import AssetStatistics, StatisticsHistory
@@ -39,6 +45,9 @@ __all__ = [
'HostPortMappingSnapshot',
'EndpointSnapshot',
'VulnerabilitySnapshot',
# Screenshot models
'Screenshot',
'ScreenshotSnapshot',
# Statistics models
'AssetStatistics',
'StatisticsHistory',

View File

@@ -0,0 +1,80 @@
from django.db import models
class ScreenshotSnapshot(models.Model):
"""
Screenshot snapshot
Records: website screenshots captured during a given scan
"""
id = models.AutoField(primary_key=True)
scan = models.ForeignKey(
'scan.Scan',
on_delete=models.CASCADE,
related_name='screenshot_snapshots',
help_text='Owning scan task'
)
url = models.TextField(help_text='URL the screenshot corresponds to')
status_code = models.SmallIntegerField(null=True, blank=True, help_text='HTTP response status code')
image = models.BinaryField(help_text='Screenshot WebP binary data (compressed)')
created_at = models.DateTimeField(auto_now_add=True, help_text='Creation time')
class Meta:
db_table = 'screenshot_snapshot'
verbose_name = 'Screenshot snapshot'
verbose_name_plural = 'Screenshot snapshots'
ordering = ['-created_at']
indexes = [
models.Index(fields=['scan']),
models.Index(fields=['-created_at']),
]
constraints = [
models.UniqueConstraint(
fields=['scan', 'url'],
name='unique_screenshot_per_scan_snapshot'
),
]
def __str__(self):
return f'{self.url} (Scan #{self.scan_id})'
class Screenshot(models.Model):
"""
Screenshot asset
Stores: the target's latest screenshots (synced from snapshots)
"""
id = models.AutoField(primary_key=True)
target = models.ForeignKey(
'targets.Target',
on_delete=models.CASCADE,
related_name='screenshots',
help_text='Owning target'
)
url = models.TextField(help_text='URL the screenshot corresponds to')
status_code = models.SmallIntegerField(null=True, blank=True, help_text='HTTP response status code')
image = models.BinaryField(help_text='Screenshot WebP binary data (compressed)')
created_at = models.DateTimeField(auto_now_add=True, help_text='Creation time')
updated_at = models.DateTimeField(auto_now=True, help_text='Update time')
class Meta:
db_table = 'screenshot'
verbose_name = 'Screenshot'
verbose_name_plural = 'Screenshots'
ordering = ['-created_at']
indexes = [
models.Index(fields=['target']),
models.Index(fields=['-created_at']),
]
constraints = [
models.UniqueConstraint(
fields=['target', 'url'],
name='unique_screenshot_per_target'
),
]
def __str__(self):
return f'{self.url} (Target #{self.target_id})'

View File

@@ -7,6 +7,7 @@ from .models.snapshot_models import (
EndpointSnapshot,
VulnerabilitySnapshot,
)
from .models.screenshot_models import Screenshot, ScreenshotSnapshot
# Note: the IPAddress and Port models were refactored into HostPortMapping
@@ -290,3 +291,23 @@ class EndpointSnapshotSerializer(serializers.ModelSerializer):
'created_at',
]
read_only_fields = fields
# ==================== Screenshot serializers ====================
class ScreenshotListSerializer(serializers.ModelSerializer):
"""截图资产列表序列化器(不包含 image 字段)"""
class Meta:
model = Screenshot
fields = ['id', 'url', 'status_code', 'created_at', 'updated_at']
read_only_fields = fields
class ScreenshotSnapshotListSerializer(serializers.ModelSerializer):
"""截图快照列表序列化器(不包含 image 字段)"""
class Meta:
model = ScreenshotSnapshot
fields = ['id', 'url', 'status_code', 'created_at']
read_only_fields = fields

View File

@@ -0,0 +1,186 @@
"""
Playwright screenshot service
Captures website screenshots asynchronously in batches with Playwright
"""
import asyncio
import logging
from typing import Optional, AsyncGenerator
logger = logging.getLogger(__name__)
class PlaywrightScreenshotService:
"""Playwright 截图服务 - 异步多 Page 并发截图"""
# Built-in defaults (not exposed to users)
DEFAULT_VIEWPORT_WIDTH = 1920
DEFAULT_VIEWPORT_HEIGHT = 1080
DEFAULT_TIMEOUT = 30000  # milliseconds
DEFAULT_JPEG_QUALITY = 85
def __init__(
self,
viewport_width: int = DEFAULT_VIEWPORT_WIDTH,
viewport_height: int = DEFAULT_VIEWPORT_HEIGHT,
timeout: int = DEFAULT_TIMEOUT,
concurrency: int = 5
):
"""
Initialize the Playwright screenshot service
Args:
viewport_width: viewport width (pixels)
viewport_height: viewport height (pixels)
timeout: page-load timeout (milliseconds)
concurrency: number of concurrent screenshots
"""
self.viewport_width = viewport_width
self.viewport_height = viewport_height
self.timeout = timeout
self.concurrency = concurrency
async def capture_screenshot(self, url: str, page) -> tuple[Optional[bytes], Optional[int]]:
"""
Capture a screenshot of a single URL
Args:
url: target URL
page: Playwright Page object
Returns:
(screenshot_bytes, status_code) tuple
- screenshot_bytes: JPEG screenshot bytes, or None on failure
- status_code: HTTP response status code, or None on failure
"""
status_code = None
try:
# Try to load the page; proceed to the screenshot even on an error status code
try:
response = await page.goto(url, timeout=self.timeout, wait_until='networkidle')
if response:
status_code = response.status
except Exception as goto_error:
# Page load failed (4xx/5xx or another error), but the page may be partially rendered
# Still attempt a screenshot to capture the error page
logger.debug("Page load error, attempting screenshot anyway: %s, error: %s", url, str(goto_error)[:50])
# Attempt the screenshot (even if goto failed)
screenshot_bytes = await page.screenshot(
type='jpeg',
quality=self.DEFAULT_JPEG_QUALITY,
full_page=False
)
return (screenshot_bytes, status_code)
except asyncio.TimeoutError:
logger.warning("截图超时: %s", url)
return (None, None)
except Exception as e:
logger.warning("截图失败: %s, 错误: %s", url, str(e)[:100])
return (None, None)
async def _capture_with_semaphore(
self,
url: str,
context,
semaphore: asyncio.Semaphore
) -> tuple[str, Optional[bytes], Optional[int]]:
"""
Screenshot task with semaphore-based concurrency control
Args:
url: target URL
context: Playwright BrowserContext
semaphore: concurrency-control semaphore
Returns:
(url, screenshot_bytes, status_code) tuple
"""
async with semaphore:
page = await context.new_page()
try:
screenshot_bytes, status_code = await self.capture_screenshot(url, page)
return (url, screenshot_bytes, status_code)
finally:
await page.close()
async def capture_batch(
self,
urls: list[str]
) -> AsyncGenerator[tuple[str, Optional[bytes], Optional[int]], None]:
"""
Capture screenshots in batch (async generator)
Uses a single BrowserContext with multiple concurrent Pages
Concurrency is bounded by a Semaphore
Args:
urls: list of URLs
Yields:
(url, screenshot_bytes, status_code) tuples
"""
if not urls:
return
from playwright.async_api import async_playwright
async with async_playwright() as p:
# Launch the browser (headless mode)
browser = await p.chromium.launch(
headless=True,
args=[
'--no-sandbox',
'--disable-setuid-sandbox',
'--disable-dev-shm-usage',
'--disable-gpu'
]
)
try:
# Create a single context
context = await browser.new_context(
viewport={
'width': self.viewport_width,
'height': self.viewport_height
},
ignore_https_errors=True,
user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
)
# Bound concurrency with a Semaphore
semaphore = asyncio.Semaphore(self.concurrency)
# Create all tasks
tasks = [
self._capture_with_semaphore(url, context, semaphore)
for url in urls
]
# Stream results back as they complete via as_completed
for coro in asyncio.as_completed(tasks):
result = await coro
yield result
await context.close()
finally:
await browser.close()
async def capture_batch_collect(
self,
urls: list[str]
) -> list[tuple[str, Optional[bytes], Optional[int]]]:
"""
Capture screenshots in batch (collect all results)
Args:
urls: list of URLs
Returns:
[(url, screenshot_bytes, status_code), ...] list
"""
results = []
async for result in self.capture_batch(urls):
results.append(result)
return results
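A minimal usage sketch for the service above, assuming Playwright and its Chromium browser are installed (`pip install playwright && playwright install chromium`); the URLs are illustrative:

```python
import asyncio

async def main():
    service = PlaywrightScreenshotService(concurrency=3)
    urls = ['https://example.com', 'https://example.org']
    # Streaming mode: results are yielded as each page finishes
    async for url, image_bytes, status_code in service.capture_batch(urls):
        if image_bytes is None:
            print(f'failed: {url}')
        else:
            print(f'{url} -> HTTP {status_code}, {len(image_bytes)} bytes of JPEG')

asyncio.run(main())
```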

View File

@@ -0,0 +1,185 @@
"""
Screenshot service
Handles screenshot compression, persistence, and syncing
"""
import io
import logging
import os
from typing import Optional
from PIL import Image
logger = logging.getLogger(__name__)
class ScreenshotService:
"""截图服务 - 负责压缩、保存和同步"""
def __init__(self, max_width: int = 800, target_kb: int = 100):
"""
Initialize the screenshot service
Args:
max_width: maximum width (pixels)
target_kb: target file size (KB)
"""
self.max_width = max_width
self.target_kb = target_kb
def compress_screenshot(self, image_path: str) -> Optional[bytes]:
"""
Compress a screenshot to WebP format
Args:
image_path: path to the PNG screenshot file
Returns:
Compressed WebP bytes, or None on failure
"""
if not os.path.exists(image_path):
logger.warning(f"截图文件不存在: {image_path}")
return None
try:
with Image.open(image_path) as img:
return self._compress_image(img)
except Exception as e:
logger.error(f"压缩截图失败: {image_path}, 错误: {e}")
return None
def compress_from_bytes(self, image_bytes: bytes) -> Optional[bytes]:
"""
Compress screenshot bytes to WebP format
Args:
image_bytes: JPEG/PNG image bytes
Returns:
Compressed WebP bytes, or None on failure
"""
if not image_bytes:
return None
try:
img = Image.open(io.BytesIO(image_bytes))
return self._compress_image(img)
except Exception as e:
logger.error(f"从字节压缩截图失败: {e}")
return None
def _compress_image(self, img: Image.Image) -> Optional[bytes]:
"""
Compress a PIL Image object to WebP format
Args:
img: PIL Image object
Returns:
Compressed WebP bytes
"""
try:
if img.mode in ('RGBA', 'P'):
img = img.convert('RGB')
width, height = img.size
if width > self.max_width:
ratio = self.max_width / width
new_size = (self.max_width, int(height * ratio))
img = img.resize(new_size, Image.Resampling.LANCZOS)
quality = 80
while quality >= 10:
buffer = io.BytesIO()
img.save(buffer, format='WEBP', quality=quality, method=6)
if len(buffer.getvalue()) <= self.target_kb * 1024:
return buffer.getvalue()
quality -= 10
return buffer.getvalue()
except Exception as e:
logger.error(f"压缩图片失败: {e}")
return None
def save_screenshot_snapshot(
self,
scan_id: int,
url: str,
image_data: bytes,
status_code: int | None = None
) -> bool:
"""
Save a screenshot snapshot to the ScreenshotSnapshot table
Args:
scan_id: scan ID
url: URL the screenshot corresponds to
image_data: compressed image bytes
status_code: HTTP response status code
Returns:
Whether the save succeeded
"""
from apps.asset.models import ScreenshotSnapshot
try:
ScreenshotSnapshot.objects.update_or_create(
scan_id=scan_id,
url=url,
defaults={'image': image_data, 'status_code': status_code}
)
return True
except Exception as e:
logger.error(f"保存截图快照失败: scan_id={scan_id}, url={url}, 错误: {e}")
return False
def sync_screenshots_to_asset(self, scan_id: int, target_id: int) -> int:
"""
Sync a scan's screenshot snapshots to the asset table
Args:
scan_id: scan ID
target_id: target ID
Returns:
Number of screenshots synced
"""
from apps.asset.models import Screenshot, ScreenshotSnapshot
snapshots = ScreenshotSnapshot.objects.filter(scan_id=scan_id)
count = 0
for snapshot in snapshots:
try:
Screenshot.objects.update_or_create(
target_id=target_id,
url=snapshot.url,
defaults={
'image': snapshot.image,
'status_code': snapshot.status_code
}
)
count += 1
except Exception as e:
logger.error(f"同步截图到资产表失败: url={snapshot.url}, 错误: {e}")
logger.info(f"同步截图完成: scan_id={scan_id}, target_id={target_id}, 数量={count}")
return count
def process_and_save_screenshot(self, scan_id: int, url: str, image_path: str) -> bool:
"""
Process and save a screenshot (compress + save snapshot)
Args:
scan_id: scan ID
url: URL the screenshot corresponds to
image_path: path to the PNG screenshot file
Returns:
Whether processing succeeded
"""
image_data = self.compress_screenshot(image_path)
if image_data is None:
return False
return self.save_screenshot_snapshot(scan_id, url, image_data)
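Taken together with PlaywrightScreenshotService above, the capture → compress → persist pipeline implied by this changeset might look like the following sketch. The real scan-flow wiring is not shown in this diff; the function name and the scan_id/target_id values are illustrative, and all captures are collected before touching the ORM so the database work stays in synchronous code:

```python
import asyncio

def screenshot_stage(scan_id: int, target_id: int, urls: list[str]) -> int:
    capture = PlaywrightScreenshotService()
    store = ScreenshotService(max_width=800, target_kb=100)
    # Collect all captures first, then persist from synchronous code
    results = asyncio.run(capture.capture_batch_collect(urls))
    for url, jpeg_bytes, status_code in results:
        if jpeg_bytes is None:
            continue  # capture failed; nothing to store
        webp = store.compress_from_bytes(jpeg_bytes)  # JPEG -> WebP, aimed at ~100 KB
        if webp:
            store.save_screenshot_snapshot(scan_id, url, webp, status_code)
    # Promote this scan's snapshots into the per-target asset table
    return store.sync_screenshots_to_asset(scan_id, target_id)
```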

View File

@@ -12,17 +12,22 @@ from .views import (
AssetStatisticsViewSet,
AssetSearchView,
AssetSearchExportView,
EndpointViewSet,
HostPortMappingViewSet,
ScreenshotViewSet,
)
# Create the DRF router
router = DefaultRouter()
# Register ViewSets
# Note: the IPAddress model was refactored into HostPortMapping; its routes have been removed
router.register(r'subdomains', SubdomainViewSet, basename='subdomain')
router.register(r'websites', WebSiteViewSet, basename='website')
router.register(r'directories', DirectoryViewSet, basename='directory')
router.register(r'endpoints', EndpointViewSet, basename='endpoint')
router.register(r'ip-addresses', HostPortMappingViewSet, basename='ip-address')
router.register(r'vulnerabilities', VulnerabilityViewSet, basename='vulnerability')
router.register(r'screenshots', ScreenshotViewSet, basename='screenshot')
router.register(r'statistics', AssetStatisticsViewSet, basename='asset-statistics')
urlpatterns = [

View File

@@ -18,6 +18,8 @@ from .asset_views import (
EndpointSnapshotViewSet,
HostPortMappingSnapshotViewSet,
VulnerabilitySnapshotViewSet,
ScreenshotViewSet,
ScreenshotSnapshotViewSet,
)
from .search_views import AssetSearchView, AssetSearchExportView
@@ -35,6 +37,8 @@ __all__ = [
'EndpointSnapshotViewSet',
'HostPortMappingSnapshotViewSet',
'VulnerabilitySnapshotViewSet',
'ScreenshotViewSet',
'ScreenshotSnapshotViewSet',
'AssetSearchView',
'AssetSearchExportView',
]

View File

@@ -260,6 +260,35 @@ class SubdomainViewSet(viewsets.ModelViewSet):
field_formatters=formatters
)
@action(detail=False, methods=['post'], url_path='bulk-delete')
def bulk_delete(self, request, **kwargs):
"""批量删除子域名
POST /api/assets/subdomains/bulk-delete/
请求体: {"ids": [1, 2, 3]}
响应: {"deletedCount": 3}
"""
ids = request.data.get('ids', [])
if not ids or not isinstance(ids, list):
return error_response(
code=ErrorCodes.VALIDATION_ERROR,
message='ids is required and must be a list',
status_code=status.HTTP_400_BAD_REQUEST
)
try:
from ..models import Subdomain
deleted_count, _ = Subdomain.objects.filter(id__in=ids).delete()
return success_response(data={'deletedCount': deleted_count})
except Exception as e:
logger.exception("批量删除子域名失败")
return error_response(
code=ErrorCodes.SERVER_ERROR,
message='Failed to delete subdomains',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
)
class WebSiteViewSet(viewsets.ModelViewSet):
"""站点管理 ViewSet
@@ -393,6 +422,35 @@ class WebSiteViewSet(viewsets.ModelViewSet):
field_formatters=formatters
)
@action(detail=False, methods=['post'], url_path='bulk-delete')
def bulk_delete(self, request, **kwargs):
"""批量删除网站
POST /api/assets/websites/bulk-delete/
请求体: {"ids": [1, 2, 3]}
响应: {"deletedCount": 3}
"""
ids = request.data.get('ids', [])
if not ids or not isinstance(ids, list):
return error_response(
code=ErrorCodes.VALIDATION_ERROR,
message='ids is required and must be a list',
status_code=status.HTTP_400_BAD_REQUEST
)
try:
from ..models import WebSite
deleted_count, _ = WebSite.objects.filter(id__in=ids).delete()
return success_response(data={'deletedCount': deleted_count})
except Exception as e:
logger.exception("批量删除网站失败")
return error_response(
code=ErrorCodes.SERVER_ERROR,
message='Failed to delete websites',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
)
class DirectoryViewSet(viewsets.ModelViewSet):
"""目录管理 ViewSet
@@ -521,6 +579,35 @@ class DirectoryViewSet(viewsets.ModelViewSet):
field_formatters=formatters
)
@action(detail=False, methods=['post'], url_path='bulk-delete')
def bulk_delete(self, request, **kwargs):
"""批量删除目录
POST /api/assets/directories/bulk-delete/
请求体: {"ids": [1, 2, 3]}
响应: {"deletedCount": 3}
"""
ids = request.data.get('ids', [])
if not ids or not isinstance(ids, list):
return error_response(
code=ErrorCodes.VALIDATION_ERROR,
message='ids is required and must be a list',
status_code=status.HTTP_400_BAD_REQUEST
)
try:
from ..models import Directory
deleted_count, _ = Directory.objects.filter(id__in=ids).delete()
return success_response(data={'deletedCount': deleted_count})
except Exception as e:
logger.exception("批量删除目录失败")
return error_response(
code=ErrorCodes.SERVER_ERROR,
message='Failed to delete directories',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
)
class EndpointViewSet(viewsets.ModelViewSet):
"""端点管理 ViewSet
@@ -655,6 +742,35 @@ class EndpointViewSet(viewsets.ModelViewSet):
field_formatters=formatters
)
@action(detail=False, methods=['post'], url_path='bulk-delete')
def bulk_delete(self, request, **kwargs):
"""批量删除端点
POST /api/assets/endpoints/bulk-delete/
请求体: {"ids": [1, 2, 3]}
响应: {"deletedCount": 3}
"""
ids = request.data.get('ids', [])
if not ids or not isinstance(ids, list):
return error_response(
code=ErrorCodes.VALIDATION_ERROR,
message='ids is required and must be a list',
status_code=status.HTTP_400_BAD_REQUEST
)
try:
from ..models import Endpoint
deleted_count, _ = Endpoint.objects.filter(id__in=ids).delete()
return success_response(data={'deletedCount': deleted_count})
except Exception as e:
logger.exception("批量删除端点失败")
return error_response(
code=ErrorCodes.SERVER_ERROR,
message='Failed to delete endpoints',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
)
class HostPortMappingViewSet(viewsets.ModelViewSet):
"""主机端口映射管理 ViewSetIP 地址聚合视图)
@@ -728,6 +844,38 @@ class HostPortMappingViewSet(viewsets.ModelViewSet):
field_formatters=formatters
)
@action(detail=False, methods=['post'], url_path='bulk-delete')
def bulk_delete(self, request, **kwargs):
"""批量删除 IP 地址映射
POST /api/assets/ip-addresses/bulk-delete/
请求体: {"ips": ["192.168.1.1", "10.0.0.1"]}
响应: {"deletedCount": 3}
注意:由于 IP 地址是聚合显示的,删除时传入 IP 列表,
会删除该 IP 下的所有 host:port 映射记录
"""
ips = request.data.get('ips', [])
if not ips or not isinstance(ips, list):
return error_response(
code=ErrorCodes.VALIDATION_ERROR,
message='ips is required and must be a list',
status_code=status.HTTP_400_BAD_REQUEST
)
try:
from ..models import HostPortMapping
deleted_count, _ = HostPortMapping.objects.filter(ip__in=ips).delete()
return success_response(data={'deletedCount': deleted_count})
except Exception as e:
logger.exception("批量删除 IP 地址映射失败")
return error_response(
code=ErrorCodes.SERVER_ERROR,
message='Failed to delete ip addresses',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
)
class VulnerabilityViewSet(viewsets.ModelViewSet):
"""漏洞资产管理 ViewSet只读
@@ -1077,3 +1225,162 @@ class VulnerabilitySnapshotViewSet(viewsets.ModelViewSet):
if scan_pk:
return self.service.get_by_scan(scan_pk, filter_query=filter_query)
return self.service.get_all(filter_query=filter_query)
# ==================== Screenshot ViewSets ====================
class ScreenshotViewSet(viewsets.ModelViewSet):
"""截图资产 ViewSet
支持两种访问方式:
1. 嵌套路由GET /api/targets/{target_pk}/screenshots/
2. 独立路由GET /api/screenshots/(全局查询)
支持智能过滤语法filter 参数):
- url="example" URL 模糊匹配
"""
from ..serializers import ScreenshotListSerializer
serializer_class = ScreenshotListSerializer
pagination_class = BasePagination
filter_backends = [filters.OrderingFilter]
ordering = ['-created_at']
def get_queryset(self):
"""根据是否有 target_pk 参数决定查询范围"""
from ..models import Screenshot
target_pk = self.kwargs.get('target_pk')
filter_query = self.request.query_params.get('filter', None)
queryset = Screenshot.objects.all()
if target_pk:
queryset = queryset.filter(target_id=target_pk)
if filter_query:
# Simple fuzzy URL match
queryset = queryset.filter(url__icontains=filter_query)
return queryset.order_by('-created_at')
@action(detail=True, methods=['get'], url_path='image')
def image(self, request, pk=None, **kwargs):
"""获取截图图片
GET /api/assets/screenshots/{id}/image/
返回 WebP 格式的图片二进制数据
"""
from django.http import HttpResponse
from ..models import Screenshot
try:
screenshot = Screenshot.objects.get(pk=pk)
if not screenshot.image:
return error_response(
code=ErrorCodes.NOT_FOUND,
message='Screenshot image not found',
status_code=status.HTTP_404_NOT_FOUND
)
response = HttpResponse(screenshot.image, content_type='image/webp')
response['Content-Disposition'] = f'inline; filename="screenshot_{pk}.webp"'
return response
except Screenshot.DoesNotExist:
return error_response(
code=ErrorCodes.NOT_FOUND,
message='Screenshot not found',
status_code=status.HTTP_404_NOT_FOUND
)
@action(detail=False, methods=['post'], url_path='bulk-delete')
def bulk_delete(self, request, **kwargs):
"""批量删除截图
POST /api/assets/screenshots/bulk-delete/
请求体: {"ids": [1, 2, 3]}
响应: {"deletedCount": 3}
"""
ids = request.data.get('ids', [])
if not ids or not isinstance(ids, list):
return error_response(
code=ErrorCodes.VALIDATION_ERROR,
message='ids is required and must be a list',
status_code=status.HTTP_400_BAD_REQUEST
)
try:
from ..models import Screenshot
deleted_count, _ = Screenshot.objects.filter(id__in=ids).delete()
return success_response(data={'deletedCount': deleted_count})
except Exception as e:
logger.exception("批量删除截图失败")
return error_response(
code=ErrorCodes.SERVER_ERROR,
message='Failed to delete screenshots',
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
)
class ScreenshotSnapshotViewSet(viewsets.ModelViewSet):
"""截图快照 ViewSet - 嵌套路由GET /api/scans/{scan_pk}/screenshots/
支持智能过滤语法filter 参数):
- url="example" URL 模糊匹配
"""
from ..serializers import ScreenshotSnapshotListSerializer
serializer_class = ScreenshotSnapshotListSerializer
pagination_class = BasePagination
filter_backends = [filters.OrderingFilter]
ordering = ['-created_at']
def get_queryset(self):
"""根据 scan_pk 参数查询"""
from ..models import ScreenshotSnapshot
scan_pk = self.kwargs.get('scan_pk')
filter_query = self.request.query_params.get('filter', None)
queryset = ScreenshotSnapshot.objects.all()
if scan_pk:
queryset = queryset.filter(scan_id=scan_pk)
if filter_query:
# Simple fuzzy URL match
queryset = queryset.filter(url__icontains=filter_query)
return queryset.order_by('-created_at')
@action(detail=True, methods=['get'], url_path='image')
def image(self, request, pk=None, **kwargs):
"""获取截图快照图片
GET /api/scans/{scan_pk}/screenshots/{id}/image/
返回 WebP 格式的图片二进制数据
"""
from django.http import HttpResponse
from ..models import ScreenshotSnapshot
try:
screenshot = ScreenshotSnapshot.objects.get(pk=pk)
if not screenshot.image:
return error_response(
code=ErrorCodes.NOT_FOUND,
message='Screenshot image not found',
status_code=status.HTTP_404_NOT_FOUND
)
response = HttpResponse(screenshot.image, content_type='image/webp')
response['Content-Disposition'] = f'inline; filename="screenshot_snapshot_{pk}.webp"'
return response
except ScreenshotSnapshot.DoesNotExist:
return error_response(
code=ErrorCodes.NOT_FOUND,
message='Screenshot snapshot not found',
status_code=status.HTTP_404_NOT_FOUND
)
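Client-side, the new endpoints can be exercised roughly as follows. This is only a sketch: the base URL and IDs are placeholders, an authenticated session is assumed, and the exact response envelope produced by success_response is not shown in this diff:

```python
import requests

BASE = 'http://localhost:8000'  # placeholder deployment URL
session = requests.Session()    # assumed to carry valid auth cookies/headers

# Bulk-delete three websites
resp = session.post(f'{BASE}/api/assets/websites/bulk-delete/',
                    json={'ids': [1, 2, 3]})
print(resp.json())  # includes {"deletedCount": 3} on success

# Fetch one screenshot as WebP binary and save it locally
img = session.get(f'{BASE}/api/assets/screenshots/42/image/')
if img.ok:
    with open('screenshot_42.webp', 'wb') as f:
        f.write(img.content)
```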

View File

@@ -0,0 +1,34 @@
# Generated by Django 5.2.7 on 2026-01-06 00:55
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('targets', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BlacklistRule',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('pattern', models.CharField(help_text='Rule pattern, e.g. *.gov, 10.0.0.0/8, 192.168.1.1', max_length=255)),
('rule_type', models.CharField(choices=[('domain', 'Domain'), ('ip', 'IP address'), ('cidr', 'CIDR range'), ('keyword', 'Keyword')], help_text='Rule type: domain, ip, cidr', max_length=20)),
('scope', models.CharField(choices=[('global', 'Global rule'), ('target', 'Target rule')], db_index=True, help_text='Scope: global or target', max_length=20)),
('description', models.CharField(blank=True, default='', help_text='Rule description', max_length=500)),
('created_at', models.DateTimeField(auto_now_add=True)),
('target', models.ForeignKey(blank=True, help_text='Associated Target (set only when scope=target)', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='blacklist_rules', to='targets.target')),
],
options={
'db_table': 'blacklist_rule',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['scope', 'rule_type'], name='blacklist_r_scope_6ff77f_idx'), models.Index(fields=['target', 'scope'], name='blacklist_r_target__191441_idx')],
'constraints': [models.UniqueConstraint(fields=('pattern', 'scope', 'target'), name='unique_blacklist_rule')],
},
),
]

View File

@@ -0,0 +1,4 @@
"""Common models"""
from apps.common.models.blacklist import BlacklistRule
__all__ = ['BlacklistRule']

View File

@@ -0,0 +1,71 @@
"""黑名单规则模型"""
from django.db import models
class BlacklistRule(models.Model):
"""黑名单规则模型
用于存储黑名单过滤规则支持域名、IP、CIDR 三种类型。
支持两层作用域:全局规则和 Target 级规则。
"""
class RuleType(models.TextChoices):
DOMAIN = 'domain', 'Domain'
IP = 'ip', 'IP address'
CIDR = 'cidr', 'CIDR range'
KEYWORD = 'keyword', 'Keyword'
class Scope(models.TextChoices):
GLOBAL = 'global', 'Global rule'
TARGET = 'target', 'Target rule'
id = models.AutoField(primary_key=True)
pattern = models.CharField(
max_length=255,
help_text='Rule pattern, e.g. *.gov, 10.0.0.0/8, 192.168.1.1'
)
rule_type = models.CharField(
max_length=20,
choices=RuleType.choices,
help_text='Rule type: domain, ip, cidr'
)
scope = models.CharField(
max_length=20,
choices=Scope.choices,
db_index=True,
help_text='Scope: global or target'
)
target = models.ForeignKey(
'targets.Target',
on_delete=models.CASCADE,
null=True,
blank=True,
related_name='blacklist_rules',
help_text='Associated Target (set only when scope=target)'
)
description = models.CharField(
max_length=500,
blank=True,
default='',
help_text='Rule description'
)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = 'blacklist_rule'
indexes = [
models.Index(fields=['scope', 'rule_type']),
models.Index(fields=['target', 'scope']),
]
constraints = [
models.UniqueConstraint(
fields=['pattern', 'scope', 'target'],
name='unique_blacklist_rule'
),
]
ordering = ['-created_at']
def __str__(self):
if self.scope == self.Scope.TARGET and self.target:
return f"[{self.scope}:{self.target_id}] {self.pattern}"
return f"[{self.scope}] {self.pattern}"

View File

@@ -0,0 +1,12 @@
"""Common serializers"""
from .blacklist_serializers import (
BlacklistRuleSerializer,
GlobalBlacklistRuleSerializer,
TargetBlacklistRuleSerializer,
)
__all__ = [
'BlacklistRuleSerializer',
'GlobalBlacklistRuleSerializer',
'TargetBlacklistRuleSerializer',
]

View File

@@ -0,0 +1,68 @@
"""黑名单规则序列化器"""
from rest_framework import serializers
from apps.common.models import BlacklistRule
from apps.common.utils import detect_rule_type
class BlacklistRuleSerializer(serializers.ModelSerializer):
"""黑名单规则序列化器"""
class Meta:
model = BlacklistRule
fields = [
'id',
'pattern',
'rule_type',
'scope',
'target',
'description',
'created_at',
]
read_only_fields = ['id', 'rule_type', 'created_at']
def validate_pattern(self, value):
"""验证规则模式"""
if not value or not value.strip():
raise serializers.ValidationError("Rule pattern cannot be empty")
return value.strip()
def create(self, validated_data):
"""创建规则时自动识别规则类型"""
pattern = validated_data.get('pattern', '')
validated_data['rule_type'] = detect_rule_type(pattern)
return super().create(validated_data)
def update(self, instance, validated_data):
"""更新规则时重新识别规则类型"""
if 'pattern' in validated_data:
pattern = validated_data['pattern']
validated_data['rule_type'] = detect_rule_type(pattern)
return super().update(instance, validated_data)
class GlobalBlacklistRuleSerializer(BlacklistRuleSerializer):
"""全局黑名单规则序列化器"""
class Meta(BlacklistRuleSerializer.Meta):
fields = ['id', 'pattern', 'rule_type', 'description', 'created_at']
read_only_fields = ['id', 'rule_type', 'created_at']
def create(self, validated_data):
"""创建全局规则"""
validated_data['scope'] = BlacklistRule.Scope.GLOBAL
validated_data['target'] = None
return super().create(validated_data)
class TargetBlacklistRuleSerializer(BlacklistRuleSerializer):
"""Target 黑名单规则序列化器"""
class Meta(BlacklistRuleSerializer.Meta):
fields = ['id', 'pattern', 'rule_type', 'description', 'created_at']
read_only_fields = ['id', 'rule_type', 'created_at']
def create(self, validated_data):
"""创建 Target 规则target_id 由 view 设置)"""
validated_data['scope'] = BlacklistRule.Scope.TARGET
return super().create(validated_data)

View File

@@ -3,13 +3,16 @@
Provides system-level shared services, including:
- SystemLogService: system log reading service
- BlacklistService: blacklist filtering service
Note: FilterService has moved to apps.common.utils.filter_utils
Recommended usage: from apps.common.utils.filter_utils import apply_filters
"""
from .system_log_service import SystemLogService
from .blacklist_service import BlacklistService
__all__ = [
'SystemLogService',
'BlacklistService',
]

View File

@@ -0,0 +1,176 @@
"""
Blacklist rule management service
Handles CRUD operations for blacklist rules (database layer).
For filtering logic, use apps.common.utils.BlacklistFilter.
Architecture:
- Model: BlacklistRule (apps.common.models.blacklist)
- Service: BlacklistService (this file) - rule CRUD
- Utils: BlacklistFilter (apps.common.utils.blacklist_filter) - filtering logic
- View: GlobalBlacklistView, TargetViewSet.blacklist
"""
import logging
from typing import List, Dict, Any, Optional
from django.db.models import QuerySet
from apps.common.utils import detect_rule_type
logger = logging.getLogger(__name__)
def _normalize_patterns(patterns: List[str]) -> List[str]:
"""
Normalize a pattern list: deduplicate and drop empty entries
Args:
patterns: raw pattern list
Returns:
List[str]: deduplicated pattern list (order preserved)
"""
return list(dict.fromkeys(filter(None, (p.strip() for p in patterns))))
class BlacklistService:
"""
Blacklist rule management service
Handles rule CRUD only; contains no filtering logic.
For filtering, use the BlacklistFilter utility class.
"""
def get_global_rules(self) -> QuerySet:
"""
Get the global blacklist rules
Returns:
QuerySet: queryset of global rules
"""
from apps.common.models import BlacklistRule
return BlacklistRule.objects.filter(scope=BlacklistRule.Scope.GLOBAL)
def get_target_rules(self, target_id: int) -> QuerySet:
"""
Get the target-level blacklist rules
Args:
target_id: Target ID
Returns:
QuerySet: queryset of target-level rules
"""
from apps.common.models import BlacklistRule
return BlacklistRule.objects.filter(
scope=BlacklistRule.Scope.TARGET,
target_id=target_id
)
def get_rules(self, target_id: Optional[int] = None) -> List:
"""
Get blacklist rules (global + target-level)
Args:
target_id: Target ID, used to load target-level rules
Returns:
List[BlacklistRule]: rule list
"""
from apps.common.models import BlacklistRule
# Load global rules
rules = list(BlacklistRule.objects.filter(scope=BlacklistRule.Scope.GLOBAL))
# Load target-level rules
if target_id:
target_rules = BlacklistRule.objects.filter(
scope=BlacklistRule.Scope.TARGET,
target_id=target_id
)
rules.extend(target_rules)
return rules
def replace_global_rules(self, patterns: List[str]) -> Dict[str, Any]:
"""
Replace all global blacklist rules (PUT semantics)
Args:
patterns: new list of rule patterns
Returns:
Dict: {'count': int} final rule count
"""
from apps.common.models import BlacklistRule
count = self._replace_rules(
patterns=patterns,
scope=BlacklistRule.Scope.GLOBAL,
target=None
)
logger.info("全量替换全局黑名单规则: %d", count)
return {'count': count}
def replace_target_rules(self, target, patterns: List[str]) -> Dict[str, Any]:
"""
Replace all target-level blacklist rules (PUT semantics)
Args:
target: Target object
patterns: new list of rule patterns
Returns:
Dict: {'count': int} final rule count
"""
from apps.common.models import BlacklistRule
count = self._replace_rules(
patterns=patterns,
scope=BlacklistRule.Scope.TARGET,
target=target
)
logger.info("全量替换 Target 黑名单规则: %d 条 (Target: %s)", count, target.name)
return {'count': count}
def _replace_rules(self, patterns: List[str], scope: str, target=None) -> int:
"""
Internal helper: replace all rules
Args:
patterns: rule pattern list
scope: rule scope (GLOBAL/TARGET)
target: Target object (required only for TARGET scope)
Returns:
int: final rule count
"""
from apps.common.models import BlacklistRule
from django.db import transaction
patterns = _normalize_patterns(patterns)
with transaction.atomic():
# 1. Delete old rules
delete_filter = {'scope': scope}
if target:
delete_filter['target'] = target
BlacklistRule.objects.filter(**delete_filter).delete()
# 2. Create new rules
if patterns:
rules = [
BlacklistRule(
pattern=pattern,
rule_type=detect_rule_type(pattern),
scope=scope,
target=target
)
for pattern in patterns
]
BlacklistRule.objects.bulk_create(rules)
return len(patterns)
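A usage sketch for the service above (assumes a configured Django environment; the target ID is illustrative):

```python
service = BlacklistService()

# PUT semantics: the stored global rules become exactly this list
result = service.replace_global_rules([
    '*.gov',        # domain suffix rule
    '10.0.0.0/8',   # CIDR rule
    '192.168.1.1',  # exact IP rule
    '*cdn*',        # keyword rule
    '*.gov',        # duplicate, dropped by _normalize_patterns
])
print(result)  # {'count': 4}

# Combined rule set for one target: global rules + that target's own rules
rules = service.get_rules(target_id=7)
```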

View File

@@ -2,13 +2,19 @@
Common module URL configuration
Routes:
- /api/health/ health check endpoint (no auth required)
- /api/auth/* authentication endpoints (login, logout, user info)
- /api/system/* system management endpoints (log viewing, etc.)
- /api/health/ health check endpoint (no auth required)
- /api/auth/* authentication endpoints (login, logout, user info)
- /api/system/* system management endpoints (log viewing, etc.)
- /api/blacklist/* blacklist management endpoints
"""
from django.urls import path
from .views import LoginView, LogoutView, MeView, ChangePasswordView, SystemLogsView, SystemLogFilesView, HealthCheckView
from .views import (
LoginView, LogoutView, MeView, ChangePasswordView,
SystemLogsView, SystemLogFilesView, HealthCheckView,
GlobalBlacklistView,
)
urlpatterns = [
# Health check (no auth required)
@@ -23,4 +29,7 @@ urlpatterns = [
# System administration
path('system/logs/', SystemLogsView.as_view(), name='system-logs'),
path('system/logs/files/', SystemLogFilesView.as_view(), name='system-log-files'),
# Blacklist management (PUT full-replace semantics)
path('blacklist/rules/', GlobalBlacklistView.as_view(), name='blacklist-rules'),
]

View File

@@ -14,6 +14,11 @@ from .csv_utils import (
create_csv_export_response,
UTF8_BOM,
)
from .blacklist_filter import (
BlacklistFilter,
detect_rule_type,
extract_host,
)
__all__ = [
'deduplicate_for_bulk',
@@ -27,4 +32,7 @@ __all__ = [
'format_datetime',
'create_csv_export_response',
'UTF8_BOM',
'BlacklistFilter',
'detect_rule_type',
'extract_host',
]

View File

@@ -0,0 +1,246 @@
"""
黑名单过滤工具
提供域名、IP、CIDR、关键词的黑名单匹配功能。
纯工具类,不涉及数据库操作。
支持的规则类型:
1. 域名精确匹配: example.com
- 规则: example.com
- 匹配: example.com
- 不匹配: sub.example.com, other.com
2. 域名后缀匹配: *.example.com
- 规则: *.example.com
- 匹配: sub.example.com, a.b.example.com, example.com
- 不匹配: other.com, example.com.cn
3. 关键词匹配: *cdn*
- 规则: *cdn*
- 匹配: cdn.example.com, a.cdn.b.com, mycdn123.com
- 不匹配: example.com (不包含 cdn)
4. IP 精确匹配: 192.168.1.1
- 规则: 192.168.1.1
- 匹配: 192.168.1.1
- 不匹配: 192.168.1.2
5. CIDR 范围匹配: 192.168.0.0/24
- 规则: 192.168.0.0/24
- 匹配: 192.168.0.1, 192.168.0.255
- 不匹配: 192.168.1.1
使用方式:
from apps.common.utils import BlacklistFilter
# 创建过滤器(传入规则列表)
rules = BlacklistRule.objects.filter(...)
filter = BlacklistFilter(rules)
# 检查单个目标
if filter.is_allowed('http://example.com'):
process(url)
# 流式处理
for url in urls:
if filter.is_allowed(url):
process(url)
"""
import ipaddress
import logging
from typing import List, Optional
from urllib.parse import urlparse
from apps.common.validators import is_valid_ip, validate_cidr
logger = logging.getLogger(__name__)
def detect_rule_type(pattern: str) -> str:
"""
Auto-detect the rule type
Supported patterns:
- exact domain: example.com
- domain suffix: *.example.com
- keyword: *cdn* (matches domains containing "cdn")
- exact IP: 192.168.1.1
- CIDR range: 192.168.0.0/24
Args:
pattern: rule pattern string
Returns:
str: rule type ('domain', 'ip', 'cidr', 'keyword')
"""
if not pattern:
return 'domain'
pattern = pattern.strip()
# Keyword pattern: *keyword* (asterisk on both ends, no dot inside)
if pattern.startswith('*') and pattern.endswith('*') and len(pattern) > 2:
keyword = pattern[1:-1]
# The keyword must not contain a dot (that would suggest a domain pattern)
if '.' not in keyword:
return 'keyword'
# CIDR check (contains /)
if '/' in pattern:
try:
validate_cidr(pattern)
return 'cidr'
except ValueError:
pass
# IP check (strip wildcard prefixes before validating)
clean_pattern = pattern.lstrip('*').lstrip('.')
if is_valid_ip(clean_pattern):
return 'ip'
# Default to domain
return 'domain'
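# Expected classifications, per the branches above:
# >>> detect_rule_type('*cdn*')          # asterisks on both ends, no dot inside
# 'keyword'
# >>> detect_rule_type('192.168.0.0/24')
# 'cidr'
# >>> detect_rule_type('192.168.1.1')
# 'ip'
# >>> detect_rule_type('*.example.com')  # wildcard domains stay 'domain'
# 'domain'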
def extract_host(target: str) -> str:
"""
Extract the hostname from a target string
Supports:
- bare domains: example.com
- bare IPs: 192.168.1.1
- URLs: http://example.com/path
Args:
target: target string
Returns:
str: extracted hostname
"""
if not target:
return ''
target = target.strip()
# If it is a URL, extract the hostname
if '://' in target:
try:
parsed = urlparse(target)
return parsed.hostname or target
except Exception:
return target
return target
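# Examples:
# >>> extract_host('http://example.com/path')
# 'example.com'
# >>> extract_host('example.com')
# 'example.com'
# >>> extract_host('192.168.1.1')
# '192.168.1.1'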
class BlacklistFilter:
"""
Blacklist filter
Pre-compiles rules for efficient matching.
"""
def __init__(self, rules: List):
"""
Initialize the filter
Args:
rules: list of BlacklistRule objects
"""
from apps.common.models import BlacklistRule
# Pre-parse: bucket rules by type + pre-compile CIDRs
self._domain_rules = [] # (pattern, is_wildcard, suffix)
self._ip_rules = set() # exact IPs in a set for O(1) lookup
self._cidr_rules = [] # (pattern, network_obj)
self._keyword_rules = [] # keywords (lowercase)
# Dedup: the same rule may appear across scopes
seen_patterns = set()
for rule in rules:
if rule.pattern in seen_patterns:
continue
seen_patterns.add(rule.pattern)
if rule.rule_type == BlacklistRule.RuleType.DOMAIN:
pattern = rule.pattern.lower()
if pattern.startswith('*.'):
self._domain_rules.append((pattern, True, pattern[1:]))
else:
self._domain_rules.append((pattern, False, None))
elif rule.rule_type == BlacklistRule.RuleType.IP:
self._ip_rules.add(rule.pattern)
elif rule.rule_type == BlacklistRule.RuleType.CIDR:
try:
network = ipaddress.ip_network(rule.pattern, strict=False)
self._cidr_rules.append((rule.pattern, network))
except ValueError:
pass
elif rule.rule_type == BlacklistRule.RuleType.KEYWORD:
# *cdn* -> cdn
keyword = rule.pattern[1:-1].lower()
self._keyword_rules.append(keyword)
def is_allowed(self, target: str) -> bool:
"""
Check whether a target passes the filter
Args:
target: target to check (domain/IP/URL)
Returns:
bool: True means allowed (not blacklisted), False means filtered out
"""
if not target:
return True
host = extract_host(target)
if not host:
return True
# Determine the input type first, then take the matching branch
if is_valid_ip(host):
return self._check_ip_rules(host)
else:
return self._check_domain_rules(host)
def _check_domain_rules(self, host: str) -> bool:
"""Check domain rules (exact + suffix + keyword matching)"""
host_lower = host.lower()
# 1. Domain rules (exact + suffix)
for pattern, is_wildcard, suffix in self._domain_rules:
if is_wildcard:
if host_lower.endswith(suffix) or host_lower == pattern[2:]:
return False
else:
if host_lower == pattern:
return False
# 2. Keyword matching (substring `in`, O(n*m))
for keyword in self._keyword_rules:
if keyword in host_lower:
return False
return True
def _check_ip_rules(self, host: str) -> bool:
"""Check IP rules (exact match + CIDR)"""
# 1. Exact IP match, O(1)
if host in self._ip_rules:
return False
# 2. CIDR matching
if self._cidr_rules:
try:
ip_obj = ipaddress.ip_address(host)
for _, network in self._cidr_rules:
if ip_obj in network:
return False
except ValueError:
pass
return True
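# The CIDR branch is plain stdlib containment, e.g.:
# >>> import ipaddress
# >>> ipaddress.ip_address('192.168.0.1') in ipaddress.ip_network('192.168.0.0/24')
# True
# >>> ipaddress.ip_address('192.168.1.1') in ipaddress.ip_network('192.168.0.0/24')
# False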

View File

@@ -5,14 +5,17 @@
- Health check view (Docker health checks)
- Auth views: login, logout, user info, password change
- System log views: live log viewing
- Blacklist view: global blacklist rule management
"""
from .health_views import HealthCheckView
from .auth_views import LoginView, LogoutView, MeView, ChangePasswordView
from .system_log_views import SystemLogsView, SystemLogFilesView
from .blacklist_views import GlobalBlacklistView
__all__ = [
'HealthCheckView',
'LoginView', 'LogoutView', 'MeView', 'ChangePasswordView',
'SystemLogsView', 'SystemLogFilesView',
'GlobalBlacklistView',
]

View File

@@ -0,0 +1,80 @@
"""全局黑名单 API 视图"""
import logging
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from apps.common.response_helpers import success_response, error_response
from apps.common.services import BlacklistService
logger = logging.getLogger(__name__)
class GlobalBlacklistView(APIView):
"""
全局黑名单规则 API
Endpoints:
- GET /api/blacklist/rules/ - 获取全局黑名单列表
- PUT /api/blacklist/rules/ - 全量替换规则(文本框保存场景)
设计说明:
- 使用 PUT 全量替换模式,适合"文本框每行一个规则"的前端场景
- 用户编辑文本框 -> 点击保存 -> 后端全量替换
架构MVS 模式
- View: 参数验证、响应格式化
- Service: 业务逻辑BlacklistService
- Model: 数据持久化BlacklistRule
"""
permission_classes = [IsAuthenticated]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.blacklist_service = BlacklistService()
def get(self, request):
"""
获取全局黑名单规则列表
返回格式:
{
"patterns": ["*.gov", "*.edu", "10.0.0.0/8"]
}
"""
rules = self.blacklist_service.get_global_rules()
patterns = list(rules.values_list('pattern', flat=True))
return success_response(data={'patterns': patterns})
def put(self, request):
"""
全量替换全局黑名单规则
请求格式:
{
"patterns": ["*.gov", "*.edu", "10.0.0.0/8"]
}
或者空数组清空所有规则:
{
"patterns": []
}
"""
patterns = request.data.get('patterns', [])
# 兼容字符串输入(换行分隔)
if isinstance(patterns, str):
patterns = [p for p in patterns.split('\n') if p.strip()]
if not isinstance(patterns, list):
return error_response(
code='VALIDATION_ERROR',
message='patterns 必须是数组'
)
# 调用 Service 层全量替换
result = self.blacklist_service.replace_global_rules(patterns)
return success_response(data=result)
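# A client-side sketch of the PUT full-replace contract (hypothetical host and
# token; assumes the token/session auth configured for this API):
#
# import requests
# resp = requests.put(
#     'http://localhost:8000/api/blacklist/rules/',
#     json={'patterns': ['*.gov', '*.edu', '10.0.0.0/8']},
#     headers={'Authorization': 'Token <token>'},
# )
# resp.json()  # e.g. {'count': 3} wrapped by success_response
#
# Sending {'patterns': []} clears every global rule.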

View File

@@ -1,4 +1,4 @@
# Generated by Django 5.2.7 on 2026-01-02 04:45
# Generated by Django 5.2.7 on 2026-01-06 00:55
from django.db import migrations, models

View File

@@ -16,10 +16,9 @@ class GobyFingerprintService(BaseFingerprintService):
"""
Validate a single Goby fingerprint
Validation rules:
- name must be present and non-empty
- logic must be present
- rule must be an array
Two supported formats:
1. Standard format: {"name": "...", "logic": "...", "rule": [...]}
2. JSONL format: {"product": "...", "rule": "..."}
Args:
item: 单条指纹数据
@@ -27,25 +26,43 @@ class GobyFingerprintService(BaseFingerprintService):
Returns:
bool: 是否有效
"""
# Standard format: name + logic + rule (array)
name = item.get('name', '')
logic = item.get('logic', '')
rule = item.get('rule')
return bool(name and str(name).strip()) and bool(logic) and isinstance(rule, list)
if name and item.get('logic') is not None and isinstance(item.get('rule'), list):
return bool(str(name).strip())
# JSONL format: product + rule (string)
product = item.get('product', '')
rule = item.get('rule', '')
return bool(product and str(product).strip() and rule and str(rule).strip())
def to_model_data(self, item: dict) -> dict:
"""
Convert Goby JSON format to Model fields
Two supported input formats:
1. Standard format: {"name": "...", "logic": "...", "rule": [...]}
2. JSONL format: {"product": "...", "rule": "..."}
Args:
item: raw Goby JSON data
Returns:
dict: Model field data
"""
# Standard format
if 'name' in item and isinstance(item.get('rule'), list):
return {
'name': str(item.get('name', '')).strip(),
'logic': item.get('logic', ''),
'rule': item.get('rule', []),
}
# JSONL format: wrap the rule string in a single-element array
return {
'name': str(item.get('name', '')).strip(),
'logic': item.get('logic', ''),
'rule': item.get('rule', []),
'name': str(item.get('product', '')).strip(),
'logic': 'or', # JSONL defaults to 'or' logic
'rule': [item.get('rule', '')] if item.get('rule') else [],
}
def get_export_data(self) -> list:

View File

@@ -139,7 +139,7 @@ class BaseFingerprintViewSet(viewsets.ModelViewSet):
POST /api/engine/fingerprints/{type}/import_file/
Request format: multipart/form-data
- file: JSON file
- file: JSON file (supports standard JSON and JSONL)
返回:同 batch_create
"""
@@ -148,9 +148,12 @@ class BaseFingerprintViewSet(viewsets.ModelViewSet):
raise ValidationError('Missing file')
try:
json_data = json.load(file)
content = file.read().decode('utf-8')
json_data = self._parse_json_content(content)
except json.JSONDecodeError as e:
raise ValidationError(f'Invalid JSON format: {e}')
except UnicodeDecodeError as e:
raise ValidationError(f'File encoding error: {e}')
fingerprints = self.parse_import_data(json_data)
if not fingerprints:
@@ -159,6 +162,41 @@ class BaseFingerprintViewSet(viewsets.ModelViewSet):
result = self.get_service().batch_create_fingerprints(fingerprints)
return success_response(data=result, status_code=status.HTTP_201_CREATED)
def _parse_json_content(self, content: str):
"""
Parse JSON content; supports standard JSON and JSONL
Args:
content: file content string
Returns:
parsed data (list or dict)
"""
content = content.strip()
# Try standard JSON first
try:
return json.loads(content)
except json.JSONDecodeError:
pass
# Fall back to JSONL (one JSON object per line)
lines = content.split('\n')
result = []
for i, line in enumerate(lines):
line = line.strip()
if not line:
continue
try:
result.append(json.loads(line))
except json.JSONDecodeError as e:
raise json.JSONDecodeError(f'Line {i + 1} failed to parse: {e.msg}', e.doc, e.pos)
if not result:
raise json.JSONDecodeError('File is empty or malformed', content, 0)
return result
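# For illustration, both inputs below parse (hypothetical data):
# standard JSON: '[{"name": "nginx", "logic": "or", "rule": []}]' -> the list itself
# JSONL:         '{"product": "nginx", "rule": "a"}\n{"product": "apache", "rule": "b"}'
#                -> [{'product': 'nginx', 'rule': 'a'}, {'product': 'apache', 'rule': 'b'}]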
@action(detail=False, methods=['post'], url_path='bulk-delete')
def bulk_delete(self, request):
"""

View File

@@ -263,11 +263,16 @@ COMMAND_TEMPLATES = {
'directory_scan': DIRECTORY_SCAN_COMMANDS,
'url_fetch': URL_FETCH_COMMANDS,
'vuln_scan': VULN_SCAN_COMMANDS,
'screenshot': {}, # uses a native Python library (Playwright); no command template
}
# ==================== Scan type configuration ====================
# Execution stage definitions (run in order)
# Stage 1: asset discovery - subdomains → ports → site probing → fingerprinting
# Stage 2: URL collection - url_fetch + directory_scan (parallel)
# Stage 3: screenshots - run after URL collection to capture newly discovered pages
# Stage 4: vulnerability scan - runs last
EXECUTION_STAGES = [
{
'mode': 'sequential',
@@ -277,6 +282,10 @@ EXECUTION_STAGES = [
'mode': 'parallel',
'flows': ['url_fetch', 'directory_scan']
},
{
'mode': 'sequential',
'flows': ['screenshot']
},
{
'mode': 'sequential',
'flows': ['vuln_scan']

View File

@@ -101,6 +101,16 @@ directory_scan:
match-codes: 200,201,301,302,401,403 # HTTP status codes to match
# rate: 0 # requests per second (default 0 = unlimited)
screenshot:
# ==================== Website screenshots ====================
# Capture website screenshots with Playwright and store them as WebP
# Runs in its own stage after url_fetch and directory_scan finish (see EXECUTION_STAGES)
tools:
playwright:
enabled: true
concurrency: 5 # concurrent captures (default 5)
url_sources: [websites] # URL sources; currently websites are captured, [websites, endpoints] also works
url_fetch:
# ==================== URL fetch ====================
tools:

View File

@@ -99,15 +99,13 @@ def initiate_scan_flow(
raise ValueError("engine_name is required")
logger.info(
"="*60 + "\n" +
"开始初始化扫描任务\n" +
f" Scan ID: {scan_id}\n" +
f" Target: {target_name}\n" +
f" Engine: {engine_name}\n" +
f" Workspace: {scan_workspace_dir}\n" +
"="*60
)
logger.info("="*60)
logger.info("开始初始化扫描任务")
logger.info(f"Scan ID: {scan_id}")
logger.info(f"Target: {target_name}")
logger.info(f"Engine: {engine_name}")
logger.info(f"Workspace: {scan_workspace_dir}")
logger.info("="*60)
# ==================== Task 1: 创建 Scan 工作空间 ====================
scan_workspace_path = setup_scan_workspace(scan_workspace_dir)
@@ -126,11 +124,9 @@ def initiate_scan_flow(
# FlowOrchestrator 已经解析了所有工具配置
enabled_tools_by_type = orchestrator.enabled_tools_by_type
logger.info(
f"执行计划生成成功:\n"
f" 扫描类型: {', '.join(orchestrator.scan_types)}\n"
f" 总共 {len(orchestrator.scan_types)} 个 Flow"
)
logger.info("执行计划生成成功")
logger.info(f"扫描类型: {', '.join(orchestrator.scan_types)}")
logger.info(f"总共 {len(orchestrator.scan_types)} 个 Flow")
# ==================== 初始化阶段进度 ====================
# 在解析完配置后立即初始化,此时已有完整的 scan_types 列表
@@ -209,9 +205,13 @@ def initiate_scan_flow(
for mode, enabled_flows in orchestrator.get_execution_stages():
if mode == 'sequential':
# 顺序执行
logger.info(f"\n{'='*60}\n顺序执行阶段: {', '.join(enabled_flows)}\n{'='*60}")
logger.info("="*60)
logger.info(f"顺序执行阶段: {', '.join(enabled_flows)}")
logger.info("="*60)
for scan_type, flow_func, flow_specific_kwargs in get_valid_flows(enabled_flows):
logger.info(f"\n{'='*60}\n执行 Flow: {scan_type}\n{'='*60}")
logger.info("="*60)
logger.info(f"执行 Flow: {scan_type}")
logger.info("="*60)
try:
result = flow_func(**flow_specific_kwargs)
record_flow_result(scan_type, result=result)
@@ -220,12 +220,16 @@ def initiate_scan_flow(
elif mode == 'parallel':
# 并行执行阶段:通过 Task 包装子 Flow并使用 Prefect TaskRunner 并发运行
logger.info(f"\n{'='*60}\n并行执行阶段: {', '.join(enabled_flows)}\n{'='*60}")
logger.info("="*60)
logger.info(f"并行执行阶段: {', '.join(enabled_flows)}")
logger.info("="*60)
futures = []
# 提交所有并行子 Flow 任务
for scan_type, flow_func, flow_specific_kwargs in get_valid_flows(enabled_flows):
logger.info(f"\n{'='*60}\n提交并行子 Flow 任务: {scan_type}\n{'='*60}")
logger.info("="*60)
logger.info(f"提交并行子 Flow 任务: {scan_type}")
logger.info("="*60)
future = _run_subflow_task.submit(
scan_type=scan_type,
flow_func=flow_func,
@@ -246,12 +250,10 @@ def initiate_scan_flow(
record_flow_result(scan_type, error=e)
# ==================== 完成 ====================
logger.info(
"="*60 + "\n" +
"✓ 扫描任务初始化完成\n" +
f" 执行的 Flow: {', '.join(executed_flows)}\n" +
"="*60
)
logger.info("="*60)
logger.info("✓ 扫描任务初始化完成")
logger.info(f"执行的 Flow: {', '.join(executed_flows)}")
logger.info("="*60)
# ==================== 返回结果 ====================
return {

View File

@@ -20,7 +20,7 @@ from pathlib import Path
from typing import Callable
from prefect import flow
from apps.scan.tasks.port_scan import (
export_scan_targets_task,
export_hosts_task,
run_and_stream_save_ports_task
)
from apps.scan.handlers.scan_flow_handlers import (
@@ -157,9 +157,9 @@ def _parse_port_count(tool_config: dict) -> int:
def _export_scan_targets(target_id: int, port_scan_dir: Path) -> tuple[str, int, str]:
def _export_hosts(target_id: int, port_scan_dir: Path) -> tuple[str, int, str]:
"""
导出扫描目标到文件
导出主机列表到文件
根据 Target 类型自动决定导出内容:
- DOMAIN: 从 Subdomain 表导出子域名
@@ -171,31 +171,31 @@ def _export_scan_targets(target_id: int, port_scan_dir: Path) -> tuple[str, int,
port_scan_dir: 端口扫描目录
Returns:
tuple: (targets_file, target_count, target_type)
tuple: (hosts_file, host_count, target_type)
"""
logger.info("Step 1: 导出扫描目标列表")
logger.info("Step 1: 导出主机列表")
targets_file = str(port_scan_dir / 'targets.txt')
export_result = export_scan_targets_task(
hosts_file = str(port_scan_dir / 'hosts.txt')
export_result = export_hosts_task(
target_id=target_id,
output_file=targets_file,
output_file=hosts_file,
batch_size=1000 # 每次读取 1000 条,优化内存占用
)
target_count = export_result['total_count']
host_count = export_result['total_count']
target_type = export_result.get('target_type', 'unknown')
logger.info(
"扫描目标导出完成 - 类型: %s, 文件: %s, 数量: %d",
"主机列表导出完成 - 类型: %s, 文件: %s, 数量: %d",
target_type,
export_result['output_file'],
target_count
host_count
)
if target_count == 0:
logger.warning("目标下没有可扫描的地址,无法执行端口扫描")
if host_count == 0:
logger.warning("目标下没有可扫描的主机,无法执行端口扫描")
return export_result['output_file'], target_count, target_type
return export_result['output_file'], host_count, target_type
def _run_scans_sequentially(
@@ -382,8 +382,8 @@ def port_scan_flow(
'scan_id': int,
'target': str,
'scan_workspace_dir': str,
'domains_file': str,
'domain_count': int,
'hosts_file': str,
'host_count': int,
'processed_records': int,
'executed_tasks': list,
'tool_stats': {
@@ -432,22 +432,22 @@ def port_scan_flow(
from apps.scan.utils import setup_scan_directory
port_scan_dir = setup_scan_directory(scan_workspace_dir, 'port_scan')
# Step 1: 导出扫描目标列表到文件(根据 Target 类型自动决定内容)
targets_file, target_count, target_type = _export_scan_targets(target_id, port_scan_dir)
# Step 1: 导出主机列表到文件(根据 Target 类型自动决定内容)
hosts_file, host_count, target_type = _export_hosts(target_id, port_scan_dir)
if target_count == 0:
logger.warning("跳过端口扫描:没有目标可扫描 - Scan ID: %s", scan_id)
user_log(scan_id, "port_scan", "Skipped: no targets to scan", "warning")
if host_count == 0:
logger.warning("跳过端口扫描:没有主机可扫描 - Scan ID: %s", scan_id)
user_log(scan_id, "port_scan", "Skipped: no hosts to scan", "warning")
return {
'success': True,
'scan_id': scan_id,
'target': target_name,
'scan_workspace_dir': scan_workspace_dir,
'targets_file': targets_file,
'target_count': 0,
'hosts_file': hosts_file,
'host_count': 0,
'target_type': target_type,
'processed_records': 0,
'executed_tasks': ['export_scan_targets'],
'executed_tasks': ['export_hosts'],
'tool_stats': {
'total': 0,
'successful': 0,
@@ -469,7 +469,7 @@ def port_scan_flow(
logger.info("Step 3: 串行执行扫描工具")
tool_stats, processed_records, successful_tool_names, failed_tools = _run_scans_sequentially(
enabled_tools=enabled_tools,
domains_file=targets_file, # 现在是 targets_file兼容原参数名
domains_file=hosts_file,
port_scan_dir=port_scan_dir,
scan_id=scan_id,
target_id=target_id,
@@ -481,7 +481,7 @@ def port_scan_flow(
user_log(scan_id, "port_scan", f"port_scan completed: found {processed_records} ports")
# 动态生成已执行的任务列表
executed_tasks = ['export_scan_targets', 'parse_config']
executed_tasks = ['export_hosts', 'parse_config']
executed_tasks.extend([f'run_and_stream_save_ports ({tool})' for tool in tool_stats.keys()])
return {
@@ -489,8 +489,8 @@ def port_scan_flow(
'scan_id': scan_id,
'target': target_name,
'scan_workspace_dir': scan_workspace_dir,
'targets_file': targets_file,
'target_count': target_count,
'hosts_file': hosts_file,
'host_count': host_count,
'target_type': target_type,
'processed_records': processed_records,
'executed_tasks': executed_tasks,
@@ -499,8 +499,8 @@ def port_scan_flow(
'successful': len(successful_tool_names),
'failed': len(failed_tools),
'successful_tools': successful_tool_names,
'failed_tools': failed_tools, # [{'tool': 'naabu_active', 'reason': '超时'}]
'details': tool_stats # 详细结果(保留向后兼容)
'failed_tools': failed_tools,
'details': tool_stats
}
}

View File

@@ -0,0 +1,202 @@
"""
截图 Flow
负责编排截图的完整流程:
1. 从数据库获取 URL 列表websites 和/或 endpoints
2. 批量截图并保存快照
3. 同步到资产表
"""
# Django 环境初始化
from apps.common.prefect_django_setup import setup_django_for_prefect
import logging
from pathlib import Path
from prefect import flow
from apps.scan.tasks.screenshot import capture_screenshots_task
from apps.scan.handlers.scan_flow_handlers import (
on_scan_flow_running,
on_scan_flow_completed,
on_scan_flow_failed,
)
from apps.scan.utils import user_log
from apps.scan.services.target_export_service import (
get_urls_with_fallback,
DataSource,
)
logger = logging.getLogger(__name__)
def _parse_screenshot_config(enabled_tools: dict) -> dict:
"""
Parse the screenshot configuration
Args:
enabled_tools: enabled tool configuration
Returns:
screenshot configuration dict
"""
# Read the playwright config from enabled_tools
playwright_config = enabled_tools.get('playwright', {})
return {
'concurrency': playwright_config.get('concurrency', 5),
'url_sources': playwright_config.get('url_sources', ['websites'])
}
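# e.g. (hypothetical input) _parse_screenshot_config({'playwright': {'enabled': True}})
# falls back to the defaults: {'concurrency': 5, 'url_sources': ['websites']}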
def _map_url_sources_to_data_sources(url_sources: list[str]) -> list[str]:
"""
Map url_sources from the config to DataSource constants
Args:
url_sources: source list from the config, e.g. ['websites', 'endpoints']
Returns:
list of DataSource constants
"""
source_mapping = {
'websites': DataSource.WEBSITE,
'endpoints': DataSource.ENDPOINT,
}
sources = []
for source in url_sources:
if source in source_mapping:
sources.append(source_mapping[source])
else:
logger.warning("Unknown URL source: %s, skipping", source)
# Append the default fallback (URLs built from the Target itself)
sources.append(DataSource.DEFAULT)
return sources
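# e.g. _map_url_sources_to_data_sources(['websites', 'bogus']) warns about 'bogus'
# and returns [DataSource.WEBSITE, DataSource.DEFAULT], so the chain always ends
# with default URL generation.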
@flow(
name="screenshot",
log_prints=True,
on_running=[on_scan_flow_running],
on_completion=[on_scan_flow_completed],
on_failure=[on_scan_flow_failed],
)
def screenshot_flow(
scan_id: int,
target_name: str,
target_id: int,
scan_workspace_dir: str,
enabled_tools: dict
) -> dict:
"""
截图 Flow
工作流程:
Step 1: 解析配置
Step 2: 收集 URL 列表
Step 3: 批量截图并保存快照
Step 4: 同步到资产表
Args:
scan_id: 扫描任务 ID
target_name: 目标名称
target_id: 目标 ID
scan_workspace_dir: 扫描工作空间目录
enabled_tools: 启用的工具配置
Returns:
dict: {
'success': bool,
'scan_id': int,
'target': str,
'total_urls': int,
'successful': int,
'failed': int,
'synced': int
}
"""
try:
logger.info(
"="*60 + "\n" +
"Starting screenshot scan\n" +
f" Scan ID: {scan_id}\n" +
f" Target: {target_name}\n" +
f" Workspace: {scan_workspace_dir}\n" +
"="*60
)
user_log(scan_id, "screenshot", "Starting screenshot capture")
# Step 1: parse the configuration
config = _parse_screenshot_config(enabled_tools)
concurrency = config['concurrency']
url_sources = config['url_sources']
logger.info("Screenshot config - concurrency: %d, URL sources: %s", concurrency, url_sources)
# Step 2: collect URLs via the unified service (blacklist filtering + fallback)
data_sources = _map_url_sources_to_data_sources(url_sources)
result = get_urls_with_fallback(target_id, sources=data_sources)
urls = result['urls']
logger.info(
"URL collection finished - source: %s, count: %d, tried: %s",
result['source'], result['total_count'], result['tried_sources']
)
if not urls:
logger.warning("No URLs to capture; skipping the screenshot task")
user_log(scan_id, "screenshot", "Skipped: no URLs to capture", "warning")
return {
'success': True,
'scan_id': scan_id,
'target': target_name,
'total_urls': 0,
'successful': 0,
'failed': 0,
'synced': 0
}
user_log(scan_id, "screenshot", f"Found {len(urls)} URLs to capture (source: {result['source']})")
# Step 3: capture screenshots in batch
logger.info("Step 3: batch screenshot capture - %d URLs", len(urls))
capture_result = capture_screenshots_task(
urls=urls,
scan_id=scan_id,
target_id=target_id,
config={'concurrency': concurrency}
)
# Step 4: sync to the asset tables
logger.info("Step 4: syncing screenshots to the asset tables")
from apps.asset.services.screenshot_service import ScreenshotService
screenshot_service = ScreenshotService()
synced = screenshot_service.sync_screenshots_to_asset(scan_id, target_id)
logger.info(
"✓ Screenshots finished - total: %d, succeeded: %d, failed: %d, synced: %d",
capture_result['total'], capture_result['successful'], capture_result['failed'], synced
)
user_log(
scan_id, "screenshot",
f"Screenshot completed: {capture_result['successful']}/{capture_result['total']} captured, {synced} synced"
)
return {
'success': True,
'scan_id': scan_id,
'target': target_name,
'total_urls': capture_result['total'],
'successful': capture_result['successful'],
'failed': capture_result['failed'],
'synced': synced
}
except Exception as e:
logger.exception("Screenshot flow failed: %s", e)
user_log(scan_id, "screenshot", f"Screenshot failed: {e}", "error")
raise

View File

@@ -165,12 +165,12 @@ def _run_scans_sequentially(
for tool_name, tool_config in enabled_tools.items():
# 1. 构建完整命令(变量替换)
try:
command_params = {'url_file': urls_file}
command = build_scan_command(
tool_name=tool_name,
scan_type='site_scan',
command_params={
'url_file': urls_file
},
command_params=command_params,
tool_config=tool_config
)
except Exception as e:

View File

@@ -732,7 +732,9 @@ def subdomain_discovery_flow(
executed_tasks.append('save_domains')
# 记录 Flow 完成
logger.info("="*60 + "\n✓ 子域名发现扫描完成\n" + "="*60)
logger.info("="*60)
logger.info("✓ 子域名发现扫描完成")
logger.info("="*60)
user_log(scan_id, "subdomain_discovery", f"subdomain_discovery completed: found {processed_domains} subdomains")
return {

View File

@@ -1,4 +1,4 @@
# Generated by Django 5.2.7 on 2026-01-02 04:45
# Generated by Django 5.2.7 on 2026-01-06 00:55
import django.contrib.postgres.fields
import django.db.models.deletion
@@ -31,6 +31,20 @@ class Migration(migrations.Migration):
'db_table': 'notification_settings',
},
),
migrations.CreateModel(
name='SubfinderProviderSettings',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('providers', models.JSONField(default=dict, help_text='各 Provider 的 API Key 配置')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Subfinder Provider 配置',
'verbose_name_plural': 'Subfinder Provider 配置',
'db_table': 'subfinder_provider_settings',
},
),
migrations.CreateModel(
name='Notification',
fields=[
@@ -87,7 +101,22 @@ class Migration(migrations.Migration):
'verbose_name_plural': '扫描任务',
'db_table': 'scan',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['-created_at'], name='scan_created_0bb6c7_idx'), models.Index(fields=['target'], name='scan_target__718b9d_idx'), models.Index(fields=['deleted_at', '-created_at'], name='scan_deleted_eb17e8_idx')],
},
),
migrations.CreateModel(
name='ScanLog',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('level', models.CharField(choices=[('info', 'Info'), ('warning', 'Warning'), ('error', 'Error')], default='info', help_text='日志级别', max_length=10)),
('content', models.TextField(help_text='日志内容')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, help_text='创建时间')),
('scan', models.ForeignKey(help_text='关联的扫描任务', on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='scan.scan')),
],
options={
'verbose_name': '扫描日志',
'verbose_name_plural': '扫描日志',
'db_table': 'scan_log',
'ordering': ['created_at'],
},
),
migrations.CreateModel(
@@ -113,38 +142,34 @@ class Migration(migrations.Migration):
'verbose_name_plural': '定时扫描任务',
'db_table': 'scheduled_scan',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['-created_at'], name='scheduled_s_created_9b9c2e_idx'), models.Index(fields=['is_enabled', '-created_at'], name='scheduled_s_is_enab_23d660_idx'), models.Index(fields=['name'], name='scheduled_s_name_bf332d_idx')],
},
),
migrations.CreateModel(
name='ScanLog',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('level', models.CharField(choices=[('info', 'Info'), ('warning', 'Warning'), ('error', 'Error')], default='info', help_text='日志级别', max_length=10)),
('content', models.TextField(help_text='日志内容')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, help_text='创建时间')),
('scan', models.ForeignKey(db_index=True, help_text='关联的扫描任务', on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='scan.scan')),
],
options={
'verbose_name': '扫描日志',
'verbose_name_plural': '扫描日志',
'db_table': 'scan_log',
'ordering': ['created_at'],
'indexes': [models.Index(fields=['scan', 'created_at'], name='scan_log_scan_id_e8c8f5_idx')],
},
migrations.AddIndex(
model_name='scan',
index=models.Index(fields=['-created_at'], name='scan_created_0bb6c7_idx'),
),
migrations.CreateModel(
name='SubfinderProviderSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('providers', models.JSONField(default=dict, help_text='各 Provider 的 API Key 配置')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Subfinder Provider 配置',
'verbose_name_plural': 'Subfinder Provider 配置',
'db_table': 'subfinder_provider_settings',
},
migrations.AddIndex(
model_name='scan',
index=models.Index(fields=['target'], name='scan_target__718b9d_idx'),
),
migrations.AddIndex(
model_name='scan',
index=models.Index(fields=['deleted_at', '-created_at'], name='scan_deleted_eb17e8_idx'),
),
migrations.AddIndex(
model_name='scanlog',
index=models.Index(fields=['scan', 'created_at'], name='scan_log_scan_id_c4814a_idx'),
),
migrations.AddIndex(
model_name='scheduledscan',
index=models.Index(fields=['-created_at'], name='scheduled_s_created_9b9c2e_idx'),
),
migrations.AddIndex(
model_name='scheduledscan',
index=models.Index(fields=['is_enabled', '-created_at'], name='scheduled_s_is_enab_23d660_idx'),
),
migrations.AddIndex(
model_name='scheduledscan',
index=models.Index(fields=['name'], name='scheduled_s_name_bf332d_idx'),
),
]

View File

@@ -0,0 +1,18 @@
# Generated by Django 5.2.7 on 2026-01-07 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scan', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='scan',
name='cached_screenshots_count',
field=models.IntegerField(default=0, help_text='缓存的截图数量'),
),
]

View File

@@ -84,6 +84,7 @@ class Scan(models.Model):
cached_endpoints_count = models.IntegerField(default=0, help_text='缓存的端点数量')
cached_ips_count = models.IntegerField(default=0, help_text='缓存的IP地址数量')
cached_directories_count = models.IntegerField(default=0, help_text='缓存的目录数量')
cached_screenshots_count = models.IntegerField(default=0, help_text='缓存的截图数量')
cached_vulns_total = models.IntegerField(default=0, help_text='缓存的漏洞总数')
cached_vulns_critical = models.IntegerField(default=0, help_text='缓存的严重漏洞数量')
cached_vulns_high = models.IntegerField(default=0, help_text='缓存的高危漏洞数量')

View File

@@ -21,9 +21,6 @@ urlpatterns = [
# 标记全部已读
path('mark-all-as-read/', NotificationMarkAllAsReadView.as_view(), name='mark-all-as-read'),
# 测试通知
path('test/', views.notifications_test, name='test'),
]
# WebSocket 实时通知路由在 routing.py 中定义ws://host/ws/notifications/

View File

@@ -23,45 +23,7 @@ from .services import NotificationService, NotificationSettingsService
logger = logging.getLogger(__name__)
def notifications_test(request):
"""
测试通知推送
"""
try:
from .services import create_notification
from django.http import JsonResponse
level_param = request.GET.get('level', NotificationLevel.LOW)
try:
level_choice = NotificationLevel(level_param)
except ValueError:
level_choice = NotificationLevel.LOW
title = request.GET.get('title') or "测试通知"
message = request.GET.get('message') or "这是一条测试通知消息"
# 创建测试通知
notification = create_notification(
title=title,
message=message,
level=level_choice
)
return JsonResponse({
'success': True,
'message': '测试通知已发送',
'notification_id': notification.id
})
except Exception as e:
logger.error(f"发送测试通知失败: {e}")
return JsonResponse({
'success': False,
'error': str(e)
}, status=500)
# build_api_response 已废弃,请使用 success_response/error_response
def _parse_bool(value: str | None) -> bool | None:

View File

@@ -147,10 +147,10 @@ class FlowOrchestrator:
return True
return False
# Other scan types: check tools
# Other scan types (including screenshot): check tools
tools = scan_config.get('tools', {})
for tool_config in tools.values():
if tool_config.get('enabled', False):
if isinstance(tool_config, dict) and tool_config.get('enabled', False):
return True
return False
@@ -222,6 +222,10 @@ class FlowOrchestrator:
from apps.scan.flows.vuln_scan import vuln_scan_flow
return vuln_scan_flow
elif scan_type == 'screenshot':
from apps.scan.flows.screenshot_flow import screenshot_flow
return screenshot_flow
else:
logger.warning(f"未实现的扫描类型: {scan_type}")
return None

View File

@@ -464,6 +464,7 @@ class DjangoScanRepository:
'endpoints': scan.endpoint_snapshots.count(),
'ips': ips_count,
'directories': scan.directory_snapshots.count(),
'screenshots': scan.screenshot_snapshots.count(),
'vulns_total': total_vulns,
'vulns_critical': severity_stats['critical'],
'vulns_high': severity_stats['high'],
@@ -478,6 +479,7 @@ class DjangoScanRepository:
'cached_endpoints_count': stats['endpoints'],
'cached_ips_count': stats['ips'],
'cached_directories_count': stats['directories'],
'cached_screenshots_count': stats['screenshots'],
'cached_vulns_total': stats['vulns_total'],
'cached_vulns_critical': stats['vulns_critical'],
'cached_vulns_high': stats['vulns_high'],

View File

@@ -41,7 +41,7 @@ class ScanHistorySerializer(serializers.ModelSerializer):
fields = [
'id', 'target', 'target_name', 'engine_ids', 'engine_names',
'worker_name', 'created_at', 'status', 'error_message', 'summary',
'progress', 'current_stage', 'stage_progress'
'progress', 'current_stage', 'stage_progress', 'yaml_configuration'
]
def get_summary(self, obj):
@@ -51,6 +51,7 @@ class ScanHistorySerializer(serializers.ModelSerializer):
'endpoints': obj.cached_endpoints_count or 0,
'ips': obj.cached_ips_count or 0,
'directories': obj.cached_directories_count or 0,
'screenshots': obj.cached_screenshots_count or 0,
}
summary['vulnerabilities'] = {
'total': obj.cached_vulns_total or 0,
@@ -65,7 +66,7 @@ class ScanHistorySerializer(serializers.ModelSerializer):
class QuickScanSerializer(ScanConfigValidationMixin, serializers.Serializer):
"""快速扫描序列化器"""
MAX_BATCH_SIZE = 1000
MAX_BATCH_SIZE = 5000
targets = serializers.ListField(
child=serializers.DictField(),

View File

@@ -17,8 +17,12 @@ from .scan_state_service import ScanStateService
from .scan_control_service import ScanControlService
from .scan_stats_service import ScanStatsService
from .scheduled_scan_service import ScheduledScanService
from .blacklist_service import BlacklistService
from .target_export_service import TargetExportService
from .target_export_service import (
TargetExportService,
create_export_service,
export_urls_with_fallback,
DataSource,
)
__all__ = [
'ScanService', # 主入口(向后兼容)
@@ -27,7 +31,9 @@ __all__ = [
'ScanControlService',
'ScanStatsService',
'ScheduledScanService',
'BlacklistService', # 黑名单过滤服务
'TargetExportService', # 目标导出服务
'create_export_service',
'export_urls_with_fallback',
'DataSource',
]

View File

@@ -1,82 +0,0 @@
"""
黑名单过滤服务
过滤敏感域名(如 .gov、.edu、.mil 等)
当前版本使用默认规则,后续将支持从前端配置加载。
"""
from typing import List, Optional
from django.db.models import QuerySet
import re
import logging
logger = logging.getLogger(__name__)
class BlacklistService:
"""
黑名单过滤服务 - 过滤敏感域名
TODO: 后续版本支持从前端配置加载黑名单规则
- 用户在开始扫描时配置黑名单 URL、域名、IP
- 黑名单规则存储在数据库中,与 Scan 或 Engine 关联
"""
# 默认黑名单正则规则
DEFAULT_PATTERNS = [
r'\.gov$', # .gov 结尾
r'\.gov\.[a-z]{2}$', # .gov.cn, .gov.uk 等
]
def __init__(self, patterns: Optional[List[str]] = None):
"""
初始化黑名单服务
Args:
patterns: 正则表达式列表None 使用默认规则
"""
self.patterns = patterns or self.DEFAULT_PATTERNS
self._compiled_patterns = [re.compile(p) for p in self.patterns]
def filter_queryset(
self,
queryset: QuerySet,
url_field: str = 'url'
) -> QuerySet:
"""
数据库层面过滤 queryset
使用 PostgreSQL 正则表达式排除黑名单 URL
Args:
queryset: 原始 queryset
url_field: URL 字段名
Returns:
QuerySet: 过滤后的 queryset
"""
for pattern in self.patterns:
queryset = queryset.exclude(**{f'{url_field}__regex': pattern})
return queryset
def filter_url(self, url: str) -> bool:
"""
检查单个 URL 是否通过黑名单过滤
Args:
url: 要检查的 URL
Returns:
bool: True 表示通过不在黑名单False 表示被过滤
"""
for pattern in self._compiled_patterns:
if pattern.search(url):
return False
return True
# TODO: 后续版本实现
# @classmethod
# def from_scan(cls, scan_id: int) -> 'BlacklistService':
# """从数据库加载扫描配置的黑名单规则"""
# pass

View File

@@ -2,7 +2,9 @@
Target export service
Provides unified target extraction and file export, supporting:
- URL export (streaming write + default-value fallback)
- URL export (pure export, no implicit fallback)
- default URL generation (standalone method)
- URL export with a fallback chain (orchestrated at the use-case layer)
- domain/IP export (for port scanning)
- blacklist filter integration
"""
@@ -10,37 +12,326 @@
import ipaddress
import logging
from pathlib import Path
from typing import Dict, Any, Optional, Iterator
from typing import Dict, Any, Optional, List, Iterator, Tuple, Callable
from django.db.models import QuerySet
from .blacklist_service import BlacklistService
from apps.common.utils import BlacklistFilter
logger = logging.getLogger(__name__)
class DataSource:
"""Data source type constants"""
ENDPOINT = "endpoint"
WEBSITE = "website"
HOST_PORT = "host_port"
DEFAULT = "default"
def create_export_service(target_id: int) -> 'TargetExportService':
"""
Factory: create an export service with blacklist filtering wired in
Args:
target_id: target ID, used to load blacklist rules
Returns:
TargetExportService: export service instance with a configured blacklist filter
"""
from apps.common.services import BlacklistService
rules = BlacklistService().get_rules(target_id)
blacklist_filter = BlacklistFilter(rules)
return TargetExportService(blacklist_filter=blacklist_filter)
def _iter_default_urls_from_target(
target_id: int,
blacklist_filter: Optional[BlacklistFilter] = None
) -> Iterator[str]:
"""
Internal generator: build default URLs from the Target itself
URLs are generated by Target type:
- DOMAIN: http(s)://domain
- IP: http(s)://ip
- CIDR: expand to http(s)://ip for every address
- URL: use the target URL directly
Args:
target_id: target ID
blacklist_filter: blacklist filter
Yields:
str: URL
"""
from apps.targets.services import TargetService
from apps.targets.models import Target
target_service = TargetService()
target = target_service.get_target(target_id)
if not target:
logger.warning("Target ID %d does not exist; cannot generate default URLs", target_id)
return
target_name = target.name
target_type = target.type
# Generate URLs by Target type
if target_type == Target.TargetType.DOMAIN:
urls = [f"http://{target_name}", f"https://{target_name}"]
elif target_type == Target.TargetType.IP:
urls = [f"http://{target_name}", f"https://{target_name}"]
elif target_type == Target.TargetType.CIDR:
try:
network = ipaddress.ip_network(target_name, strict=False)
urls = []
for ip in network.hosts():
urls.extend([f"http://{ip}", f"https://{ip}"])
# Special-case /32 or /128 (hosts() yields nothing)
if not urls:
ip = str(network.network_address)
urls = [f"http://{ip}", f"https://{ip}"]
except ValueError as e:
logger.error("Failed to parse CIDR: %s - %s", target_name, e)
return
elif target_type == Target.TargetType.URL:
urls = [target_name]
else:
logger.warning("Unsupported Target type: %s", target_type)
return
# Filter and yield
for url in urls:
if blacklist_filter and not blacklist_filter.is_allowed(url):
continue
yield url
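# Quick sanity check of the CIDR expansion above (stdlib only):
# >>> import ipaddress
# >>> list(ipaddress.ip_network('10.0.0.0/30').hosts())
# [IPv4Address('10.0.0.1'), IPv4Address('10.0.0.2')]
# Two hosts -> four default URLs (http + https per address); a /32 yields no
# hosts() and falls through to the network-address special case.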
def _iter_urls_with_fallback(
target_id: int,
sources: List[str],
blacklist_filter: Optional[BlacklistFilter] = None,
batch_size: int = 1000,
tried_sources: Optional[List[str]] = None
) -> Iterator[Tuple[str, str]]:
"""
Internal generator: stream URLs with a fallback chain
Tries each data source in `sources` order until one yields data.
Fallback rules:
- source has data that passes filtering → yield URLs, stop falling back
- source has data but all of it is filtered → stop without falling back (avoids accidental exposure)
- source is empty → try the next source
Args:
target_id: target ID
sources: data sources in priority order
blacklist_filter: blacklist filter
batch_size: batch size
tried_sources: optional list recording attempted sources (mutated in place)
Yields:
Tuple[str, str]: (url, source) - the URL and its source tag
"""
from apps.asset.models import Endpoint, WebSite
for source in sources:
if tried_sources is not None:
tried_sources.append(source)
has_output = False # produced any output (passed filtering)
has_raw_data = False # had any raw data (before filtering)
if source == DataSource.DEFAULT:
# Default URL generation (built from the Target itself; reuses the shared generator)
for url in _iter_default_urls_from_target(target_id, blacklist_filter):
has_raw_data = True
has_output = True
yield url, source
# Check for raw data separately (the generator may be empty only because of filtering)
if not has_raw_data:
# Re-check whether the Target exists
from apps.targets.services import TargetService
target = TargetService().get_target(target_id)
has_raw_data = target is not None
if has_raw_data:
if not has_output:
logger.info("%s has data but all of it was filtered by the blacklist; not falling back", source)
return
continue
# Build the queryset for the data source
if source == DataSource.ENDPOINT:
queryset = Endpoint.objects.filter(target_id=target_id).values_list('url', flat=True)
elif source == DataSource.WEBSITE:
queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)
else:
logger.warning("Unknown data source type: %s, skipping", source)
continue
for url in queryset.iterator(chunk_size=batch_size):
if url:
has_raw_data = True
if blacklist_filter and not blacklist_filter.is_allowed(url):
continue
has_output = True
yield url, source
# Stop as soon as a source had raw data (filtered or not)
if has_raw_data:
if not has_output:
logger.info("%s has data but all of it was filtered by the blacklist; not falling back", source)
return
logger.info("%s is empty, trying the next data source", source)
def get_urls_with_fallback(
target_id: int,
sources: List[str],
batch_size: int = 1000
) -> Dict[str, Any]:
"""
Use-case function: fetch URLs with a fallback chain (returns a list)
Tries each data source in `sources` order until one yields data.
Args:
target_id: target ID
sources: data sources in priority order, e.g. ["website", "endpoint", "default"]
batch_size: batch size
Returns:
dict: {
'success': bool,
'urls': List[str],
'total_count': int,
'source': str, # data source actually used
'tried_sources': List[str], # data sources attempted
}
"""
from apps.common.services import BlacklistService
rules = BlacklistService().get_rules(target_id)
blacklist_filter = BlacklistFilter(rules)
urls = []
actual_source = 'none'
tried_sources = []
for url, source in _iter_urls_with_fallback(target_id, sources, blacklist_filter, batch_size, tried_sources):
urls.append(url)
actual_source = source
if urls:
logger.info("Fetched %d URLs from %s", len(urls), actual_source)
else:
logger.warning("All data sources are empty; no URLs to fetch")
return {
'success': True,
'urls': urls,
'total_count': len(urls),
'source': actual_source,
'tried_sources': tried_sources,
}
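# Usage sketch (hypothetical IDs; this is what screenshot_flow calls):
# result = get_urls_with_fallback(target_id=1, sources=[DataSource.WEBSITE, DataSource.DEFAULT])
# result['urls']           # e.g. ['https://example.com', ...]
# result['source']         # 'website' if the WebSite table had rows, else 'default'
# result['tried_sources']  # every source attempted, in order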
def export_urls_with_fallback(
target_id: int,
output_file: str,
sources: List[str],
batch_size: int = 1000
) -> Dict[str, Any]:
"""
Use-case function: export URLs with a fallback chain (writes to a file)
Tries each data source in `sources` order until one yields data.
Streaming write; O(1) memory.
Args:
target_id: target ID
output_file: output file path
sources: data sources in priority order, e.g. ["endpoint", "website", "default"]
batch_size: batch size
Returns:
dict: {
'success': bool,
'output_file': str,
'total_count': int,
'source': str, # data source actually used
'tried_sources': List[str], # data sources attempted
}
"""
from apps.common.services import BlacklistService
rules = BlacklistService().get_rules(target_id)
blacklist_filter = BlacklistFilter(rules)
output_path = Path(output_file)
output_path.parent.mkdir(parents=True, exist_ok=True)
total_count = 0
actual_source = 'none'
tried_sources = []
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
for url, source in _iter_urls_with_fallback(target_id, sources, blacklist_filter, batch_size, tried_sources):
f.write(f"{url}\n")
total_count += 1
actual_source = source
if total_count % 10000 == 0:
logger.info("Exported %d URLs...", total_count)
if total_count > 0:
logger.info("Exported %d URLs from %s to %s", total_count, actual_source, output_file)
else:
logger.warning("All data sources are empty; no URLs to export")
return {
'success': True,
'output_file': str(output_path),
'total_count': total_count,
'source': actual_source,
'tried_sources': tried_sources,
}
class TargetExportService:
"""
Target export service - unified target extraction and file export
Usage:
# The task layer decides the data source
queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)
# Option 1: use the use-case function (recommended)
from apps.scan.services.target_export_service import export_urls_with_fallback, DataSource
# Use the export service
blacklist_service = BlacklistService()
export_service = TargetExportService(blacklist_service=blacklist_service)
result = export_urls_with_fallback(
target_id=1,
output_file='/path/to/output.txt',
sources=[DataSource.ENDPOINT, DataSource.WEBSITE, DataSource.DEFAULT]
)
# Option 2: use the service directly (pure export, no fallback)
export_service = create_export_service(target_id)
result = export_service.export_urls(target_id, output_path, queryset)
"""
def __init__(self, blacklist_service: Optional[BlacklistService] = None):
def __init__(self, blacklist_filter: Optional[BlacklistFilter] = None):
"""
Initialize the export service
Args:
blacklist_service: blacklist filtering service; None disables filtering
blacklist_filter: blacklist filter; None disables filtering
"""
self.blacklist_service = blacklist_service
self.blacklist_filter = blacklist_filter
def export_urls(
self,
@@ -51,16 +342,14 @@ class TargetExportService:
batch_size: int = 1000
) -> Dict[str, Any]:
"""
统一 URL 导出函数
URL 导出函数 - 只负责将 queryset 数据写入文件
自动判断数据库有无数据:
- 有数据:流式写入数据库数据到文件
- 无数据:调用默认值生成器生成 URL
不做任何隐式回退或默认 URL 生成。
Args:
target_id: target ID
output_path: output file path
queryset: data-source queryset (built by the task layer, should be values_list(flat=True))
queryset: data-source queryset (built by the caller, should be values_list(flat=True))
url_field: URL field name (used for blacklist filtering)
batch_size: batch size
@@ -68,7 +357,9 @@ class TargetExportService:
dict: {
'success': bool,
'output_file': str,
'total_count': int
'total_count': int, # 实际写入数量
'queryset_count': int, # 原始数据数量(迭代计数)
'filtered_count': int, # 被黑名单过滤的数量
}
Raises:
@@ -79,19 +370,18 @@ class TargetExportService:
logger.info("开始导出 URL - target_id=%s, output=%s", target_id, output_path)
# 应用黑名单过滤(数据库层面)
if self.blacklist_service:
# 注意queryset 应该是原始 queryset不是 values_list
# 这里假设 Task 层传入的是 values_list需要在 Task 层处理过滤
pass
total_count = 0
filtered_count = 0
queryset_count = 0
try:
with open(output_file, 'w', encoding='utf-8', buffering=8192) as f:
for url in queryset.iterator(chunk_size=batch_size):
queryset_count += 1
if url:
# Python 层面黑名单过滤
if self.blacklist_service and not self.blacklist_service.filter_url(url):
# 黑名单过滤
if self.blacklist_filter and not self.blacklist_filter.is_allowed(url):
filtered_count += 1
continue
f.write(f"{url}\n")
total_count += 1
@@ -102,25 +392,29 @@ class TargetExportService:
logger.error("文件写入失败: %s - %s", output_path, e)
raise
# 默认值回退模式
if total_count == 0:
total_count = self._generate_default_urls(target_id, output_file)
if filtered_count > 0:
logger.info("黑名单过滤: 过滤 %d 个 URL", filtered_count)
logger.info("✓ URL 导出完成 - 数量: %d, 文件: %s", total_count, output_path)
logger.info(
"✓ URL 导出完成 - 写入: %d, 原始: %d, 过滤: %d, 文件: %s",
total_count, queryset_count, filtered_count, output_path
)
return {
'success': True,
'output_file': str(output_file),
'total_count': total_count
'total_count': total_count,
'queryset_count': queryset_count,
'filtered_count': filtered_count,
}
def _generate_default_urls(
def generate_default_urls(
self,
target_id: int,
output_path: Path
) -> int:
output_path: str
) -> Dict[str, Any]:
"""
默认值生成器(内部函数)
默认 URL 生成器
根据 Target 类型生成默认 URL
- DOMAIN: http(s)://domain
@@ -133,91 +427,43 @@ class TargetExportService:
output_path: 输出文件路径
Returns:
int: 写入的 URL 总数
dict: {
'success': bool,
'output_file': str,
'total_count': int,
}
"""
from apps.targets.services import TargetService
from apps.targets.models import Target
output_file = Path(output_path)
output_file.parent.mkdir(parents=True, exist_ok=True)
target_service = TargetService()
target = target_service.get_target(target_id)
if not target:
logger.warning("Target ID %d 不存在,无法生成默认 URL", target_id)
return 0
target_name = target.name
target_type = target.type
logger.info("懒加载模式Target 类型=%s, 名称=%s", target_type, target_name)
logger.info("生成默认 URL - target_id=%d", target_id)
total_urls = 0
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
if target_type == Target.TargetType.DOMAIN:
urls = [f"http://{target_name}", f"https://{target_name}"]
for url in urls:
if self._should_write_url(url):
f.write(f"{url}\n")
total_urls += 1
elif target_type == Target.TargetType.IP:
urls = [f"http://{target_name}", f"https://{target_name}"]
for url in urls:
if self._should_write_url(url):
f.write(f"{url}\n")
total_urls += 1
elif target_type == Target.TargetType.CIDR:
try:
network = ipaddress.ip_network(target_name, strict=False)
for ip in network.hosts():
urls = [f"http://{ip}", f"https://{ip}"]
for url in urls:
if self._should_write_url(url):
f.write(f"{url}\n")
total_urls += 1
if total_urls % 10000 == 0:
logger.info("已生成 %d 个 URL...", total_urls)
# /32 或 /128 特殊处理
if total_urls == 0:
ip = str(network.network_address)
urls = [f"http://{ip}", f"https://{ip}"]
for url in urls:
if self._should_write_url(url):
f.write(f"{url}\n")
total_urls += 1
except ValueError as e:
logger.error("CIDR 解析失败: %s - %s", target_name, e)
raise ValueError(f"无效的 CIDR: {target_name}") from e
elif target_type == Target.TargetType.URL:
if self._should_write_url(target_name):
f.write(f"{target_name}\n")
total_urls = 1
else:
logger.warning("不支持的 Target 类型: %s", target_type)
with open(output_file, 'w', encoding='utf-8', buffering=8192) as f:
for url in _iter_default_urls_from_target(target_id, self.blacklist_filter):
f.write(f"{url}\n")
total_urls += 1
if total_urls % 10000 == 0:
logger.info("已生成 %d 个 URL...", total_urls)
logger.info("懒加载生成默认 URL - 数量: %d", total_urls)
return total_urls
def _should_write_url(self, url: str) -> bool:
"""检查 URL 是否应该写入(通过黑名单过滤)"""
if self.blacklist_service:
return self.blacklist_service.filter_url(url)
return True
logger.info("✓ 默认 URL 生成完成 - 数量: %d", total_urls)
return {
'success': True,
'output_file': str(output_file),
'total_count': total_urls,
}
def export_targets(
def export_hosts(
self,
target_id: int,
output_path: str,
batch_size: int = 1000
) -> Dict[str, Any]:
"""
域名/IP 导出函数(用于端口扫描)
主机列表导出函数(用于端口扫描)
根据 Target 类型选择导出逻辑:
- DOMAIN: 从 Subdomain 表流式导出子域名
@@ -255,7 +501,7 @@ class TargetExportService:
target_name = target.name
logger.info(
"开始导出扫描目标 - Target ID: %d, Name: %s, Type: %s, 输出文件: %s",
"开始导出主机列表 - Target ID: %d, Name: %s, Type: %s, 输出文件: %s",
target_id, target_name, target_type, output_path
)
@@ -277,7 +523,7 @@ class TargetExportService:
raise ValueError(f"不支持的目标类型: {target_type}")
logger.info(
"扫描目标导出完成 - 类型: %s, 总数: %d, 文件: %s",
"主机列表导出完成 - 类型: %s, 总数: %d, 文件: %s",
type_desc, total_count, output_path
)
@@ -295,7 +541,7 @@ class TargetExportService:
output_path: Path,
batch_size: int
) -> int:
"""导出域名类型目标的子域名"""
"""导出域名类型目标的根域名 + 子域名"""
from apps.asset.services.asset.subdomain_service import SubdomainService
subdomain_service = SubdomainService()
@@ -305,23 +551,27 @@ class TargetExportService:
)
total_count = 0
written_domains = set() # 去重(子域名表可能已包含根域名)
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
# 1. 先写入根域名
if self._should_write_target(target_name):
f.write(f"{target_name}\n")
written_domains.add(target_name)
total_count += 1
# 2. 再写入子域名(跳过已写入的根域名)
for domain_name in domain_iterator:
if domain_name in written_domains:
continue
if self._should_write_target(domain_name):
f.write(f"{domain_name}\n")
written_domains.add(domain_name)
total_count += 1
if total_count % 10000 == 0:
logger.info("已导出 %d 个域名...", total_count)
# 默认值模式:如果没有子域名,使用根域名
if total_count == 0:
logger.info("采用默认域名:%s (target_id=%d)", target_name, target_id)
if self._should_write_target(target_name):
with open(output_path, 'w', encoding='utf-8') as f:
f.write(f"{target_name}\n")
total_count = 1
return total_count
def _export_ip(self, target_name: str, output_path: Path) -> int:
@@ -359,6 +609,6 @@ class TargetExportService:
def _should_write_target(self, target: str) -> bool:
"""检查目标是否应该写入(通过黑名单过滤)"""
if self.blacklist_service:
return self.blacklist_service.filter_url(target)
if self.blacklist_filter:
return self.blacklist_filter.is_allowed(target)
return True

View File

@@ -1,14 +1,16 @@
"""
导出站点 URL 到 TXT 文件的 Task
使用 TargetExportService 统一处理导出逻辑和默认值回退
数据源: WebSite.url
使用 export_urls_with_fallback 用例函数处理回退链逻辑
数据源: WebSite.url → Default
"""
import logging
from prefect import task
from apps.asset.models import WebSite
from apps.scan.services import TargetExportService, BlacklistService
from apps.scan.services.target_export_service import (
export_urls_with_fallback,
DataSource,
)
logger = logging.getLogger(__name__)
@@ -22,13 +24,9 @@ def export_sites_task(
"""
导出目标下的所有站点 URL 到 TXT 文件
数据源: WebSite.url
懒加载模式:
- 如果数据库为空,根据 Target 类型生成默认 URL
- DOMAIN: http(s)://domain
- IP: http(s)://ip
- CIDR: 展开为所有 IP 的 URL
数据源优先级(回退链):
1. WebSite 表 - 站点级别 URL
2. 默认生成 - 根据 Target 类型生成 http(s)://target_name
Args:
target_id: 目标 ID
@@ -46,26 +44,21 @@ def export_sites_task(
ValueError: 参数错误
IOError: 文件写入失败
"""
# 构建数据源 querysetTask 层决定数据源)
queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)
# 使用 TargetExportService 处理导出
blacklist_service = BlacklistService()
export_service = TargetExportService(blacklist_service=blacklist_service)
result = export_service.export_urls(
result = export_urls_with_fallback(
target_id=target_id,
output_path=output_file,
queryset=queryset,
batch_size=batch_size
output_file=output_file,
sources=[DataSource.WEBSITE, DataSource.DEFAULT],
batch_size=batch_size,
)
logger.info(
"站点 URL 导出完成 - source=%s, count=%d",
result['source'], result['total_count']
)
# 保持返回值格式不变(向后兼容)
return {
'success': result['success'],
'output_file': result['output_file'],
'total_count': result['total_count']
'total_count': result['total_count'],
}

View File

@@ -2,15 +2,17 @@
导出 URL 任务
用于指纹识别前导出目标下的 URL 到文件
使用 TargetExportService 统一处理导出逻辑和默认值回退
使用 export_urls_with_fallback 用例函数处理回退链逻辑
"""
import logging
from prefect import task
from apps.asset.models import WebSite
from apps.scan.services import TargetExportService, BlacklistService
from apps.scan.services.target_export_service import (
export_urls_with_fallback,
DataSource,
)
logger = logging.getLogger(__name__)
@@ -19,47 +21,40 @@ logger = logging.getLogger(__name__)
def export_urls_for_fingerprint_task(
target_id: int,
output_file: str,
source: str = 'website',
source: str = 'website', # 保留参数,兼容旧调用(实际值由回退链决定)
batch_size: int = 1000
) -> dict:
"""
导出目标下的 URL 到文件(用于指纹识别)
数据源: WebSite.url
懒加载模式:
- 如果数据库为空,根据 Target 类型生成默认 URL
- DOMAIN: http(s)://domain
- IP: http(s)://ip
- CIDR: 展开为所有 IP 的 URL
- URL: 直接使用目标 URL
数据源优先级(回退链):
1. WebSite 表 - 站点级别 URL
2. 默认生成 - 根据 Target 类型生成 http(s)://target_name
Args:
target_id: 目标 ID
output_file: 输出文件路径
source: 数据源类型(保留参数,兼容旧调用)
source: 数据源类型(保留参数,兼容旧调用,实际值由回退链决定
batch_size: 批量读取大小
Returns:
dict: {'output_file': str, 'total_count': int, 'source': str}
"""
# 构建数据源 querysetTask 层决定数据源)
queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)
# 使用 TargetExportService 处理导出
blacklist_service = BlacklistService()
export_service = TargetExportService(blacklist_service=blacklist_service)
result = export_service.export_urls(
result = export_urls_with_fallback(
target_id=target_id,
output_path=output_file,
queryset=queryset,
batch_size=batch_size
output_file=output_file,
sources=[DataSource.WEBSITE, DataSource.DEFAULT],
batch_size=batch_size,
)
# 保持返回值格式不变(向后兼容)
logger.info(
"指纹识别 URL 导出完成 - source=%s, count=%d",
result['source'], result['total_count']
)
# Return the data source actually used (no longer hardcoded to "website")
return {
'output_file': result['output_file'],
'total_count': result['total_count'],
'source': source
'source': result['source'],
}

View File

@@ -4,12 +4,12 @@
提供端口扫描流程所需的原子化任务
"""
from .export_scan_targets_task import export_scan_targets_task
from .export_hosts_task import export_hosts_task
from .run_and_stream_save_ports_task import run_and_stream_save_ports_task
from .types import PortScanRecord
__all__ = [
'export_scan_targets_task',
'export_hosts_task',
'run_and_stream_save_ports_task',
'PortScanRecord',
]

View File

@@ -1,7 +1,7 @@
"""
Task: export scan targets to a TXT file
Task: export the host list to a TXT file
Uses TargetExportService.export_targets() for the export logic
Uses TargetExportService.export_hosts() for the export logic
Content is decided by Target type:
- DOMAIN: export subdomains from the Subdomain table
@@ -11,19 +11,19 @@
import logging
from prefect import task
from apps.scan.services import TargetExportService, BlacklistService
from apps.scan.services.target_export_service import create_export_service
logger = logging.getLogger(__name__)
@task(name="export_scan_targets")
def export_scan_targets_task(
@task(name="export_hosts")
def export_hosts_task(
target_id: int,
output_file: str,
batch_size: int = 1000
) -> dict:
"""
Export scan targets to a TXT file
Export the host list to a TXT file
Content is decided automatically by Target type:
- DOMAIN: export subdomains from the Subdomain table (streamed; scales to very large domain counts)
@@ -47,11 +47,10 @@ def export_scan_targets_task(
ValueError: Target 不存在
IOError: 文件写入失败
"""
# 使用 TargetExportService 处理导出
blacklist_service = BlacklistService()
export_service = TargetExportService(blacklist_service=blacklist_service)
# 使用工厂函数创建导出服务
export_service = create_export_service(target_id)
result = export_service.export_targets(
result = export_service.export_hosts(
target_id=target_id,
output_path=output_file,
batch_size=batch_size

View File

@@ -0,0 +1,12 @@
"""
截图任务模块
包含截图相关的所有任务:
- capture_screenshots_task: 批量截图任务
"""
from .capture_screenshots_task import capture_screenshots_task
__all__ = [
'capture_screenshots_task',
]

View File

@@ -0,0 +1,194 @@
"""
批量截图任务
使用 Playwright 批量捕获网站截图,压缩后保存到数据库
"""
import asyncio
import logging
import time
from prefect import task
logger = logging.getLogger(__name__)
def _run_async(coro):
"""
Run an async coroutine from synchronous code
Args:
coro: coroutine to run
Returns:
the coroutine's result
"""
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(coro)
def _save_screenshot_with_retry(
screenshot_service,
scan_id: int,
url: str,
webp_data: bytes,
status_code: int | None = None,
max_retries: int = 3
) -> bool:
"""
Save a screenshot to the database (with retries)
Args:
screenshot_service: ScreenshotService instance
scan_id: scan ID
url: URL
webp_data: WebP image data
status_code: HTTP response status code
max_retries: maximum number of attempts
Returns:
whether the save succeeded
"""
for attempt in range(max_retries):
try:
if screenshot_service.save_screenshot_snapshot(scan_id, url, webp_data, status_code):
return True
# save returned False; wait and retry
if attempt < max_retries - 1:
wait_time = 2 ** attempt # exponential backoff: 1s, 2s (no sleep after the final attempt)
logger.warning(
"Saving screenshot failed (attempt %d), retrying in %ds: %s",
attempt + 1, wait_time, url
)
time.sleep(wait_time)
except Exception as e:
if attempt < max_retries - 1:
wait_time = 2 ** attempt
logger.warning(
"Exception while saving screenshot (attempt %d), retrying in %ds: %s, error: %s",
attempt + 1, wait_time, url, str(e)[:100]
)
time.sleep(wait_time)
else:
logger.error("Failed to save screenshot after %d attempts: %s", max_retries, url)
return False
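# Worked schedule for max_retries=3: attempt 1 -> wait 1s (2**0) -> attempt 2
# -> wait 2s (2**1) -> attempt 3 -> give up (no sleep after the last attempt).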
async def _capture_and_save_screenshots(
urls: list[str],
scan_id: int,
concurrency: int
) -> dict:
"""
Capture and save screenshots in batch (async)
Args:
urls: URL list
scan_id: scan ID
concurrency: concurrency level
Returns:
statistics dict
"""
from asgiref.sync import sync_to_async
from apps.asset.services.playwright_screenshot_service import PlaywrightScreenshotService
from apps.asset.services.screenshot_service import ScreenshotService
# Initialize services
playwright_service = PlaywrightScreenshotService(concurrency=concurrency)
screenshot_service = ScreenshotService()
# Wrap the synchronous save function for async use
async_save_with_retry = sync_to_async(_save_screenshot_with_retry, thread_sensitive=True)
# Statistics
total = len(urls)
successful = 0
failed = 0
logger.info("Starting batch screenshot capture - URLs: %d, concurrency: %d", total, concurrency)
# Batch capture
async for url, screenshot_bytes, status_code in playwright_service.capture_batch(urls):
if screenshot_bytes is None:
failed += 1
continue
# Compress to WebP
webp_data = screenshot_service.compress_from_bytes(screenshot_bytes)
if webp_data is None:
logger.warning("Failed to compress screenshot: %s", url)
failed += 1
continue
# Save to the database (with retries, via sync_to_async)
if await async_save_with_retry(screenshot_service, scan_id, url, webp_data, status_code):
successful += 1
if successful % 10 == 0:
logger.info("Screenshot progress: %d/%d succeeded", successful, total)
else:
failed += 1
return {
'total': total,
'successful': successful,
'failed': failed
}
@task(name='capture_screenshots', retries=0)
def capture_screenshots_task(
urls: list[str],
scan_id: int,
target_id: int,
config: dict
) -> dict:
"""
批量截图任务
Args:
urls: URL 列表
scan_id: 扫描 ID
target_id: 目标 ID用于日志
config: 截图配置
- concurrency: 并发数(默认 5
Returns:
dict: {
'total': int, # 总 URL 数
'successful': int, # 成功截图数
'failed': int # 失败数
}
"""
if not urls:
logger.info("URL 列表为空,跳过截图任务")
return {'total': 0, 'successful': 0, 'failed': 0}
concurrency = config.get('concurrency', 5)
logger.info(
"Starting screenshot task - scan_id=%d, target_id=%d, URLs=%d, concurrency=%d",
scan_id, target_id, len(urls), concurrency
)
try:
result = _run_async(_capture_and_save_screenshots(
urls=urls,
scan_id=scan_id,
concurrency=concurrency
))
logger.info(
"✓ Screenshot task finished - total: %d, successful: %d, failed: %d",
result['total'], result['successful'], result['failed']
)
return result
except Exception as e:
logger.error("截图任务失败: %s", e, exc_info=True)
raise RuntimeError(f"截图任务失败: {e}") from e
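
For orientation, this is roughly how the task would be invoked from a Prefect flow — a hedged sketch, since the project's actual flow definitions are not part of this diff (the import path and flow name are assumptions):

```python
from prefect import flow

from apps.scan.tasks.screenshot import capture_screenshots_task  # assumed import path

@flow(name="screenshot_flow")  # hypothetical flow; the real orchestration may differ
def screenshot_flow(scan_id: int, target_id: int, urls: list[str]) -> dict:
    # Calling a @task inside a @flow runs it under Prefect's task runner.
    return capture_screenshots_task(
        urls=urls,
        scan_id=scan_id,
        target_id=target_id,
        config={"concurrency": 5},  # matches the documented default
    )
```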

View File

@@ -2,7 +2,7 @@
Task: export site URLs to a file
Queries host+port combinations straight from the HostPortMapping table, joins them into URLs, and writes them to a file
Uses TargetExportService to handle the default-value fallback logic
Uses TargetExportService.generate_default_urls() to handle the default-value fallback logic
Special rules:
- Port 80: generate only the HTTP URL (omit the port number)
@@ -14,7 +14,9 @@ from pathlib import Path
from prefect import task
from apps.asset.services import HostPortMappingService
from apps.scan.services import TargetExportService, BlacklistService
from apps.scan.services.target_export_service import create_export_service
from apps.common.services import BlacklistService
from apps.common.utils import BlacklistFilter
logger = logging.getLogger(__name__)
@@ -44,18 +46,15 @@ def export_site_urls_task(
"""
Export all site URLs under a target to a file (based on the HostPortMapping table)
Data source: HostPortMapping (host + port)
Data source: HostPortMapping (host + port) → Default
Special rules:
- Port 80: generate only the HTTP URL (omit the port number)
- Port 443: generate only the HTTPS URL (omit the port number)
- Other ports: generate both HTTP and HTTPS URLs (with the port number)
Lazy-loading mode:
- If the database is empty, generate default URLs based on the Target type
- DOMAIN: http(s)://domain
- IP: http(s)://ip
- CIDR: expand to URLs for every IP
Fallback logic:
- If HostPortMapping is empty, use generate_default_urls() to generate default URLs
Args:
target_id: target ID
@@ -67,7 +66,8 @@ def export_site_urls_task(
'success': bool,
'output_file': str,
'total_urls': int,
'association_count': int # number of host-port associations
'association_count': int, # number of host-port associations
'source': str, # data source: "host_port" | "default"
}
Raises:
@@ -80,8 +80,8 @@ def export_site_urls_task(
output_path = Path(output_file)
output_path.parent.mkdir(parents=True, exist_ok=True)
# Initialize the blacklist service
blacklist_service = BlacklistService()
# Fetch the rules and build the filter
blacklist_filter = BlacklistFilter(BlacklistService().get_rules(target_id))
# Query the HostPortMapping table directly, ordered by host
service = HostPortMappingService()
@@ -92,6 +92,7 @@ def export_site_urls_task(
total_urls = 0
association_count = 0
filtered_count = 0
# Stream-write the file (special port rules)
with open(output_path, 'w', encoding='utf-8', buffering=8192) as f:
@@ -100,28 +101,53 @@ def export_site_urls_task(
host = assoc['host']
port = assoc['port']
# Validate the host first; only generate URLs once it passes
if not blacklist_filter.is_allowed(host):
filtered_count += 1
continue
# Generate URLs based on the port number
for url in _generate_urls_from_port(host, port):
if blacklist_service.filter_url(url):
f.write(f"{url}\n")
total_urls += 1
f.write(f"{url}\n")
total_urls += 1
if association_count % 1000 == 0:
logger.info("已处理 %d 条关联,生成 %d 个URL...", association_count, total_urls)
if filtered_count > 0:
logger.info("黑名单过滤: 过滤 %d 条关联", filtered_count)
logger.info(
"✓ 站点URL导出完成 - 关联数: %d, 总URL数: %d, 文件: %s",
association_count, total_urls, str(output_path)
)
# Default-value fallback mode: use TargetExportService
# Determine the data source
source = "host_port"
# Data exists but was entirely filtered out; do not fall back
if association_count > 0 and total_urls == 0:
logger.info("HostPortMapping 有 %d 条数据,但全被黑名单过滤,不回退", association_count)
return {
'success': True,
'output_file': str(output_path),
'total_urls': 0,
'association_count': association_count,
'source': source,
}
# The data source is empty; fall back to default URL generation
if total_urls == 0:
export_service = TargetExportService(blacklist_service=blacklist_service)
total_urls = export_service._generate_default_urls(target_id, output_path)
logger.info("HostPortMapping 为空,使用默认 URL 生成")
export_service = create_export_service(target_id)
result = export_service.generate_default_urls(target_id, str(output_path))
total_urls = result['total_count']
source = "default"
return {
'success': True,
'output_file': str(output_path),
'total_urls': total_urls,
'association_count': association_count
'association_count': association_count,
'source': source,
}
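
The port-to-URL helper referenced above, `_generate_urls_from_port`, is not shown in this diff; given the special rules in the docstring, a minimal sketch would be:

```python
from collections.abc import Iterator

def _generate_urls_from_port(host: str, port: int) -> Iterator[str]:
    """Sketch of the documented port rules (the real helper may differ in details)."""
    if port == 80:
        yield f"http://{host}"          # port 80: HTTP only, port omitted
    elif port == 443:
        yield f"https://{host}"         # port 443: HTTPS only, port omitted
    else:
        yield f"http://{host}:{port}"   # other ports: both schemes, port kept
        yield f"https://{host}:{port}"
```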

View File

@@ -341,11 +341,12 @@ def _save_batch(
)
snapshot_items.append(snapshot_dto)
except Exception as e:
logger.error("处理记录失败: %s,错误: %s", record.url, e)
continue
# ========== Step 3: Save snapshots and sync to asset tables (via the snapshot Service) ==========
# ========== Step 2: Save snapshots and sync to asset tables (via the snapshot Service) ==========
if snapshot_items:
services.snapshot.save_and_sync(snapshot_items)

View File

@@ -111,6 +111,7 @@ def save_domains_task(
continue
# Only validated domains are added to the batch and counted
# Note: no blacklist filtering here, to maximize asset discovery
batch.append(domain)
total_domains += 1

View File

@@ -1,16 +1,17 @@
"""
Task: export the site URL list
Uses TargetExportService to centralize the export logic and default-value fallback
Data source: WebSite.url (for crawlers such as katana)
Uses the export_urls_with_fallback use-case function to handle the fallback chain
Data source: WebSite.url → Default (for crawlers such as katana)
"""
import logging
from prefect import task
from typing import Optional
from apps.asset.models import WebSite
from apps.scan.services import TargetExportService, BlacklistService
from apps.scan.services.target_export_service import (
export_urls_with_fallback,
DataSource,
)
logger = logging.getLogger(__name__)
@@ -29,13 +30,9 @@ def export_sites_task(
"""
Export the site URL list to a file (for crawlers such as katana)
Data source: WebSite.url
Lazy-loading mode:
- If the database is empty, generate default URLs based on the Target type
- DOMAIN: http(s)://domain
- IP: http(s)://ip
- CIDR: expand to URLs for every IP
Data source priority (fallback chain):
1. WebSite table - site-level URLs
2. Default generation - http(s)://target_name based on the Target type
Args:
output_file: output file path
@@ -53,18 +50,16 @@ def export_sites_task(
ValueError: invalid arguments
RuntimeError: execution failed
"""
# Build the data-source queryset (the Task layer decides the data source)
queryset = WebSite.objects.filter(target_id=target_id).values_list('url', flat=True)
# Handle the export via TargetExportService
blacklist_service = BlacklistService()
export_service = TargetExportService(blacklist_service=blacklist_service)
result = export_service.export_urls(
result = export_urls_with_fallback(
target_id=target_id,
output_path=output_file,
queryset=queryset,
batch_size=batch_size
output_file=output_file,
sources=[DataSource.WEBSITE, DataSource.DEFAULT],
batch_size=batch_size,
)
logger.info(
"站点 URL 导出完成 - source=%s, count=%d",
result['source'], result['total_count']
)
# Keep the return format unchanged (backward compatible)

View File

@@ -1,16 +1,22 @@
"""导出 Endpoint URL 到文件的 Task
使用 TargetExportService 统一处理导出逻辑和默认值回退
数据源: Endpoint.url
使用 export_urls_with_fallback 用例函数处理回退链逻辑
数据源优先级(回退链):
1. Endpoint.url - 最精细的 URL含路径、参数等
2. WebSite.url - 站点级别 URL
3. 默认生成 - 根据 Target 类型生成 http(s)://target_name
"""
import logging
from typing import Dict, Optional
from typing import Dict
from prefect import task
from apps.asset.models import Endpoint
from apps.scan.services import TargetExportService, BlacklistService
from apps.scan.services.target_export_service import (
export_urls_with_fallback,
DataSource,
)
logger = logging.getLogger(__name__)
@@ -23,13 +29,10 @@ def export_endpoints_task(
) -> Dict[str, object]:
"""导出目标下的所有 Endpoint URL 到文本文件。
数据源: Endpoint.url
懒加载模式:
- 如果数据库为空,根据 Target 类型生成默认 URL
- DOMAIN: http(s)://domain
- IP: http(s)://ip
- CIDR: 展开为所有 IP 的 URL
数据源优先级(回退链):
1. Endpoint 表 - 最精细的 URL含路径、参数等
2. WebSite 表 - 站点级别 URL
3. 默认生成 - 根据 Target 类型生成 http(s)://target_name
Args:
target_id: target ID
@@ -41,25 +44,24 @@ def export_endpoints_task(
"success": bool,
"output_file": str,
"total_count": int,
"source": str, # 数据来源: "endpoint" | "website" | "default" | "none"
}
"""
# Build the data-source queryset (the Task layer decides the data source)
queryset = Endpoint.objects.filter(target_id=target_id).values_list('url', flat=True)
# Handle the export via TargetExportService
blacklist_service = BlacklistService()
export_service = TargetExportService(blacklist_service=blacklist_service)
result = export_service.export_urls(
result = export_urls_with_fallback(
target_id=target_id,
output_path=output_file,
queryset=queryset,
batch_size=batch_size
output_file=output_file,
sources=[DataSource.ENDPOINT, DataSource.WEBSITE, DataSource.DEFAULT],
batch_size=batch_size,
)
logger.info(
"URL 导出完成 - source=%s, count=%d, tried=%s",
result['source'], result['total_count'], result['tried_sources']
)
# Keep the return format unchanged (backward compatible)
return {
"success": result['success'],
"output_file": result['output_file'],
"total_count": result['total_count'],
"source": result['source'],
}
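
Reading the two call sites above, `export_urls_with_fallback` apparently walks the `sources` list and stops at the first source that yields data. Its implementation is not in this diff; a minimal sketch under those assumptions (the `_export_from_source` helper is hypothetical):

```python
from enum import Enum

class DataSource(Enum):
    # Member names taken from the call sites above; values assumed from the documented results.
    ENDPOINT = "endpoint"
    WEBSITE = "website"
    DEFAULT = "default"

def export_urls_with_fallback(target_id: int, output_file: str,
                              sources: list[DataSource], batch_size: int = 1000) -> dict:
    tried = []
    for source in sources:
        tried.append(source.value)
        # Hypothetical helper: export URLs for one source, return how many were written.
        count = _export_from_source(source, target_id, output_file, batch_size)
        if count > 0:
            return {'success': True, 'output_file': output_file,
                    'total_count': count, 'source': source.value, 'tried_sources': tried}
    # Every source was empty: "none" matches the documented result values.
    return {'success': True, 'output_file': output_file,
            'total_count': 0, 'source': 'none', 'tried_sources': tried}
```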

View File

@@ -4,7 +4,8 @@ from .views import ScanViewSet, ScheduledScanViewSet, ScanLogListView, Subfinder
from .notifications.views import notification_callback
from apps.asset.views import (
SubdomainSnapshotViewSet, WebsiteSnapshotViewSet, DirectorySnapshotViewSet,
EndpointSnapshotViewSet, HostPortMappingSnapshotViewSet, VulnerabilitySnapshotViewSet
EndpointSnapshotViewSet, HostPortMappingSnapshotViewSet, VulnerabilitySnapshotViewSet,
ScreenshotSnapshotViewSet
)
# Create the router
@@ -26,6 +27,8 @@ scan_endpoints_export = EndpointSnapshotViewSet.as_view({'get': 'export'})
scan_ip_addresses_list = HostPortMappingSnapshotViewSet.as_view({'get': 'list'})
scan_ip_addresses_export = HostPortMappingSnapshotViewSet.as_view({'get': 'export'})
scan_vulnerabilities_list = VulnerabilitySnapshotViewSet.as_view({'get': 'list'})
scan_screenshots_list = ScreenshotSnapshotViewSet.as_view({'get': 'list'})
scan_screenshots_image = ScreenshotSnapshotViewSet.as_view({'get': 'image'})
urlpatterns = [
path('', include(router.urls)),
@@ -47,5 +50,7 @@ urlpatterns = [
path('scans/<int:scan_pk>/ip-addresses/', scan_ip_addresses_list, name='scan-ip-addresses-list'),
path('scans/<int:scan_pk>/ip-addresses/export/', scan_ip_addresses_export, name='scan-ip-addresses-export'),
path('scans/<int:scan_pk>/vulnerabilities/', scan_vulnerabilities_list, name='scan-vulnerabilities-list'),
path('scans/<int:scan_pk>/screenshots/', scan_screenshots_list, name='scan-screenshots-list'),
path('scans/<int:scan_pk>/screenshots/<int:pk>/image/', scan_screenshots_image, name='scan-screenshots-image'),
]

View File

@@ -3,6 +3,7 @@ from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.exceptions import NotFound, APIException
from rest_framework.filters import SearchFilter
from django_filters.rest_framework import DjangoFilterBackend
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.utils import DatabaseError, IntegrityError, OperationalError
import logging
@@ -33,7 +34,8 @@ class ScanViewSet(viewsets.ModelViewSet):
"""扫描任务视图集"""
serializer_class = ScanSerializer
pagination_class = BasePagination
filter_backends = [SearchFilter]
filter_backends = [DjangoFilterBackend, SearchFilter]
filterset_fields = ['target'] # supports ?target=123 filtering
search_fields = ['target__name'] # search by target name
def get_queryset(self):
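
With `DjangoFilterBackend` enabled, target filtering and name search combine on the list endpoint; a hedged client-side sketch (the base URL, `/api/scans/` prefix, and open access are assumptions):

```python
import requests

resp = requests.get(
    "http://localhost:8000/api/scans/",
    params={"target": 123, "search": "example.com"},  # ?target=123 plus name search
)
resp.raise_for_status()
scans = resp.json()
```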

View File

@@ -37,6 +37,11 @@ class ScheduledScanViewSet(viewsets.ModelViewSet):
- PUT /scheduled-scans/{id}/ Update a scheduled scan
- DELETE /scheduled-scans/{id}/ Delete a scheduled scan
- POST /scheduled-scans/{id}/toggle/ Toggle the enabled state
Query parameters:
- target_id: filter by target ID
- organization_id: filter by organization ID
- search: search by name
"""
queryset = ScheduledScan.objects.all().order_by('-created_at')
@@ -49,6 +54,19 @@ class ScheduledScanViewSet(viewsets.ModelViewSet):
super().__init__(*args, **kwargs)
self.service = ScheduledScanService()
def get_queryset(self):
"""支持按 target_id 和 organization_id 过滤"""
queryset = super().get_queryset()
target_id = self.request.query_params.get('target_id')
organization_id = self.request.query_params.get('organization_id')
if target_id:
queryset = queryset.filter(target_id=target_id)
if organization_id:
queryset = queryset.filter(organization_id=organization_id)
return queryset
def get_serializer_class(self):
"""根据 action 返回不同的序列化器"""
if self.action == 'create':

View File

@@ -1,4 +1,4 @@
# Generated by Django 5.2.7 on 2026-01-02 04:45
# Generated by Django 5.2.7 on 2026-01-06 00:55
from django.db import migrations, models

View File

@@ -119,6 +119,7 @@ class TargetDetailSerializer(serializers.ModelSerializer):
- endpoints: endpoint count
- ips: IP address count
- directories: directory count
- screenshots: screenshot count
- vulnerabilities: vulnerability statistics (currently returns 0; to be implemented later)
Performance notes:
@@ -134,6 +135,7 @@ class TargetDetailSerializer(serializers.ModelSerializer):
endpoints_count = obj.endpoints.count()
ips_count = obj.host_port_mappings.values('ip').distinct().count()
directories_count = obj.directories.count()
screenshots_count = obj.screenshots.count()
# Vulnerability stats: computed in real time from the Vulnerability asset table, per target
vuln_qs = obj.vulnerabilities.all()
@@ -159,6 +161,7 @@ class TargetDetailSerializer(serializers.ModelSerializer):
'endpoints': endpoints_count,
'ips': ips_count,
'directories': directories_count,
'screenshots': screenshots_count,
'vulnerabilities': {
'total': total,
**severity_stats,
@@ -182,12 +185,12 @@ class BatchCreateTargetSerializer(serializers.Serializer):
Serializer for batch target creation
Security limits:
- Supports batch creation of at most 1000 targets
- Supports batch creation of at most 5000 targets
- Prevents malicious users from overloading the server with bulk submissions
"""
# Maximum number of targets per batch
MAX_BATCH_SIZE = 1000
MAX_BATCH_SIZE = 5000
# Target list
targets = serializers.ListField(

View File

@@ -99,31 +99,6 @@ class TargetService:
# ==================== Create operations ====================
def create_or_get_target(
self,
name: str,
target_type: str
) -> Tuple[Target, bool]:
"""
创建或获取目标
Args:
name: 目标名称
target_type: 目标类型
Returns:
(Target对象, 是否新创建)
"""
logger.debug("创建或获取目标 - Name: %s, Type: %s", name, target_type)
target, created = self.repo.get_or_create(name, target_type)
if created:
logger.info("创建新目标 - ID: %s, Name: %s", target.id, name)
else:
logger.debug("目标已存在 - ID: %s, Name: %s", target.id, name)
return target, created
def batch_create_targets(
self,
targets_data: List[Dict[str, Any]],

View File

@@ -3,7 +3,8 @@ from rest_framework.routers import DefaultRouter
from .views import OrganizationViewSet, TargetViewSet
from apps.asset.views import (
SubdomainViewSet, WebSiteViewSet, DirectoryViewSet,
EndpointViewSet, HostPortMappingViewSet, VulnerabilityViewSet
EndpointViewSet, HostPortMappingViewSet, VulnerabilityViewSet,
ScreenshotViewSet
)
# Create the router
@@ -29,6 +30,8 @@ target_endpoints_bulk_create = EndpointViewSet.as_view({'post': 'bulk_create'})
target_ip_addresses_list = HostPortMappingViewSet.as_view({'get': 'list'})
target_ip_addresses_export = HostPortMappingViewSet.as_view({'get': 'export'})
target_vulnerabilities_list = VulnerabilityViewSet.as_view({'get': 'list'})
target_screenshots_list = ScreenshotViewSet.as_view({'get': 'list'})
target_screenshots_bulk_delete = ScreenshotViewSet.as_view({'post': 'bulk_delete'})
urlpatterns = [
path('', include(router.urls)),
@@ -48,4 +51,6 @@ urlpatterns = [
path('targets/<int:target_pk>/ip-addresses/', target_ip_addresses_list, name='target-ip-addresses-list'),
path('targets/<int:target_pk>/ip-addresses/export/', target_ip_addresses_export, name='target-ip-addresses-export'),
path('targets/<int:target_pk>/vulnerabilities/', target_vulnerabilities_list, name='target-vulnerabilities-list'),
path('targets/<int:target_pk>/screenshots/', target_screenshots_list, name='target-screenshots-list'),
path('targets/<int:target_pk>/screenshots/bulk-delete/', target_screenshots_bulk_delete, name='target-screenshots-bulk-delete'),
]

View File

@@ -11,6 +11,8 @@ from .services.target_service import TargetService
from .services.organization_service import OrganizationService
from apps.common.pagination import BasePagination
from apps.common.response_helpers import success_response
from apps.common.models import BlacklistRule
from apps.common.serializers import TargetBlacklistRuleSerializer
logger = logging.getLogger(__name__)
@@ -405,3 +407,48 @@ class TargetViewSet(viewsets.ModelViewSet):
# GET /api/targets/{id}/ip-addresses/ -> HostPortMappingViewSet
# GET /api/targets/{id}/ip-addresses/export/ -> HostPortMappingViewSet.export
# GET /api/targets/{id}/vulnerabilities/ -> VulnerabilityViewSet
# ==================== Blacklist management ====================
@action(detail=True, methods=['get', 'put'], url_path='blacklist')
def blacklist(self, request, pk=None):
"""
Target 黑名单规则管理
GET /api/targets/{id}/blacklist/ - 获取 Target 黑名单列表
PUT /api/targets/{id}/blacklist/ - 全量替换规则(文本框保存场景)
设计说明:
- 使用 PUT 全量替换模式,适合"文本框每行一个规则"的前端场景
- 用户编辑文本框 -> 点击保存 -> 后端全量替换
架构MVS 模式
- View: 参数验证、响应格式化
- Service: 业务逻辑BlacklistService
- Model: 数据持久化BlacklistRule
"""
from apps.common.services import BlacklistService
target = self.get_object()
blacklist_service = BlacklistService()
if request.method == 'GET':
# Fetch the Target's blacklist rules
rules = blacklist_service.get_target_rules(target.id)
patterns = list(rules.values_list('pattern', flat=True))
return success_response(data={'patterns': patterns})
elif request.method == 'PUT':
# Full replacement
patterns = request.data.get('patterns', [])
if not isinstance(patterns, list):
return Response(
{'error': {'code': 'VALIDATION_ERROR', 'message': 'patterns must be an array'}},
status=status.HTTP_400_BAD_REQUEST
)
# Delegate the full replacement to the Service layer
result = blacklist_service.replace_target_rules(target, patterns)
return success_response(data=result)
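
End to end, the textarea-save round trip described above looks roughly like this from a client's point of view — a usage sketch, with the base URL, target ID, and unauthenticated access all assumed:

```python
import requests

BASE = "http://localhost:8000/api"  # assumed deployment URL and prefix

# GET: read the current rules for target 1
current = requests.get(f"{BASE}/targets/1/blacklist/").json()

# PUT: full replacement -- exactly what the frontend textarea save performs.
# The example patterns mirror the rule formats shown in the settings UI.
requests.put(
    f"{BASE}/targets/1/blacklist/",
    json={"patterns": ["*.gov", "*cdn*", "192.168.1.1", "10.0.0.0/8"]},
)
```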

View File

@@ -51,6 +51,7 @@ INSTALLED_APPS = [
'django.contrib.staticfiles',
# Third-party apps
'rest_framework',
'django_filters', # filter support for DRF
'drf_yasg',
'corsheaders',
'channels', # WebSocket support

View File

@@ -11,6 +11,9 @@ setuptools==75.6.0
# CORS support
django-cors-headers==4.3.1
# Filter support
django-filter==24.3
# Environment variable management
python-dotenv==1.0.1
@@ -38,6 +41,8 @@ pytest-django==4.7.0
# Utility libraries
python-dateutil==2.9.0
Pillow>=10.0.0 # image processing (screenshot service)
playwright>=1.40.0 # browser automation (screenshot service)
pytz==2024.1
validators==0.22.0
PyYAML==6.0.1

View File

@@ -639,19 +639,19 @@ class TestDataGenerator:
target_id, engine_ids, engine_names, yaml_configuration, status, worker_id, progress, current_stage,
results_dir, error_message, container_ids, stage_progress,
cached_subdomains_count, cached_websites_count, cached_endpoints_count,
cached_ips_count, cached_directories_count, cached_vulns_total,
cached_ips_count, cached_directories_count, cached_screenshots_count, cached_vulns_total,
cached_vulns_critical, cached_vulns_high, cached_vulns_medium, cached_vulns_low,
created_at, stopped_at, deleted_at
) VALUES (
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
NOW() - INTERVAL '%s days', %s, NULL
)
RETURNING id
""", (
target_id, selected_engine_ids, json.dumps(selected_engine_names), '', status, worker_id, progress, stage,
f'/app/results/scan_{target_id}_{random.randint(1000, 9999)}', error_msg, '{}', '{}',
subdomains, websites, endpoints, ips, directories, vulns_total,
subdomains, websites, endpoints, ips, directories, 0, vulns_total,
vulns_critical, vulns_high, vulns_medium, vulns_low,
days_ago,
datetime.now() - timedelta(days=days_ago, hours=random.randint(0, 23)) if status in ['completed', 'failed', 'cancelled'] else None

View File

@@ -2,7 +2,7 @@
# ============================================
# XingRin remote node installation script
# Purpose: install the Docker environment + pre-pull images
# Supported: Ubuntu / Debian
# Supported: Ubuntu / Debian / Kali
#
# Architecture notes:
# 1. Install the Docker environment
@@ -101,8 +101,8 @@ detect_os() {
exit 1
fi
if [[ "$OS" != "ubuntu" && "$OS" != "debian" ]]; then
log_error "仅支持 Ubuntu/Debian 系统"
if [[ "$OS" != "ubuntu" && "$OS" != "debian" && "$OS" != "kali" ]]; then
log_error "仅支持 Ubuntu/Debian/Kali 系统"
exit 1
fi
}

View File

@@ -17,7 +17,8 @@
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
DOCKER_DIR="$(dirname "$SCRIPT_DIR")"
cd "$DOCKER_DIR"
# Colored output
GREEN='\033[0;32m'
@@ -33,7 +34,9 @@ log_step() { echo -e " ${CYAN}>>${NC} $1"; }
# Check whether the service is running
check_server() {
if ! docker compose ps --status running 2>/dev/null | grep -q "server"; then
# Use docker compose ps --format to read the service state
# This does not depend on the container name format; it only checks the service name
if ! docker compose ps --format '{{.Service}} {{.State}}' 2>/dev/null | grep -E "^server\s+running" > /dev/null; then
echo "Server container is not running; skipping data initialization"
return 1
fi
@@ -63,11 +66,7 @@ wait_for_server() {
run_migrations() {
log_step "执行数据库迁移..."
# 开发环境:先 makemigrations
if [ "$DEV_MODE" = "true" ]; then
docker compose exec -T server python backend/manage.py makemigrations --noinput 2>/dev/null || true
fi
# 迁移文件应手动生成并提交到仓库,这里只执行 migrate
docker compose exec -T server python backend/manage.py migrate --noinput
log_info "数据库迁移完成"
}

View File

@@ -3,26 +3,21 @@ set -e
echo "[START] 启动 XingRin Server..."
# 1. 生成和迁移数据库
echo " [1/3] 生成数据库迁移文件..."
# 1. 执行数据库迁移(迁移文件应提交到仓库,这里只执行 migrate
echo " [1/3] 执行数据库迁移..."
cd /app/backend
python manage.py makemigrations
echo " ✓ 迁移文件生成完成"
echo " [1.1/3] 执行数据库迁移..."
python manage.py migrate --noinput
echo " ✓ 数据库迁移完成"
echo " [1.2/3] 初始化默认扫描引擎..."
echo " [1.1/3] 初始化默认扫描引擎..."
python manage.py init_default_engine
echo " ✓ 默认扫描引擎已就绪"
echo " [1.3/3] 初始化默认目录字典..."
echo " [1.2/3] 初始化默认目录字典..."
python manage.py init_wordlists
echo " ✓ 默认目录字典已就绪"
echo " [1.4/3] 初始化默认指纹库..."
echo " [1.3/3] 初始化默认指纹库..."
python manage.py init_fingerprints
echo " ✓ 默认指纹库已就绪"

View File

@@ -155,7 +155,11 @@ fi
echo -e "${GREEN}[OK]${NC} 服务已启动"
# 数据初始化
./scripts/init-data.sh
if [ "$DEV_MODE" = true ]; then
./scripts/init-data.sh --dev
else
./scripts/init-data.sh
fi
# In quiet mode, do not print results (the caller prints them)
if [ "$QUIET_MODE" = true ]; then

View File

@@ -45,7 +45,9 @@ ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /app
# 1. Install base tools and Python
RUN apt-get update && apt-get install -y \
# Note: ARM64 uses ports.ubuntu.com, which can lag in mirror sync, so a retry mechanism is needed
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3 \
python3-pip \
python3-venv \
@@ -60,8 +62,32 @@ RUN apt-get update && apt-get install -y \
masscan \
libpcap-dev \
ca-certificates \
fonts-liberation \
libnss3 \
libxss1 \
libasound2t64 \
|| (rm -rf /var/lib/apt/lists/* && apt-get update && apt-get install -y --no-install-recommends \
python3 python3-pip python3-venv pipx git curl wget unzip jq tmux nmap masscan libpcap-dev \
ca-certificates fonts-liberation libnss3 libxss1 libasound2t64) \
&& rm -rf /var/lib/apt/lists/*
# Install Chromium (via Playwright; supports ARM64 and AMD64)
# On Ubuntu 24.04, chromium-browser is a snap transitional package and is unusable in Docker
RUN pip install playwright --break-system-packages && \
playwright install chromium && \
apt-get update && \
playwright install-deps chromium && \
rm -rf /var/lib/apt/lists/*
# Set the Chrome path for tools like httpx (Playwright install location)
ENV CHROME_PATH=/root/.cache/ms-playwright/chromium-*/chrome-linux/chrome
# Create symlinks so that httpx's -system-chrome can find the browser
RUN CHROME_BIN=$(find /root/.cache/ms-playwright -name chrome -type f 2>/dev/null | head -1) && \
ln -sf "$CHROME_BIN" /usr/bin/chromium-browser && \
ln -sf "$CHROME_BIN" /usr/bin/chromium && \
ln -sf "$CHROME_BIN" /usr/bin/chrome && \
ln -sf "$CHROME_BIN" /usr/bin/google-chrome-stable
# Create the python symlink
RUN ln -s /usr/bin/python3 /usr/bin/python

View File

@@ -54,10 +54,10 @@ flowchart TB
TARGET --> SUBLIST3R
TARGET --> ASSETFINDER
subgraph STAGE2["Stage 2: Analysis Parallel"]
subgraph STAGE2["Stage 2: URL Collection Parallel"]
direction TB
subgraph URL["URL Collection"]
subgraph URL["URL Fetch"]
direction TB
WAYMORE[waymore<br/>Historical URLs]
KATANA[katana<br/>Crawler]
@@ -78,7 +78,15 @@ flowchart TB
XINGFINGER --> KATANA
XINGFINGER --> FFUF
subgraph STAGE3["Stage 3: Vulnerability Sequential"]
subgraph STAGE3["Stage 3: Screenshot Sequential"]
direction TB
SCREENSHOT[Playwright<br/>Page Screenshot]
end
HTTPX2 --> SCREENSHOT
FFUF --> SCREENSHOT
subgraph STAGE4["Stage 4: Vulnerability Sequential"]
direction TB
subgraph VULN["Vulnerability Scan"]
@@ -88,12 +96,11 @@ flowchart TB
end
end
HTTPX2 --> DALFOX
HTTPX2 --> NUCLEI
SCREENSHOT --> DALFOX
SCREENSHOT --> NUCLEI
DALFOX --> FINISH
NUCLEI --> FINISH
FFUF --> FINISH
FINISH[Scan Complete]
@@ -109,9 +116,14 @@ flowchart TB
```python
# backend/apps/scan/configs/command_templates.py
# Stage 1: Asset discovery - subdomains → ports → site probing → fingerprinting
# Stage 2: URL collection - URL fetch + directory scan (parallel)
# Stage 3: Screenshots - runs after URL collection so newly discovered pages are captured
# Stage 4: Vulnerability scan - runs last
EXECUTION_STAGES = [
{'mode': 'sequential', 'flows': ['subdomain_discovery', 'port_scan', 'site_scan', 'fingerprint_detect']},
{'mode': 'parallel', 'flows': ['url_fetch', 'directory_scan']},
{'mode': 'sequential', 'flows': ['screenshot']},
{'mode': 'sequential', 'flows': ['vuln_scan']},
]
```
@@ -126,4 +138,5 @@ EXECUTION_STAGES = [
| fingerprint_detect | xingfinger | WebSite.tech (updated) |
| url_fetch | waymore, katana, uro, httpx | Endpoint |
| directory_scan | ffuf | Directory |
| screenshot | Playwright | Screenshot |
| vuln_scan | dalfox, nuclei | Vulnerability |
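
For intuition, here is a minimal sketch of how a runner might consume `EXECUTION_STAGES`, honoring the `sequential`/`parallel` modes; this is illustrative only, not the project's actual orchestrator (which drives Prefect flows):

```python
from concurrent.futures import ThreadPoolExecutor

def run_stages(stages: list[dict], run_flow) -> None:
    """Illustrative runner: `run_flow` executes one named flow."""
    for stage in stages:
        if stage['mode'] == 'parallel':
            # Launch every flow in the stage concurrently and wait for all of them.
            with ThreadPoolExecutor(max_workers=len(stage['flows'])) as pool:
                list(pool.map(run_flow, stage['flows']))
        else:  # 'sequential'
            for flow_name in stage['flows']:
                run_flow(flow_name)
```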

BIN
docs/wx_pay.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 156 KiB

BIN
docs/zfb_pay.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 144 KiB

View File

@@ -34,6 +34,7 @@ const FEATURE_LIST = [
{ key: "site_scan" },
{ key: "fingerprint_detect" },
{ key: "directory_scan" },
{ key: "screenshot" },
{ key: "url_fetch" },
{ key: "vuln_scan" },
] as const
@@ -48,6 +49,7 @@ function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
site_scan: false,
fingerprint_detect: false,
directory_scan: false,
screenshot: false,
url_fetch: false,
vuln_scan: false,
}
@@ -64,6 +66,7 @@ function parseEngineFeatures(engine: ScanEngine): Record<FeatureKey, boolean> {
site_scan: !!config.site_scan,
fingerprint_detect: !!config.fingerprint_detect,
directory_scan: !!config.directory_scan,
screenshot: !!config.screenshot,
url_fetch: !!config.url_fetch,
vuln_scan: !!config.vuln_scan,
}

View File

@@ -3,9 +3,10 @@
import React from "react"
import { usePathname, useParams } from "next/navigation"
import Link from "next/link"
import { Target } from "lucide-react"
import { Target, LayoutDashboard, Package, Image, ShieldAlert } from "lucide-react"
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
import { Badge } from "@/components/ui/badge"
import { Skeleton } from "@/components/ui/skeleton"
import { useScan } from "@/hooks/use-scans"
import { useTranslations } from "next-intl"
@@ -19,104 +20,136 @@ export default function ScanHistoryLayout({
const { data: scanData, isLoading } = useScan(parseInt(id))
const t = useTranslations("scan.history")
const getActiveTab = () => {
if (pathname.includes("/subdomain")) return "subdomain"
if (pathname.includes("/endpoints")) return "endpoints"
if (pathname.includes("/websites")) return "websites"
if (pathname.includes("/directories")) return "directories"
// Get primary navigation active tab
const getPrimaryTab = () => {
if (pathname.includes("/overview")) return "overview"
if (pathname.includes("/screenshots")) return "screenshots"
if (pathname.includes("/vulnerabilities")) return "vulnerabilities"
if (pathname.includes("/ip-addresses")) return "ip-addresses"
return ""
// All asset pages fall under "assets"
if (
pathname.includes("/websites") ||
pathname.includes("/subdomain") ||
pathname.includes("/ip-addresses") ||
pathname.includes("/endpoints") ||
pathname.includes("/directories")
) {
return "assets"
}
return "overview"
}
// Get secondary navigation active tab (for assets)
const getSecondaryTab = () => {
if (pathname.includes("/websites")) return "websites"
if (pathname.includes("/subdomain")) return "subdomain"
if (pathname.includes("/ip-addresses")) return "ip-addresses"
if (pathname.includes("/endpoints")) return "endpoints"
if (pathname.includes("/directories")) return "directories"
return "websites"
}
// Check if we should show secondary navigation
const showSecondaryNav = getPrimaryTab() === "assets"
const basePath = `/scan/history/${id}`
const tabPaths = {
subdomain: `${basePath}/subdomain/`,
endpoints: `${basePath}/endpoints/`,
websites: `${basePath}/websites/`,
directories: `${basePath}/directories/`,
const primaryPaths = {
overview: `${basePath}/overview/`,
assets: `${basePath}/websites/`, // Default to websites when clicking assets
screenshots: `${basePath}/screenshots/`,
vulnerabilities: `${basePath}/vulnerabilities/`,
}
const secondaryPaths = {
websites: `${basePath}/websites/`,
subdomain: `${basePath}/subdomain/`,
"ip-addresses": `${basePath}/ip-addresses/`,
endpoints: `${basePath}/endpoints/`,
directories: `${basePath}/directories/`,
}
// Get counts for each tab from scan data
const summary = scanData?.summary as any
const counts = {
subdomain: scanData?.summary?.subdomains || 0,
endpoints: scanData?.summary?.endpoints || 0,
websites: scanData?.summary?.websites || 0,
directories: scanData?.summary?.directories || 0,
vulnerabilities: scanData?.summary?.vulnerabilities?.total || 0,
"ip-addresses": scanData?.summary?.ips || 0,
subdomain: summary?.subdomains || 0,
endpoints: summary?.endpoints || 0,
websites: summary?.websites || 0,
directories: summary?.directories || 0,
screenshots: summary?.screenshots || 0,
vulnerabilities: summary?.vulnerabilities?.total || 0,
"ip-addresses": summary?.ips || 0,
}
// Calculate total assets count
const totalAssets = counts.websites + counts.subdomain + counts["ip-addresses"] + counts.endpoints + counts.directories
// Loading state
if (isLoading) {
return (
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
{/* Header skeleton */}
<div className="flex items-center gap-2 px-4 lg:px-6">
<Skeleton className="h-4 w-16" />
<span className="text-muted-foreground">/</span>
<Skeleton className="h-4 w-32" />
</div>
{/* Tabs skeleton */}
<div className="flex gap-1 px-4 lg:px-6">
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-24" />
</div>
</div>
)
}
return (
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
<div className="flex items-center justify-between px-4 lg:px-6">
<div>
<h2 className="text-2xl font-bold tracking-tight flex items-center gap-2">
<Target />
Scan Results
</h2>
<p className="text-muted-foreground">{t("taskId", { id })}</p>
</div>
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6 h-full">
{/* Header: Page label + Scan info */}
<div className="flex items-center gap-2 text-sm px-4 lg:px-6">
<span className="text-muted-foreground">{t("breadcrumb.scanHistory")}</span>
<span className="text-muted-foreground">/</span>
<span className="font-medium flex items-center gap-1.5">
<Target className="h-4 w-4" />
{(scanData?.target as any)?.name || t("taskId", { id })}
</span>
</div>
<div className="flex items-center justify-between px-4 lg:px-6">
<Tabs value={getActiveTab()} className="w-full">
{/* Primary navigation */}
<div className="px-4 lg:px-6">
<Tabs value={getPrimaryTab()}>
<TabsList>
<TabsTrigger value="websites" asChild>
<Link href={tabPaths.websites} className="flex items-center gap-0.5">
Websites
{counts.websites > 0 && (
<TabsTrigger value="overview" asChild>
<Link href={primaryPaths.overview} className="flex items-center gap-1.5">
<LayoutDashboard className="h-4 w-4" />
{t("tabs.overview")}
</Link>
</TabsTrigger>
<TabsTrigger value="assets" asChild>
<Link href={primaryPaths.assets} className="flex items-center gap-1.5">
<Package className="h-4 w-4" />
{t("tabs.assets")}
{totalAssets > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.websites}
{totalAssets}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="subdomain" asChild>
<Link href={tabPaths.subdomain} className="flex items-center gap-0.5">
Subdomains
{counts.subdomain > 0 && (
<TabsTrigger value="screenshots" asChild>
<Link href={primaryPaths.screenshots} className="flex items-center gap-1.5">
<Image className="h-4 w-4" />
{t("tabs.screenshots")}
{counts.screenshots > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.subdomain}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="ip-addresses" asChild>
<Link href={tabPaths["ip-addresses"]} className="flex items-center gap-0.5">
IP Addresses
{counts["ip-addresses"] > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts["ip-addresses"]}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="endpoints" asChild>
<Link href={tabPaths.endpoints} className="flex items-center gap-0.5">
URLs
{counts.endpoints > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.endpoints}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="directories" asChild>
<Link href={tabPaths.directories} className="flex items-center gap-0.5">
Directories
{counts.directories > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.directories}
{counts.screenshots}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="vulnerabilities" asChild>
<Link href={tabPaths.vulnerabilities} className="flex items-center gap-0.5">
Vulnerabilities
<Link href={primaryPaths.vulnerabilities} className="flex items-center gap-1.5">
<ShieldAlert className="h-4 w-4" />
{t("tabs.vulnerabilities")}
{counts.vulnerabilities > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.vulnerabilities}
@@ -128,6 +161,67 @@ export default function ScanHistoryLayout({
</Tabs>
</div>
{/* Secondary navigation (only for assets) */}
{showSecondaryNav && (
<div className="flex items-center px-4 lg:px-6">
<Tabs value={getSecondaryTab()} className="w-full">
<TabsList variant="underline">
<TabsTrigger value="websites" variant="underline" asChild>
<Link href={secondaryPaths.websites} className="flex items-center gap-0.5">
Websites
{counts.websites > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.websites}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="subdomain" variant="underline" asChild>
<Link href={secondaryPaths.subdomain} className="flex items-center gap-0.5">
Subdomains
{counts.subdomain > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.subdomain}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="ip-addresses" variant="underline" asChild>
<Link href={secondaryPaths["ip-addresses"]} className="flex items-center gap-0.5">
IPs
{counts["ip-addresses"] > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts["ip-addresses"]}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="endpoints" variant="underline" asChild>
<Link href={secondaryPaths.endpoints} className="flex items-center gap-0.5">
URLs
{counts.endpoints > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.endpoints}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="directories" variant="underline" asChild>
<Link href={secondaryPaths.directories} className="flex items-center gap-0.5">
Directories
{counts.directories > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.directories}
</Badge>
)}
</Link>
</TabsTrigger>
</TabsList>
</Tabs>
</div>
)}
{/* Sub-page content */}
{children}
</div>
)

View File

@@ -0,0 +1,19 @@
"use client"
import { useParams } from "next/navigation"
import { ScanOverview } from "@/components/scan/history/scan-overview"
/**
* Scan overview page
* Displays scan statistics and summary information
*/
export default function ScanOverviewPage() {
const { id } = useParams<{ id: string }>()
const scanId = Number(id)
return (
<div className="flex-1 flex flex-col min-h-0 px-4 lg:px-6">
<ScanOverview scanId={scanId} />
</div>
)
}

View File

@@ -8,7 +8,7 @@ export default function ScanHistoryDetailPage() {
const router = useRouter()
useEffect(() => {
router.replace(`/scan/history/${id}/websites/`)
router.replace(`/scan/history/${id}/overview/`)
}, [id, router])
return null

View File

@@ -0,0 +1,15 @@
"use client"
import { useParams } from "next/navigation"
import { ScreenshotsGallery } from "@/components/screenshots/screenshots-gallery"
export default function ScanScreenshotsPage() {
const { id } = useParams<{ id: string }>()
const scanId = Number(id)
return (
<div className="px-4 lg:px-6">
<ScreenshotsGallery scanId={scanId} />
</div>
)
}

View File

@@ -0,0 +1,132 @@
"use client"
import React, { useState, useEffect } from "react"
import { useTranslations } from "next-intl"
import { AlertTriangle, Loader2, Ban } from "lucide-react"
import { Button } from "@/components/ui/button"
import { Textarea } from "@/components/ui/textarea"
import { Skeleton } from "@/components/ui/skeleton"
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"
import { useGlobalBlacklist, useUpdateGlobalBlacklist } from "@/hooks/use-global-blacklist"
/**
* Global blacklist settings page
*/
export default function GlobalBlacklistPage() {
const t = useTranslations("pages.settings.blacklist")
const [blacklistText, setBlacklistText] = useState("")
const [hasChanges, setHasChanges] = useState(false)
const { data, isLoading, error } = useGlobalBlacklist()
const updateBlacklist = useUpdateGlobalBlacklist()
// Initialize text when data loads
useEffect(() => {
if (data?.patterns) {
setBlacklistText(data.patterns.join("\n"))
setHasChanges(false)
}
}, [data])
// Handle text change
const handleTextChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
setBlacklistText(e.target.value)
setHasChanges(true)
}
// Handle save
const handleSave = () => {
const patterns = blacklistText
.split("\n")
.map((line) => line.trim())
.filter((line) => line.length > 0)
updateBlacklist.mutate(
{ patterns },
{
onSuccess: () => {
setHasChanges(false)
},
}
)
}
if (isLoading) {
return (
<div className="flex flex-1 flex-col gap-4 p-4">
<div className="space-y-2">
<Skeleton className="h-8 w-48" />
<Skeleton className="h-4 w-96" />
</div>
<Skeleton className="h-[400px] w-full" />
</div>
)
}
if (error) {
return (
<div className="flex flex-1 flex-col items-center justify-center py-12">
<AlertTriangle className="h-10 w-10 text-destructive mb-4" />
<p className="text-muted-foreground">{t("loadError")}</p>
</div>
)
}
return (
<div className="flex flex-1 flex-col gap-4 p-4">
{/* Page header */}
<div>
<h1 className="text-2xl font-bold">{t("title")}</h1>
<p className="text-muted-foreground">{t("description")}</p>
</div>
{/* Blacklist card */}
<Card>
<CardHeader>
<div className="flex items-center gap-2">
<Ban className="h-5 w-5 text-muted-foreground" />
<CardTitle>{t("card.title")}</CardTitle>
</div>
<CardDescription>{t("card.description")}</CardDescription>
</CardHeader>
<CardContent className="space-y-4">
{/* Rules hint */}
<div className="flex flex-wrap items-center gap-x-4 gap-y-2 text-sm text-muted-foreground">
<span className="font-medium text-foreground">{t("rules.title")}:</span>
<span><code className="bg-muted px-1.5 py-0.5 rounded text-xs">*.gov</code> {t("rules.domain")}</span>
<span><code className="bg-muted px-1.5 py-0.5 rounded text-xs">*cdn*</code> {t("rules.keyword")}</span>
<span><code className="bg-muted px-1.5 py-0.5 rounded text-xs">192.168.1.1</code> {t("rules.ip")}</span>
<span><code className="bg-muted px-1.5 py-0.5 rounded text-xs">10.0.0.0/8</code> {t("rules.cidr")}</span>
</div>
{/* Scope hint */}
<div className="rounded-lg border bg-muted/50 p-3 text-sm">
<p className="text-muted-foreground">{t("scopeHint")}</p>
</div>
{/* Input */}
<Textarea
value={blacklistText}
onChange={handleTextChange}
placeholder={t("placeholder")}
className="min-h-[320px] font-mono text-sm"
/>
{/* Save button */}
<div className="flex justify-end">
<Button
onClick={handleSave}
disabled={!hasChanges || updateBlacklist.isPending}
>
{updateBlacklist.isPending && (
<Loader2 className="mr-2 h-4 w-4 animate-spin" />
)}
{t("save")}
</Button>
</div>
</CardContent>
</Card>
</div>
)
}
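
The rule formats listed in this page's hint row (`*.gov`, `*cdn*`, exact IP, CIDR) are evaluated server-side by `BlacklistFilter` (seen earlier, imported from `apps.common.utils`). A hedged sketch of matching semantics consistent with those formats — the real implementation may differ:

```python
import fnmatch
import ipaddress

class BlacklistFilter:
    """Sketch of the rule semantics surfaced in the settings UI; not the project's actual code."""

    def __init__(self, patterns: list[str]):
        self.patterns = list(patterns)

    def is_allowed(self, host: str) -> bool:
        for pattern in self.patterns:
            # CIDR rule, e.g. 10.0.0.0/8 (only applies when host is an IP address)
            if '/' in pattern:
                try:
                    if ipaddress.ip_address(host) in ipaddress.ip_network(pattern, strict=False):
                        return False
                except ValueError:
                    continue  # host is not an IP, or pattern is not a valid network
            # Glob rules (*.gov, *cdn*) and exact matches (192.168.1.1)
            elif fnmatch.fnmatch(host, pattern):
                return False
        return True
```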

View File

@@ -1,17 +1,10 @@
"use client"
import { useTranslations } from "next-intl"
import { SystemLogsView } from "@/components/settings/system-logs"
export default function SystemLogsPage() {
const t = useTranslations("settings.systemLogs")
return (
<div className="flex flex-1 flex-col gap-4 p-4">
<div>
<h1 className="text-2xl font-bold tracking-tight">{t("title")}</h1>
<p className="text-muted-foreground">{t("description")}</p>
</div>
<div className="flex flex-1 flex-col p-4 h-full">
<SystemLogsView />
</div>
)

View File

@@ -5,15 +5,15 @@ import { useEffect } from "react"
/**
* Target detail page (compatible with old routes)
* Automatically redirects to websites page
* Automatically redirects to overview page
*/
export default function TargetDetailsPage() {
const { id } = useParams<{ id: string }>()
const router = useRouter()
useEffect(() => {
// Redirect to websites page
router.replace(`/target/${id}/websites/`)
// Redirect to overview page
router.replace(`/target/${id}/overview/`)
}, [id, router])
return null

View File

@@ -3,7 +3,7 @@
import React from "react"
import { usePathname, useParams } from "next/navigation"
import Link from "next/link"
import { Target } from "lucide-react"
import { Target, LayoutDashboard, Package, Image, ShieldAlert, Settings } from "lucide-react"
import { Skeleton } from "@/components/ui/skeleton"
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs"
import { Badge } from "@/components/ui/badge"
@@ -12,7 +12,8 @@ import { useTranslations } from "next-intl"
/**
* Target detail layout
* Provides shared target information and navigation for all sub-pages
* Two-level navigation: Overview / Assets / Vulnerabilities
* Assets has secondary navigation for different asset types
*/
export default function TargetLayout({
children,
@@ -30,26 +31,54 @@ export default function TargetLayout({
error
} = useTarget(Number(id))
// Get currently active tab
const getActiveTab = () => {
if (pathname.includes("/subdomain")) return "subdomain"
if (pathname.includes("/endpoints")) return "endpoints"
if (pathname.includes("/websites")) return "websites"
if (pathname.includes("/directories")) return "directories"
// Get primary navigation active tab
const getPrimaryTab = () => {
if (pathname.includes("/overview")) return "overview"
if (pathname.includes("/screenshots")) return "screenshots"
if (pathname.includes("/vulnerabilities")) return "vulnerabilities"
if (pathname.includes("/ip-addresses")) return "ip-addresses"
return ""
if (pathname.includes("/settings")) return "settings"
// All asset pages fall under "assets"
if (
pathname.includes("/websites") ||
pathname.includes("/subdomain") ||
pathname.includes("/ip-addresses") ||
pathname.includes("/endpoints") ||
pathname.includes("/directories")
) {
return "assets"
}
return "overview"
}
// Get secondary navigation active tab (for assets)
const getSecondaryTab = () => {
if (pathname.includes("/websites")) return "websites"
if (pathname.includes("/subdomain")) return "subdomain"
if (pathname.includes("/ip-addresses")) return "ip-addresses"
if (pathname.includes("/endpoints")) return "endpoints"
if (pathname.includes("/directories")) return "directories"
return "websites"
}
// Check if we should show secondary navigation
const showSecondaryNav = getPrimaryTab() === "assets"
// Tab path mapping
const basePath = `/target/${id}`
const tabPaths = {
subdomain: `${basePath}/subdomain/`,
endpoints: `${basePath}/endpoints/`,
websites: `${basePath}/websites/`,
directories: `${basePath}/directories/`,
const primaryPaths = {
overview: `${basePath}/overview/`,
assets: `${basePath}/websites/`, // Default to websites when clicking assets
screenshots: `${basePath}/screenshots/`,
vulnerabilities: `${basePath}/vulnerabilities/`,
settings: `${basePath}/settings/`,
}
const secondaryPaths = {
websites: `${basePath}/websites/`,
subdomain: `${basePath}/subdomain/`,
"ip-addresses": `${basePath}/ip-addresses/`,
endpoints: `${basePath}/endpoints/`,
directories: `${basePath}/directories/`,
}
// Get counts for each tab from target data
@@ -60,29 +89,27 @@ export default function TargetLayout({
directories: (target as any)?.summary?.directories || 0,
vulnerabilities: (target as any)?.summary?.vulnerabilities?.total || 0,
"ip-addresses": (target as any)?.summary?.ips || 0,
screenshots: (target as any)?.summary?.screenshots || 0,
}
// Calculate total assets count
const totalAssets = counts.websites + counts.subdomain + counts["ip-addresses"] + counts.endpoints + counts.directories
// Loading state
if (isLoading) {
return (
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
{/* Page header skeleton */}
<div className="flex items-center justify-between px-4 lg:px-6">
<div className="w-full max-w-xl space-y-2">
<div className="flex items-center gap-2">
<Skeleton className="h-6 w-6 rounded-md" />
<Skeleton className="h-7 w-48" />
</div>
<Skeleton className="h-4 w-72" />
</div>
{/* Header skeleton */}
<div className="flex items-center gap-2 px-4 lg:px-6">
<Skeleton className="h-4 w-16" />
<span className="text-muted-foreground">/</span>
<Skeleton className="h-4 w-32" />
</div>
{/* Tabs navigation skeleton */}
<div className="flex items-center justify-between px-4 lg:px-6">
<div className="flex gap-2">
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-24" />
</div>
{/* Tabs skeleton */}
<div className="flex gap-1 px-4 lg:px-6">
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-20" />
<Skeleton className="h-9 w-24" />
</div>
</div>
)
@@ -123,74 +150,52 @@ export default function TargetLayout({
return (
<div className="flex flex-col gap-4 py-4 md:gap-6 md:py-6">
{/* Page header */}
<div className="flex items-center justify-between px-4 lg:px-6">
<div>
<h2 className="text-2xl font-bold tracking-tight flex items-center gap-2">
<Target />
{target.name}
</h2>
<p className="text-muted-foreground">{target.description || t("noDescription")}</p>
</div>
{/* Header: Page label + Target name */}
<div className="flex items-center gap-2 text-sm px-4 lg:px-6">
<span className="text-muted-foreground">{t("breadcrumb.targetDetail")}</span>
<span className="text-muted-foreground">/</span>
<span className="font-medium flex items-center gap-1.5">
<Target className="h-4 w-4" />
{target.name}
</span>
</div>
{/* Tabs navigation - Use Link to ensure progress bar is triggered */}
<div className="flex items-center justify-between px-4 lg:px-6">
<Tabs value={getActiveTab()} className="w-full">
{/* Primary navigation */}
<div className="px-4 lg:px-6">
<Tabs value={getPrimaryTab()}>
<TabsList>
<TabsTrigger value="websites" asChild>
<Link href={tabPaths.websites} className="flex items-center gap-0.5">
Websites
{counts.websites > 0 && (
<TabsTrigger value="overview" asChild>
<Link href={primaryPaths.overview} className="flex items-center gap-1.5">
<LayoutDashboard className="h-4 w-4" />
{t("tabs.overview")}
</Link>
</TabsTrigger>
<TabsTrigger value="assets" asChild>
<Link href={primaryPaths.assets} className="flex items-center gap-1.5">
<Package className="h-4 w-4" />
{t("tabs.assets")}
{totalAssets > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.websites}
{totalAssets}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="subdomain" asChild>
<Link href={tabPaths.subdomain} className="flex items-center gap-0.5">
Subdomains
{counts.subdomain > 0 && (
<TabsTrigger value="screenshots" asChild>
<Link href={primaryPaths.screenshots} className="flex items-center gap-1.5">
<Image className="h-4 w-4" />
{t("tabs.screenshots")}
{counts.screenshots > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.subdomain}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="ip-addresses" asChild>
<Link href={tabPaths["ip-addresses"]} className="flex items-center gap-0.5">
IP Addresses
{counts["ip-addresses"] > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts["ip-addresses"]}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="endpoints" asChild>
<Link href={tabPaths.endpoints} className="flex items-center gap-0.5">
URLs
{counts.endpoints > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.endpoints}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="directories" asChild>
<Link href={tabPaths.directories} className="flex items-center gap-0.5">
Directories
{counts.directories > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.directories}
{counts.screenshots}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="vulnerabilities" asChild>
<Link href={tabPaths.vulnerabilities} className="flex items-center gap-0.5">
Vulnerabilities
<Link href={primaryPaths.vulnerabilities} className="flex items-center gap-1.5">
<ShieldAlert className="h-4 w-4" />
{t("tabs.vulnerabilities")}
{counts.vulnerabilities > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.vulnerabilities}
@@ -198,10 +203,76 @@ export default function TargetLayout({
)}
</Link>
</TabsTrigger>
<TabsTrigger value="settings" asChild>
<Link href={primaryPaths.settings} className="flex items-center gap-1.5">
<Settings className="h-4 w-4" />
{t("tabs.settings")}
</Link>
</TabsTrigger>
</TabsList>
</Tabs>
</div>
{/* Secondary navigation (only for assets) */}
{showSecondaryNav && (
<div className="flex items-center px-4 lg:px-6">
<Tabs value={getSecondaryTab()} className="w-full">
<TabsList variant="underline">
<TabsTrigger value="websites" variant="underline" asChild>
<Link href={secondaryPaths.websites} className="flex items-center gap-0.5">
Websites
{counts.websites > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.websites}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="subdomain" variant="underline" asChild>
<Link href={secondaryPaths.subdomain} className="flex items-center gap-0.5">
Subdomains
{counts.subdomain > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.subdomain}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="ip-addresses" variant="underline" asChild>
<Link href={secondaryPaths["ip-addresses"]} className="flex items-center gap-0.5">
IPs
{counts["ip-addresses"] > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts["ip-addresses"]}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="endpoints" variant="underline" asChild>
<Link href={secondaryPaths.endpoints} className="flex items-center gap-0.5">
URLs
{counts.endpoints > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.endpoints}
</Badge>
)}
</Link>
</TabsTrigger>
<TabsTrigger value="directories" variant="underline" asChild>
<Link href={secondaryPaths.directories} className="flex items-center gap-0.5">
Directories
{counts.directories > 0 && (
<Badge variant="secondary" className="ml-1.5 h-5 min-w-5 rounded-full px-1.5 text-xs">
{counts.directories}
</Badge>
)}
</Link>
</TabsTrigger>
</TabsList>
</Tabs>
</div>
)}
{/* Sub-page content */}
{children}
</div>

View File

@@ -0,0 +1,19 @@
"use client"
import { useParams } from "next/navigation"
import { TargetOverview } from "@/components/target/target-overview"
/**
* Target overview page
* Displays target statistics and summary information
*/
export default function TargetOverviewPage() {
const { id } = useParams<{ id: string }>()
const targetId = Number(id)
return (
<div className="px-4 lg:px-6">
<TargetOverview targetId={targetId} />
</div>
)
}

View File

@@ -5,15 +5,15 @@ import { useEffect } from "react"
/**
* Target detail default page
* Automatically redirects to websites page
* Automatically redirects to overview page
*/
export default function TargetDetailPage() {
const { id } = useParams<{ id: string }>()
const router = useRouter()
useEffect(() => {
// Redirect to websites page
router.replace(`/target/${id}/websites/`)
// Redirect to overview page
router.replace(`/target/${id}/overview/`)
}, [id, router])
return null

View File

@@ -0,0 +1,15 @@
"use client"
import { useParams } from "next/navigation"
import { ScreenshotsGallery } from "@/components/screenshots/screenshots-gallery"
export default function ScreenshotsPage() {
const { id } = useParams<{ id: string }>()
const targetId = Number(id)
return (
<div className="px-4 lg:px-6">
<ScreenshotsGallery targetId={targetId} />
</div>
)
}

View File

@@ -0,0 +1,19 @@
"use client"
import { useParams } from "next/navigation"
import { TargetSettings } from "@/components/target/target-settings"
/**
* Target settings page
* Contains blacklist configuration and other settings
*/
export default function TargetSettingsPage() {
const { id } = useParams<{ id: string }>()
const targetId = Number(id)
return (
<div className="px-4 lg:px-6">
<TargetSettings targetId={targetId} />
</div>
)
}

View File

@@ -18,6 +18,7 @@ import {
IconMessageReport, // Feedback icon
IconSearch, // Search icon
IconKey, // API Key icon
IconBan, // Blacklist icon
} from "@tabler/icons-react"
// Import internationalization hook
import { useTranslations } from 'next-intl'
@@ -174,6 +175,11 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
url: "/settings/api-keys/",
icon: IconKey,
},
{
name: t('globalBlacklist'),
url: "/settings/blacklist/",
icon: IconBan,
},
]
return (

View File

@@ -114,7 +114,7 @@ export function DirectoriesDataTable({
onSelectionChange={handleSelectionChange}
// Bulk operations
onBulkDelete={onBulkDelete}
bulkDeleteLabel="Delete"
bulkDeleteLabel={tActions("delete")}
showAddButton={false}
// Bulk add button
onBulkAdd={onBulkAdd}

View File

@@ -11,6 +11,7 @@ import { useTargetDirectories, useScanDirectories } from "@/hooks/use-directorie
import { useTarget } from "@/hooks/use-targets"
import { DirectoryService } from "@/services/directory.service"
import { BulkAddUrlsDialog } from "@/components/common/bulk-add-urls-dialog"
import { ConfirmDialog } from "@/components/ui/confirm-dialog"
import { getDateLocale } from "@/lib/date-utils"
import type { TargetType } from "@/lib/url-validator"
import type { Directory } from "@/types/directory.types"
@@ -29,6 +30,8 @@ export function DirectoriesView({
})
const [selectedDirectories, setSelectedDirectories] = useState<Directory[]>([])
const [bulkAddDialogOpen, setBulkAddDialogOpen] = useState(false)
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
const [isDeleting, setIsDeleting] = useState(false)
const [filterQuery, setFilterQuery] = useState("")
const [isSearching, setIsSearching] = useState(false)
@@ -240,6 +243,26 @@ export function DirectoriesView({
URL.revokeObjectURL(url)
}
// Handle bulk delete
const handleBulkDelete = async () => {
if (selectedDirectories.length === 0) return
setIsDeleting(true)
try {
const ids = selectedDirectories.map(d => d.id)
const result = await DirectoryService.bulkDelete(ids)
toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
setSelectedDirectories([])
setDeleteDialogOpen(false)
refetch()
} catch (error) {
console.error("Failed to delete directories", error)
toast.error(tToast("deleteFailed"))
} finally {
setIsDeleting(false)
}
}
if (error) {
return (
<div className="flex flex-col items-center justify-center py-12">
@@ -280,6 +303,7 @@ export function DirectoriesView({
onSelectionChange={handleSelectionChange}
onDownloadAll={handleDownloadAll}
onDownloadSelected={handleDownloadSelected}
onBulkDelete={targetId ? () => setDeleteDialogOpen(true) : undefined}
onBulkAdd={targetId ? () => setBulkAddDialogOpen(true) : undefined}
/>
@@ -295,6 +319,17 @@ export function DirectoriesView({
onSuccess={() => refetch()}
/>
)}
{/* Delete confirmation dialog */}
<ConfirmDialog
open={deleteDialogOpen}
onOpenChange={setDeleteDialogOpen}
title={tCommon("actions.confirmDelete")}
description={tCommon("actions.deleteConfirmMessage", { count: selectedDirectories.length })}
onConfirm={handleBulkDelete}
loading={isDeleting}
variant="destructive"
/>
</>
)
}

View File

@@ -36,6 +36,7 @@ interface EndpointsDataTableProps<TData extends { id: number | string }, TValue>
onAddNew?: () => void
addButtonText?: string
onSelectionChange?: (selectedRows: TData[]) => void
onBulkDelete?: () => void
pagination?: { pageIndex: number; pageSize: number }
onPaginationChange?: (pagination: { pageIndex: number; pageSize: number }) => void
totalCount?: number
@@ -54,6 +55,7 @@ export function EndpointsDataTable<TData extends { id: number | string }, TValue
onAddNew,
addButtonText = "Add",
onSelectionChange,
onBulkDelete,
pagination: externalPagination,
onPaginationChange,
totalCount,
@@ -135,7 +137,8 @@ export function EndpointsDataTable<TData extends { id: number | string }, TValue
       // Selection
       onSelectionChange={onSelectionChange}
       // Bulk operations
-      showBulkDelete={false}
+      onBulkDelete={onBulkDelete}
+      bulkDeleteLabel={tActions("delete")}
       onAddNew={onAddNew}
       addButtonLabel={addButtonText}
       // Bulk add button
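
Note: replacing showBulkDelete={false} with an optional onBulkDelete callback lets the table gate the button on the callback's presence instead of a separate flag. A minimal sketch of that convention (a plain <button> stands in for the project's toolbar button):

    // Sketch only: render the bulk-delete action iff a callback is supplied.
    interface BulkDeleteButtonProps {
      onBulkDelete?: () => void
      bulkDeleteLabel?: string
    }

    export function BulkDeleteButton({ onBulkDelete, bulkDeleteLabel = "Delete" }: BulkDeleteButtonProps) {
      if (!onBulkDelete) return null // no callback, no button: callers opt in explicitly
      return <button onClick={onBulkDelete}>{bulkDeleteLabel}</button>
    }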

View File

@@ -10,6 +10,7 @@ import { createEndpointColumns } from "./endpoints-columns"
 import { LoadingSpinner } from "@/components/loading-spinner"
 import { DataTableSkeleton } from "@/components/ui/data-table-skeleton"
 import { BulkAddUrlsDialog } from "@/components/common/bulk-add-urls-dialog"
+import { ConfirmDialog } from "@/components/ui/confirm-dialog"
 import { getDateLocale } from "@/lib/date-utils"
 import type { TargetType } from "@/lib/url-validator"
 import {
@@ -41,6 +42,8 @@ export function EndpointsDetailView({
   const [endpointToDelete, setEndpointToDelete] = useState<Endpoint | null>(null)
   const [selectedEndpoints, setSelectedEndpoints] = useState<Endpoint[]>([])
   const [bulkAddDialogOpen, setBulkAddDialogOpen] = useState(false)
+  const [bulkDeleteDialogOpen, setBulkDeleteDialogOpen] = useState(false)
+  const [isDeleting, setIsDeleting] = useState(false)
 
   // Pagination state management
   const [pagination, setPagination] = useState({
@@ -280,6 +283,26 @@ export function EndpointsDetailView({
     URL.revokeObjectURL(url)
   }
+
+  // Handle bulk delete
+  const handleBulkDelete = async () => {
+    if (selectedEndpoints.length === 0) return
+    setIsDeleting(true)
+    try {
+      const ids = selectedEndpoints.map(e => e.id)
+      const result = await EndpointService.bulkDelete(ids)
+      toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
+      setSelectedEndpoints([])
+      setBulkDeleteDialogOpen(false)
+      refetch()
+    } catch (error) {
+      console.error("Failed to delete endpoints", error)
+      toast.error(tToast("deleteFailed"))
+    } finally {
+      setIsDeleting(false)
+    }
+  }
+
   // Error state
   if (error) {
     return (
@@ -327,6 +350,7 @@ export function EndpointsDetailView({
         onSelectionChange={handleSelectionChange}
         onDownloadAll={handleDownloadAll}
         onDownloadSelected={handleDownloadSelected}
+        onBulkDelete={targetId ? () => setBulkDeleteDialogOpen(true) : undefined}
         onBulkAdd={targetId ? () => setBulkAddDialogOpen(true) : undefined}
       />
@@ -343,7 +367,18 @@ export function EndpointsDetailView({
         />
       )}
 
-      {/* Delete confirmation dialog */}
+      {/* Bulk delete confirmation dialog */}
+      <ConfirmDialog
+        open={bulkDeleteDialogOpen}
+        onOpenChange={setBulkDeleteDialogOpen}
+        title={tConfirm("deleteTitle")}
+        description={tCommon("actions.deleteConfirmMessage", { count: selectedEndpoints.length })}
+        onConfirm={handleBulkDelete}
+        loading={isDeleting}
+        variant="destructive"
+      />
+
+      {/* Single delete confirmation dialog */}
       <AlertDialog open={deleteDialogOpen} onOpenChange={setDeleteDialogOpen}>
         <AlertDialogContent>
           <AlertDialogHeader>
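
Note: this view now keeps two independent confirmations: the pre-existing AlertDialog for a single endpoint and the new ConfirmDialog for the selection. A sketch of the state split, using only names visible in the diff (the Endpoint shape is reduced to what the sketch needs):

    // Sketch only: separate open-state per dialog so the bulk confirmation can
    // never clobber the single-row one.
    import { useState } from "react"

    interface Endpoint { id: number | string; url: string }

    export function useDeleteDialogs() {
      const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)          // single row
      const [endpointToDelete, setEndpointToDelete] = useState<Endpoint | null>(null)
      const [bulkDeleteDialogOpen, setBulkDeleteDialogOpen] = useState(false)  // selection

      // Row action: remember which endpoint, then open the single-delete dialog.
      const requestSingleDelete = (endpoint: Endpoint) => {
        setEndpointToDelete(endpoint)
        setDeleteDialogOpen(true)
      }

      return {
        deleteDialogOpen, setDeleteDialogOpen, endpointToDelete,
        requestSingleDelete, bulkDeleteDialogOpen, setBulkDeleteDialogOpen,
      }
    }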

View File

@@ -238,15 +238,39 @@ export function ImportFingerprintDialog({
     // Frontend basic validation for JSON files
     try {
       const text = await file.text()
-      const json = JSON.parse(text)
+      let json: any
+
+      // Try standard JSON first
+      try {
+        json = JSON.parse(text)
+      } catch {
+        // If standard JSON fails, try JSONL format (for goby)
+        if (fingerprintType === "goby") {
+          const lines = text.trim().split('\n').filter(line => line.trim())
+          if (lines.length === 0) {
+            toast.error(t("import.emptyData"))
+            return
+          }
+          // Parse each line as JSON
+          json = lines.map((line, index) => {
+            try {
+              return JSON.parse(line)
+            } catch {
+              throw new Error(`Line ${index + 1}: Invalid JSON`)
+            }
+          })
+        } else {
+          throw new Error("Invalid JSON")
+        }
+      }
+
       const validation = config.validate(json)
       if (!validation.valid) {
         toast.error(validation.error)
         return
       }
-    } catch (e) {
-      toast.error(tToast("invalidJsonFile"))
+    } catch (e: any) {
+      toast.error(e.message || tToast("invalidJsonFile"))
       return
     }
   }
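
Note: the importer now falls back to JSONL (one JSON value per line), the format goby exports use, when plain JSON.parse fails. The same logic as a standalone helper, keeping the 1-based line number in the error so the toast can point at the bad line:

    // Sketch only: parse as JSON first, then fall back to JSONL like the dialog above.
    export function parseJsonOrJsonl(text: string): unknown {
      try {
        return JSON.parse(text)
      } catch {
        const lines = text.trim().split("\n").filter(line => line.trim())
        if (lines.length === 0) throw new Error("Empty file")
        return lines.map((line, index) => {
          try {
            return JSON.parse(line)
          } catch {
            throw new Error(`Line ${index + 1}: Invalid JSON`)
          }
        })
      }
    }

    // parseJsonOrJsonl('{"a":1}') -> { a: 1 }; a multi-line JSONL file -> array of values.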

View File

@@ -54,6 +54,7 @@ export function IPAddressesDataTable({
 }: IPAddressesDataTableProps) {
   const t = useTranslations("common.status")
   const tDownload = useTranslations("common.download")
+  const tActions = useTranslations("common.actions")
 
   // Smart search handler
   const handleSmartSearch = (rawQuery: string) => {
@@ -98,7 +99,7 @@ export function IPAddressesDataTable({
       onSelectionChange={onSelectionChange}
       // Bulk operations
       onBulkDelete={onBulkDelete}
-      bulkDeleteLabel="Delete"
+      bulkDeleteLabel={tActions("delete")}
       showAddButton={false}
       // Download
       downloadOptions={downloadOptions.length > 0 ? downloadOptions : undefined}

View File

@@ -8,6 +8,7 @@ import { createIPAddressColumns } from "./ip-addresses-columns"
 import { DataTableSkeleton } from "@/components/ui/data-table-skeleton"
 import { Button } from "@/components/ui/button"
 import { useTargetIPAddresses, useScanIPAddresses } from "@/hooks/use-ip-addresses"
+import { ConfirmDialog } from "@/components/ui/confirm-dialog"
 import { getDateLocale } from "@/lib/date-utils"
 import type { IPAddress } from "@/types/ip-address.types"
 import { IPAddressService } from "@/services/ip-address.service"
@@ -26,6 +27,8 @@ export function IPAddressesView({
   })
   const [selectedIPAddresses, setSelectedIPAddresses] = useState<IPAddress[]>([])
   const [filterQuery, setFilterQuery] = useState("")
+  const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
+  const [isDeleting, setIsDeleting] = useState(false)
 
   // Internationalization
   const tColumns = useTranslations("columns")
@@ -215,6 +218,27 @@ export function IPAddressesView({
     URL.revokeObjectURL(url)
   }
+
+  // Handle bulk delete
+  const handleBulkDelete = async () => {
+    if (selectedIPAddresses.length === 0) return
+    setIsDeleting(true)
+    try {
+      // IP addresses are aggregated, pass IP strings instead of IDs
+      const ips = selectedIPAddresses.map(ip => ip.ip)
+      const result = await IPAddressService.bulkDelete(ips)
+      toast.success(tToast("deleteSuccess", { count: result.deletedCount }))
+      setSelectedIPAddresses([])
+      setDeleteDialogOpen(false)
+      refetch()
+    } catch (error) {
+      console.error("Failed to delete IP addresses", error)
+      toast.error(tToast("deleteFailed"))
+    } finally {
+      setIsDeleting(false)
+    }
+  }
+
   if (error) {
     return (
       <div className="flex flex-col items-center justify-center py-12">
@@ -253,6 +277,18 @@ export function IPAddressesView({
         onSelectionChange={handleSelectionChange}
         onDownloadAll={handleDownloadAll}
         onDownloadSelected={handleDownloadSelected}
+        onBulkDelete={targetId ? () => setDeleteDialogOpen(true) : undefined}
       />
+
+      {/* Delete confirmation dialog */}
+      <ConfirmDialog
+        open={deleteDialogOpen}
+        onOpenChange={setDeleteDialogOpen}
+        title={tCommon("actions.confirmDelete")}
+        description={tCommon("actions.deleteConfirmMessage", { count: selectedIPAddresses.length })}
+        onConfirm={handleBulkDelete}
+        loading={isDeleting}
+        variant="destructive"
+      />
     </>
   )
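
Note: the comment in the handler explains the one asymmetry here: IP rows are aggregated across hosts, so the bulk endpoint receives IP strings rather than row ids. A minimal sketch of the service call this implies (the route below is a placeholder, not the project's real API path):

    // Sketch only: bulk delete keyed on IP strings; the URL is hypothetical.
    export const IPAddressService = {
      async bulkDelete(ips: string[]): Promise<{ deletedCount: number }> {
        const res = await fetch("/api/ip-addresses/bulk-delete", {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({ ips }),
        })
        if (!res.ok) throw new Error(`Bulk delete failed: ${res.status}`)
        return res.json() // e.g. { deletedCount: 3 }, matching result.deletedCount above
      },
    }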

Some files were not shown because too many files have changed in this diff.