Compare commits

..

22 Commits

Author SHA1 Message Date
yyhuni
b4037202dc feat: use registry cache for faster builds 2026-01-03 17:35:54 +08:00
yyhuni
4b4f9862bf ci(docker): add postgres image build configuration and update image tags
- Add xingrin-postgres image build job to docker-build workflow for multi-platform support (linux/amd64,linux/arm64)
- Update docker-compose.dev.yml to use IMAGE_TAG variable with dev as default fallback
- Update docker-compose.yml to use IMAGE_TAG variable with required validation
- Replace hardcoded postgres image tag (15) with dynamic IMAGE_TAG for better version management
- Enable flexible image tagging across development and production environments
2026-01-03 17:26:34 +08:00
github-actions[bot]
1c42e4978f chore: bump version to v1.3.5-dev 2026-01-03 08:44:06 +00:00
github-actions[bot]
57bab63997 chore: bump version to v1.3.3-dev 2026-01-03 05:55:07 +00:00
github-actions[bot]
b1f0f18ac0 chore: bump version to v1.3.4-dev 2026-01-03 05:54:50 +00:00
yyhuni
ccee5471b8 docs(readme): add notification push service documentation
- Add notification push service feature to visualization interface section
- Document support for real-time WeChat Work, Telegram, and Discord message push
- Enhance feature list clarity for notification capabilities
2026-01-03 13:34:36 +08:00
yyhuni
0ccd362535 优化下载逻辑 2026-01-03 13:32:58 +08:00
yyhuni
7f2af7f7e2 feat(search): add result export functionality and pagination limit support
- Add optional limit parameter to AssetSearchService.search() method for controlling result set size
- Implement AssetSearchExportView for exporting search results as CSV files with UTF-8 BOM encoding
- Add CSV export endpoint at GET /api/assets/search/export/ with configurable MAX_EXPORT_ROWS limit (10000)
- Support both website and endpoint asset types with type-specific column mappings in CSV export
- Format array fields (tech, matched_gf_patterns) and dates appropriately in exported CSV
- Update URL routing to include new search export endpoint
- Update views __init__.py to export AssetSearchExportView
- Add CSV generation with streaming response for efficient memory usage on large exports
- Update frontend search service to support export functionality
- Add internationalization strings for export feature in en.json and zh.json
- Update smart-filter-input and search-results-table components to support export UI
- Update installation and Docker startup scripts for deployment compatibility
2026-01-03 13:22:21 +08:00
yyhuni
4bd0f9e8c1 feat(search): implement dual-view IMMV architecture for website and endpoint assets
- Add incremental materialized view (IMMV) support for both Website and Endpoint asset types using pg_ivm extension
- Create asset_search_view IMMV with optimized indexes for host, title, url, headers, body, tech, status_code, and created_at fields
- Create endpoint_search_view IMMV with identical field structure and indexing strategy for endpoint-specific searches
- Extend search_service.py to support asset type routing with VIEW_MAPPING and VALID_ASSET_TYPES configuration
- Add comprehensive field mapping and array field definitions for both asset types
- Implement dual-query execution path in search views to handle website and endpoint searches independently
- Update frontend search components to support asset type filtering and result display
- Add search results table component with improved data presentation and filtering capabilities
- Update installation scripts and Docker configuration for pg_ivm extension deployment
- Add internationalization strings for new search UI elements in English and Chinese
- Consolidate index creation and cleanup logic in migrations for maintainability
- Enable automatic incremental updates on data changes without manual view refresh
2026-01-03 12:41:20 +08:00
yyhuni
68cc996e3b refactor(asset): standardize snapshot and asset model field naming and types
- Rename `status` to `status_code` in WebsiteSnapshotDTO for consistency
- Rename `web_server` to `webserver` in WebsiteSnapshotDTO for consistency
- Make `target_id` required field in EndpointSnapshotDTO and WebsiteSnapshotDTO
- Remove optional validation check for `target_id` in EndpointSnapshotDTO
- Convert CharField to TextField for url, location, title, webserver, and content_type fields in Endpoint and EndpointSnapshot models to support longer values
- Update migration 0001_initial.py to reflect field type changes from CharField to TextField
- Update all related services and repositories to use standardized field names
- Update serializers to map renamed fields correctly
- Ensure consistent field naming across DTOs, models, and database schema
2026-01-03 09:08:25 +08:00
github-actions[bot]
f1e79d638e chore: bump version to v1.3.2-dev 2026-01-03 00:33:26 +00:00
yyhuni
d484133e4c chore(docker): optimize server dockerfile with docker-ce-cli installation
- Replace full docker.io package with lightweight docker-ce-cli to reduce image size
- Add ca-certificates and gnupg dependencies for secure package management
- Improve Docker installation process for local Worker task distribution
- Reduce unnecessary dependencies in server container build
2026-01-03 08:09:03 +08:00
yyhuni
fc977ae029 chore(docker,frontend): optimize docker installation and add auth bypass config
- Replace docker.io installation script with apt-get package manager for better reliability
- Add NEXT_PUBLIC_SKIP_AUTH environment variable to Vercel config for development
- Improve Docker build layer caching by using native package manager instead of curl script
- Simplify frontend deployment configuration for local development workflows
2026-01-03 08:08:40 +08:00
yyhuni
f328474404 feat(frontend): add comprehensive mock data infrastructure for services
- Add mock data modules for auth, engines, notifications, scheduled-scans, and workers
- Implement mock authentication data with user profiles and login/logout responses
- Create mock scan engine configurations with multiple predefined scanning profiles
- Add mock notification system with various severity levels and categories
- Implement mock scheduled scan data with cron expressions and run history
- Add mock worker node data with status and performance metrics
- Update service layer to integrate with new mock data infrastructure
- Provide helper functions for filtering and paginating mock data
- Enable frontend development and testing without backend API dependency
2026-01-03 07:59:20 +08:00
yyhuni
68e726a066 chore(docker): update base image to python 3.10-slim-bookworm
- Update Python base image from 3.10-slim to 3.10-slim-bookworm
- Ensures compatibility with latest Debian stable release
- Improves security with updated system packages and dependencies
2026-01-02 23:19:09 +08:00
yyhuni
77a6f45909 fix:搜索的楼栋统计问题 2026-01-02 23:12:55 +08:00
yyhuni
49d1f1f1bb 采用ivm增量更新方案进行搜索 2026-01-02 22:46:40 +08:00
yyhuni
db8ecb1644 feat(search): add mock data infrastructure and vulnerability detail integration
- Add comprehensive mock data configuration for all major entities (dashboard, endpoints, organizations, scans, subdomains, targets, vulnerabilities, websites)
- Implement mock service layer with centralized config for development and testing
- Add vulnerability detail dialog integration to search results with lazy loading
- Enhance search result card with vulnerability viewing capability
- Update search materialized view migration to include vulnerability name field
- Implement default host fuzzy search fallback for bare text queries without operators
- Add vulnerability data formatting in search view for consistent API response structure
- Configure Vercel deployment settings and update Next.js configuration
- Update all service layers to support mock data injection for development environment
- Extend search types with improved vulnerability data structure
- Add internationalization strings for vulnerability loading errors
- Enable rapid frontend development and testing without backend API dependency
2026-01-02 19:06:09 +08:00
yyhuni
18cc016268 feat(search): implement advanced query parser with expression syntax support
- Add SearchQueryParser class to parse complex search expressions with operators (=, ==, !=)
- Support logical operators && (AND) and || (OR) for combining multiple conditions
- Implement field mapping for frontend to database field translation
- Add support for array field searching (tech stack) with unnest and ANY operators
- Support fuzzy matching (=), exact matching (==), and negation (!=) operators
- Add proper SQL injection prevention through parameterized queries
- Refactor search service to use expression-based filtering instead of simple filters
- Update search views to integrate new query parser
- Enhance frontend search hook and service to support new expression syntax
- Update search types to reflect new query structure
- Improve search page UI to display expression syntax examples and help text
- Enable complex multi-condition searches like: host="api" && tech="nginx" || status=="200"
2026-01-02 17:46:31 +08:00
yyhuni
23bc463283 feat(search): improve technology stack filtering with fuzzy matching
- Replace exact array matching with fuzzy search using ILIKE operator
- Update tech filter to search within array elements using unnest() and EXISTS
- Support partial technology name matching (e.g., "node" matches "nodejs")
- Apply consistent fuzzy matching logic across both search methods
- Enhance user experience by allowing flexible technology stack queries
2026-01-02 17:01:24 +08:00
yyhuni
7b903b91b2 feat(search): implement comprehensive search infrastructure with materialized views and pagination
- Add asset search service with materialized view support for optimized queries
- Implement search refresh service for maintaining up-to-date search indexes
- Create database migrations for AssetStatistics, StatisticsHistory, Directory, and DirectorySnapshot models
- Add PostgreSQL GIN indexes with trigram operators for full-text search capabilities
- Implement search pagination component with configurable page size and navigation
- Add search result card component with enhanced asset display formatting
- Create search API views with filtering and sorting capabilities
- Add use-search hook for client-side search state management
- Implement search service client for API communication
- Update search types with pagination metadata and result structures
- Add English and Chinese translations for search UI components
- Enhance scheduler to support search index refresh tasks
- Refactor asset views into modular search_views and asset_views
- Update URL routing to support new search endpoints
- Improve scan flow handlers for better search index integration
2026-01-02 16:57:54 +08:00
yyhuni
b3136d51b9 搜索页面前端UI设计完成 2026-01-02 10:07:26 +08:00
87 changed files with 7050 additions and 494 deletions

View File

@@ -44,6 +44,10 @@ jobs:
dockerfile: docker/agent/Dockerfile
context: .
platforms: linux/amd64,linux/arm64
- image: xingrin-postgres
dockerfile: docker/postgres/Dockerfile
context: docker/postgres
platforms: linux/amd64,linux/arm64
steps:
- name: Checkout
@@ -106,8 +110,8 @@ jobs:
${{ steps.version.outputs.IS_RELEASE == 'true' && format('{0}/{1}:latest', env.IMAGE_PREFIX, matrix.image) || '' }}
build-args: |
IMAGE_TAG=${{ steps.version.outputs.VERSION }}
cache-from: type=gha,scope=${{ matrix.image }}
cache-to: type=gha,mode=max,scope=${{ matrix.image }}
cache-from: type=registry,ref=${{ env.IMAGE_PREFIX }}/${{ matrix.image }}:cache
cache-to: type=registry,ref=${{ env.IMAGE_PREFIX }}/${{ matrix.image }}:cache,mode=max
provenance: false
sbom: false

View File

@@ -13,14 +13,14 @@
<p align="center">
<a href="#-功能特性">功能特性</a> •
<a href="#-全局资产搜索">资产搜索</a> •
<a href="#-快速开始">快速开始</a> •
<a href="#-文档">文档</a> •
<a href="#-技术栈">技术栈</a> •
<a href="#-反馈与贡献">反馈与贡献</a>
</p>
<p align="center">
<sub>🔍 关键词: ASM | 攻击面管理 | 漏洞扫描 | 资产发现 | Bug Bounty | 渗透测试 | Nuclei | 子域名枚举 | EASM</sub>
<sub>🔍 关键词: ASM | 攻击面管理 | 漏洞扫描 | 资产发现 | 资产搜索 | Bug Bounty | 渗透测试 | Nuclei | 子域名枚举 | EASM</sub>
</p>
---
@@ -162,9 +162,34 @@ flowchart TB
W3 -.心跳上报.-> REDIS
```
### 🔎 全局资产搜索
- **多类型搜索** - 支持 Website 和 Endpoint 两种资产类型
- **表达式语法** - 支持 `=`(模糊)、`==`(精确)、`!=`(不等于)操作符
- **逻辑组合** - 支持 `&&` (AND) 和 `||` (OR) 逻辑组合
- **多字段查询** - 支持 host、url、title、tech、status、body、header 字段
- **CSV 导出** - 流式导出搜索结果(受 MAX_EXPORT_ROWS 配置限制,默认 10000 条)
#### 搜索语法示例
```bash
# 基础搜索
host="api" # host 包含 "api"
status=="200" # 状态码精确等于 200
tech="nginx" # 技术栈包含 nginx
# 组合搜索
host="api" && status=="200" # host 包含 api 且状态码为 200
tech="vue" || tech="react" # 技术栈包含 vue 或 react
# 复杂查询
host="admin" && tech="php" && status=="200"
url="/api/v1" && status!="404"
```
### 📊 可视化界面
- **数据统计** - 资产/漏洞统计仪表盘
- **实时通知** - WebSocket 消息推送
- **通知推送** - 实时企业微信、Telegram、Discord 消息推送服务
---
@@ -172,7 +197,7 @@ flowchart TB
### 环境要求
- **操作系统**: Ubuntu 20.04+ / Debian 11+ (推荐)
- **操作系统**: Ubuntu 20.04+ / Debian 11+
- **硬件**: 2核 4G 内存起步,20GB+ 磁盘空间
### 一键安装
@@ -197,6 +222,7 @@ sudo ./install.sh --mirror
### 访问服务
- **Web 界面**: `https://ip:8083`
- **默认账号**: admin / admin(首次登录后请修改密码)
### 常用命令

View File

@@ -1 +1 @@
v1.2.14-dev
v1.3.5-dev

View File

@@ -1,4 +1,5 @@
import logging
import sys
from django.apps import AppConfig
@@ -16,6 +17,9 @@ class AssetConfig(AppConfig):
# 启用 pg_trgm 扩展(用于文本模糊搜索索引)
# 用于已有数据库升级场景
self._ensure_pg_trgm_extension()
# 验证 pg_ivm 扩展是否可用(用于 IMMV 增量维护)
self._verify_pg_ivm_extension()
def _ensure_pg_trgm_extension(self):
"""
@@ -43,3 +47,60 @@ class AssetConfig(AppConfig):
"请手动执行: CREATE EXTENSION IF NOT EXISTS pg_trgm;",
str(e)
)
def _verify_pg_ivm_extension(self):
"""
验证 pg_ivm 扩展是否可用。
pg_ivm 用于 IMMV增量维护物化视图是系统必需的扩展。
如果不可用,将记录错误并退出。
"""
from django.db import connection
# 检查是否为 PostgreSQL 数据库
if connection.vendor != 'postgresql':
logger.debug("跳过 pg_ivm 验证:当前数据库不是 PostgreSQL")
return
# 跳过某些管理命令(如 migrate、makemigrations
import sys
if len(sys.argv) > 1 and sys.argv[1] in ('migrate', 'makemigrations', 'collectstatic', 'check'):
logger.debug("跳过 pg_ivm 验证:当前为管理命令")
return
try:
with connection.cursor() as cursor:
# 检查 pg_ivm 扩展是否已安装
cursor.execute("""
SELECT COUNT(*) FROM pg_extension WHERE extname = 'pg_ivm'
""")
count = cursor.fetchone()[0]
if count > 0:
logger.info("✓ pg_ivm 扩展已启用")
else:
# 尝试创建扩展
try:
cursor.execute("CREATE EXTENSION IF NOT EXISTS pg_ivm;")
logger.info("✓ pg_ivm 扩展已创建并启用")
except Exception as create_error:
logger.error(
"=" * 60 + "\n"
"错误: pg_ivm 扩展未安装\n"
"=" * 60 + "\n"
"pg_ivm 是系统必需的扩展,用于增量维护物化视图。\n\n"
"请在 PostgreSQL 服务器上安装 pg_ivm\n"
" curl -sSL https://raw.githubusercontent.com/yyhuni/xingrin/main/docker/scripts/install-pg-ivm.sh | sudo bash\n\n"
"或手动安装:\n"
" 1. apt install build-essential postgresql-server-dev-15 git\n"
" 2. git clone https://github.com/sraoss/pg_ivm.git && cd pg_ivm && make && make install\n"
" 3. 在 postgresql.conf 中添加: shared_preload_libraries = 'pg_ivm'\n"
" 4. 重启 PostgreSQL\n"
"=" * 60
)
# 在生产环境中退出,开发环境中仅警告
from django.conf import settings
if not settings.DEBUG:
sys.exit(1)
except Exception as e:
logger.error(f"pg_ivm 扩展验证失败: {e}")

View File

@@ -13,6 +13,7 @@ class EndpointSnapshotDTO:
快照只属于 scan。
"""
scan_id: int
target_id: int # 必填,用于同步到资产表
url: str
host: str = '' # 主机名域名或IP地址
title: str = ''
@@ -25,7 +26,6 @@ class EndpointSnapshotDTO:
response_body: str = ''
vhost: Optional[bool] = None
matched_gf_patterns: List[str] = None
target_id: Optional[int] = None # 冗余字段,用于同步到资产表
response_headers: str = ''
def __post_init__(self):
@@ -43,9 +43,6 @@ class EndpointSnapshotDTO:
"""
from apps.asset.dtos.asset import EndpointDTO
if self.target_id is None:
raise ValueError("target_id 不能为 None无法同步到资产表")
return EndpointDTO(
target_id=self.target_id,
url=self.url,

View File

@@ -13,14 +13,14 @@ class WebsiteSnapshotDTO:
快照只属于 scantarget 信息通过 scan.target 获取。
"""
scan_id: int
target_id: int # 仅用于传递数据,不保存到数据库
target_id: int # 必填,用于同步到资产表
url: str
host: str
title: str = ''
status: Optional[int] = None
status_code: Optional[int] = None # 统一命名status -> status_code
content_length: Optional[int] = None
location: str = ''
web_server: str = ''
webserver: str = '' # 统一命名web_server -> webserver
content_type: str = ''
tech: List[str] = None
response_body: str = ''
@@ -45,10 +45,10 @@ class WebsiteSnapshotDTO:
url=self.url,
host=self.host,
title=self.title,
status_code=self.status,
status_code=self.status_code,
content_length=self.content_length,
location=self.location,
webserver=self.web_server,
webserver=self.webserver,
content_type=self.content_type,
tech=self.tech if self.tech else [],
response_body=self.response_body,

View File

@@ -0,0 +1,345 @@
# Generated by Django 5.2.7 on 2026-01-02 04:45
import django.contrib.postgres.fields
import django.contrib.postgres.indexes
import django.core.validators
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('scan', '0001_initial'),
('targets', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AssetStatistics',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('total_targets', models.IntegerField(default=0, help_text='目标总数')),
('total_subdomains', models.IntegerField(default=0, help_text='子域名总数')),
('total_ips', models.IntegerField(default=0, help_text='IP地址总数')),
('total_endpoints', models.IntegerField(default=0, help_text='端点总数')),
('total_websites', models.IntegerField(default=0, help_text='网站总数')),
('total_vulns', models.IntegerField(default=0, help_text='漏洞总数')),
('total_assets', models.IntegerField(default=0, help_text='总资产数(子域名+IP+端点+网站)')),
('prev_targets', models.IntegerField(default=0, help_text='上次目标总数')),
('prev_subdomains', models.IntegerField(default=0, help_text='上次子域名总数')),
('prev_ips', models.IntegerField(default=0, help_text='上次IP地址总数')),
('prev_endpoints', models.IntegerField(default=0, help_text='上次端点总数')),
('prev_websites', models.IntegerField(default=0, help_text='上次网站总数')),
('prev_vulns', models.IntegerField(default=0, help_text='上次漏洞总数')),
('prev_assets', models.IntegerField(default=0, help_text='上次总资产数')),
('updated_at', models.DateTimeField(auto_now=True, help_text='最后更新时间')),
],
options={
'verbose_name': '资产统计',
'verbose_name_plural': '资产统计',
'db_table': 'asset_statistics',
},
),
migrations.CreateModel(
name='StatisticsHistory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(help_text='统计日期', unique=True)),
('total_targets', models.IntegerField(default=0, help_text='目标总数')),
('total_subdomains', models.IntegerField(default=0, help_text='子域名总数')),
('total_ips', models.IntegerField(default=0, help_text='IP地址总数')),
('total_endpoints', models.IntegerField(default=0, help_text='端点总数')),
('total_websites', models.IntegerField(default=0, help_text='网站总数')),
('total_vulns', models.IntegerField(default=0, help_text='漏洞总数')),
('total_assets', models.IntegerField(default=0, help_text='总资产数')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, help_text='更新时间')),
],
options={
'verbose_name': '统计历史',
'verbose_name_plural': '统计历史',
'db_table': 'statistics_history',
'ordering': ['-date'],
'indexes': [models.Index(fields=['date'], name='statistics__date_1d29cd_idx')],
},
),
migrations.CreateModel(
name='Directory',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.CharField(help_text='完整请求 URL', max_length=2000)),
('status', models.IntegerField(blank=True, help_text='HTTP 响应状态码', null=True)),
('content_length', models.BigIntegerField(blank=True, help_text='响应体字节大小Content-Length 或实际长度)', null=True)),
('words', models.IntegerField(blank=True, help_text='响应体中单词数量(按空格分割)', null=True)),
('lines', models.IntegerField(blank=True, help_text='响应体行数(按换行符分割)', null=True)),
('content_type', models.CharField(blank=True, default='', help_text='响应头 Content-Type 值', max_length=200)),
('duration', models.BigIntegerField(blank=True, help_text='请求耗时(单位:纳秒)', null=True)),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('target', models.ForeignKey(help_text='所属的扫描目标', on_delete=django.db.models.deletion.CASCADE, related_name='directories', to='targets.target')),
],
options={
'verbose_name': '目录',
'verbose_name_plural': '目录',
'db_table': 'directory',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['-created_at'], name='directory_created_2cef03_idx'), models.Index(fields=['target'], name='directory_target__e310c8_idx'), models.Index(fields=['url'], name='directory_url_ba40cd_idx'), models.Index(fields=['status'], name='directory_status_40bbe6_idx'), django.contrib.postgres.indexes.GinIndex(fields=['url'], name='directory_url_trgm_idx', opclasses=['gin_trgm_ops'])],
'constraints': [models.UniqueConstraint(fields=('target', 'url'), name='unique_directory_url_target')],
},
),
migrations.CreateModel(
name='DirectorySnapshot',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.CharField(help_text='目录URL', max_length=2000)),
('status', models.IntegerField(blank=True, help_text='HTTP状态码', null=True)),
('content_length', models.BigIntegerField(blank=True, help_text='内容长度', null=True)),
('words', models.IntegerField(blank=True, help_text='响应体中单词数量(按空格分割)', null=True)),
('lines', models.IntegerField(blank=True, help_text='响应体行数(按换行符分割)', null=True)),
('content_type', models.CharField(blank=True, default='', help_text='响应头 Content-Type 值', max_length=200)),
('duration', models.BigIntegerField(blank=True, help_text='请求耗时(单位:纳秒)', null=True)),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('scan', models.ForeignKey(help_text='所属的扫描任务', on_delete=django.db.models.deletion.CASCADE, related_name='directory_snapshots', to='scan.scan')),
],
options={
'verbose_name': '目录快照',
'verbose_name_plural': '目录快照',
'db_table': 'directory_snapshot',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['scan'], name='directory_s_scan_id_c45900_idx'), models.Index(fields=['url'], name='directory_s_url_b4b72b_idx'), models.Index(fields=['status'], name='directory_s_status_e9f57e_idx'), models.Index(fields=['content_type'], name='directory_s_content_45e864_idx'), models.Index(fields=['-created_at'], name='directory_s_created_eb9d27_idx'), django.contrib.postgres.indexes.GinIndex(fields=['url'], name='dir_snap_url_trgm', opclasses=['gin_trgm_ops'])],
'constraints': [models.UniqueConstraint(fields=('scan', 'url'), name='unique_directory_per_scan_snapshot')],
},
),
migrations.CreateModel(
name='Endpoint',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.TextField(help_text='最终访问的完整URL')),
('host', models.CharField(blank=True, default='', help_text='主机名域名或IP地址', max_length=253)),
('location', models.TextField(blank=True, default='', help_text='重定向地址HTTP 3xx 响应头 Location')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('title', models.TextField(blank=True, default='', help_text='网页标题HTML <title> 标签内容)')),
('webserver', models.TextField(blank=True, default='', help_text='服务器类型HTTP 响应头 Server 值)')),
('response_body', models.TextField(blank=True, default='', help_text='HTTP响应体')),
('content_type', models.TextField(blank=True, default='', help_text='响应类型HTTP Content-Type 响应头)')),
('tech', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='技术栈(服务器/框架/语言等)', size=None)),
('status_code', models.IntegerField(blank=True, help_text='HTTP状态码', null=True)),
('content_length', models.IntegerField(blank=True, help_text='响应体大小(单位字节)', null=True)),
('vhost', models.BooleanField(blank=True, help_text='是否支持虚拟主机', null=True)),
('matched_gf_patterns', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='匹配的GF模式列表用于识别敏感端点如api, debug, config等', size=None)),
('response_headers', models.TextField(blank=True, default='', help_text='原始HTTP响应头')),
('target', models.ForeignKey(help_text='所属的扫描目标(主关联字段,表示所属关系,不能为空)', on_delete=django.db.models.deletion.CASCADE, related_name='endpoints', to='targets.target')),
],
options={
'verbose_name': '端点',
'verbose_name_plural': '端点',
'db_table': 'endpoint',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['-created_at'], name='endpoint_created_44fe9c_idx'), models.Index(fields=['target'], name='endpoint_target__7f9065_idx'), models.Index(fields=['url'], name='endpoint_url_30f66e_idx'), models.Index(fields=['host'], name='endpoint_host_5b4cc8_idx'), models.Index(fields=['status_code'], name='endpoint_status__5d4fdd_idx'), models.Index(fields=['title'], name='endpoint_title_29e26c_idx'), django.contrib.postgres.indexes.GinIndex(fields=['tech'], name='endpoint_tech_2bfa7c_gin'), django.contrib.postgres.indexes.GinIndex(fields=['response_headers'], name='endpoint_resp_headers_trgm_idx', opclasses=['gin_trgm_ops']), django.contrib.postgres.indexes.GinIndex(fields=['url'], name='endpoint_url_trgm_idx', opclasses=['gin_trgm_ops']), django.contrib.postgres.indexes.GinIndex(fields=['title'], name='endpoint_title_trgm_idx', opclasses=['gin_trgm_ops'])],
'constraints': [models.UniqueConstraint(fields=('url', 'target'), name='unique_endpoint_url_target')],
},
),
migrations.CreateModel(
name='EndpointSnapshot',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.TextField(help_text='端点URL')),
('host', models.CharField(blank=True, default='', help_text='主机名域名或IP地址', max_length=253)),
('title', models.TextField(blank=True, default='', help_text='页面标题')),
('status_code', models.IntegerField(blank=True, help_text='HTTP状态码', null=True)),
('content_length', models.IntegerField(blank=True, help_text='内容长度', null=True)),
('location', models.TextField(blank=True, default='', help_text='重定向位置')),
('webserver', models.TextField(blank=True, default='', help_text='Web服务器')),
('content_type', models.TextField(blank=True, default='', help_text='内容类型')),
('tech', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='技术栈', size=None)),
('response_body', models.TextField(blank=True, default='', help_text='HTTP响应体')),
('vhost', models.BooleanField(blank=True, help_text='虚拟主机标志', null=True)),
('matched_gf_patterns', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='匹配的GF模式列表', size=None)),
('response_headers', models.TextField(blank=True, default='', help_text='原始HTTP响应头')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('scan', models.ForeignKey(help_text='所属的扫描任务', on_delete=django.db.models.deletion.CASCADE, related_name='endpoint_snapshots', to='scan.scan')),
],
options={
'verbose_name': '端点快照',
'verbose_name_plural': '端点快照',
'db_table': 'endpoint_snapshot',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['scan'], name='endpoint_sn_scan_id_6ac9a7_idx'), models.Index(fields=['url'], name='endpoint_sn_url_205160_idx'), models.Index(fields=['host'], name='endpoint_sn_host_577bfd_idx'), models.Index(fields=['title'], name='endpoint_sn_title_516a05_idx'), models.Index(fields=['status_code'], name='endpoint_sn_status__83efb0_idx'), models.Index(fields=['webserver'], name='endpoint_sn_webserv_66be83_idx'), models.Index(fields=['-created_at'], name='endpoint_sn_created_21fb5b_idx'), django.contrib.postgres.indexes.GinIndex(fields=['tech'], name='endpoint_sn_tech_0d0752_gin'), django.contrib.postgres.indexes.GinIndex(fields=['response_headers'], name='ep_snap_resp_hdr_trgm', opclasses=['gin_trgm_ops']), django.contrib.postgres.indexes.GinIndex(fields=['url'], name='ep_snap_url_trgm', opclasses=['gin_trgm_ops']), django.contrib.postgres.indexes.GinIndex(fields=['title'], name='ep_snap_title_trgm', opclasses=['gin_trgm_ops'])],
'constraints': [models.UniqueConstraint(fields=('scan', 'url'), name='unique_endpoint_per_scan_snapshot')],
},
),
migrations.CreateModel(
name='HostPortMapping',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('host', models.CharField(help_text='主机名域名或IP', max_length=1000)),
('ip', models.GenericIPAddressField(help_text='IP地址')),
('port', models.IntegerField(help_text='端口号1-65535', validators=[django.core.validators.MinValueValidator(1, message='端口号必须大于等于1'), django.core.validators.MaxValueValidator(65535, message='端口号必须小于等于65535')])),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('target', models.ForeignKey(help_text='所属的扫描目标', on_delete=django.db.models.deletion.CASCADE, related_name='host_port_mappings', to='targets.target')),
],
options={
'verbose_name': '主机端口映射',
'verbose_name_plural': '主机端口映射',
'db_table': 'host_port_mapping',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['target'], name='host_port_m_target__943e9b_idx'), models.Index(fields=['host'], name='host_port_m_host_f78363_idx'), models.Index(fields=['ip'], name='host_port_m_ip_2e6f02_idx'), models.Index(fields=['port'], name='host_port_m_port_9fb9ff_idx'), models.Index(fields=['host', 'ip'], name='host_port_m_host_3ce245_idx'), models.Index(fields=['-created_at'], name='host_port_m_created_11cd22_idx')],
'constraints': [models.UniqueConstraint(fields=('target', 'host', 'ip', 'port'), name='unique_target_host_ip_port')],
},
),
migrations.CreateModel(
name='HostPortMappingSnapshot',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('host', models.CharField(help_text='主机名域名或IP', max_length=1000)),
('ip', models.GenericIPAddressField(help_text='IP地址')),
('port', models.IntegerField(help_text='端口号1-65535', validators=[django.core.validators.MinValueValidator(1, message='端口号必须大于等于1'), django.core.validators.MaxValueValidator(65535, message='端口号必须小于等于65535')])),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('scan', models.ForeignKey(help_text='所属的扫描任务(主关联)', on_delete=django.db.models.deletion.CASCADE, related_name='host_port_mapping_snapshots', to='scan.scan')),
],
options={
'verbose_name': '主机端口映射快照',
'verbose_name_plural': '主机端口映射快照',
'db_table': 'host_port_mapping_snapshot',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['scan'], name='host_port_m_scan_id_50ba0b_idx'), models.Index(fields=['host'], name='host_port_m_host_e99054_idx'), models.Index(fields=['ip'], name='host_port_m_ip_54818c_idx'), models.Index(fields=['port'], name='host_port_m_port_ed7b48_idx'), models.Index(fields=['host', 'ip'], name='host_port_m_host_8a463a_idx'), models.Index(fields=['scan', 'host'], name='host_port_m_scan_id_426fdb_idx'), models.Index(fields=['-created_at'], name='host_port_m_created_fb28b8_idx')],
'constraints': [models.UniqueConstraint(fields=('scan', 'host', 'ip', 'port'), name='unique_scan_host_ip_port_snapshot')],
},
),
migrations.CreateModel(
name='Subdomain',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(help_text='子域名名称', max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('target', models.ForeignKey(help_text='所属的扫描目标(主关联字段,表示所属关系,不能为空)', on_delete=django.db.models.deletion.CASCADE, related_name='subdomains', to='targets.target')),
],
options={
'verbose_name': '子域名',
'verbose_name_plural': '子域名',
'db_table': 'subdomain',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['-created_at'], name='subdomain_created_e187a8_idx'), models.Index(fields=['name', 'target'], name='subdomain_name_60e1d0_idx'), models.Index(fields=['target'], name='subdomain_target__e409f0_idx'), models.Index(fields=['name'], name='subdomain_name_d40ba7_idx'), django.contrib.postgres.indexes.GinIndex(fields=['name'], name='subdomain_name_trgm_idx', opclasses=['gin_trgm_ops'])],
'constraints': [models.UniqueConstraint(fields=('name', 'target'), name='unique_subdomain_name_target')],
},
),
migrations.CreateModel(
name='SubdomainSnapshot',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(help_text='子域名名称', max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('scan', models.ForeignKey(help_text='所属的扫描任务', on_delete=django.db.models.deletion.CASCADE, related_name='subdomain_snapshots', to='scan.scan')),
],
options={
'verbose_name': '子域名快照',
'verbose_name_plural': '子域名快照',
'db_table': 'subdomain_snapshot',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['scan'], name='subdomain_s_scan_id_68c253_idx'), models.Index(fields=['name'], name='subdomain_s_name_2da42b_idx'), models.Index(fields=['-created_at'], name='subdomain_s_created_d2b48e_idx'), django.contrib.postgres.indexes.GinIndex(fields=['name'], name='subdomain_snap_name_trgm', opclasses=['gin_trgm_ops'])],
'constraints': [models.UniqueConstraint(fields=('scan', 'name'), name='unique_subdomain_per_scan_snapshot')],
},
),
migrations.CreateModel(
name='Vulnerability',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.CharField(help_text='漏洞所在的URL', max_length=2000)),
('vuln_type', models.CharField(help_text='漏洞类型(如 xss, sqli', max_length=100)),
('severity', models.CharField(choices=[('unknown', '未知'), ('info', '信息'), ('low', ''), ('medium', ''), ('high', ''), ('critical', '危急')], default='unknown', help_text='严重性(未知/信息/低/中/高/危急)', max_length=20)),
('source', models.CharField(blank=True, default='', help_text='来源工具(如 dalfox, nuclei, crlfuzz', max_length=50)),
('cvss_score', models.DecimalField(blank=True, decimal_places=1, help_text='CVSS 评分0.0-10.0', max_digits=3, null=True)),
('description', models.TextField(blank=True, default='', help_text='漏洞描述')),
('raw_output', models.JSONField(blank=True, default=dict, help_text='工具原始输出')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('target', models.ForeignKey(help_text='所属的扫描目标', on_delete=django.db.models.deletion.CASCADE, related_name='vulnerabilities', to='targets.target')),
],
options={
'verbose_name': '漏洞',
'verbose_name_plural': '漏洞',
'db_table': 'vulnerability',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['target'], name='vulnerabili_target__755a02_idx'), models.Index(fields=['vuln_type'], name='vulnerabili_vuln_ty_3010cd_idx'), models.Index(fields=['severity'], name='vulnerabili_severit_1a798b_idx'), models.Index(fields=['source'], name='vulnerabili_source_7c7552_idx'), models.Index(fields=['url'], name='vulnerabili_url_4dcc4d_idx'), models.Index(fields=['-created_at'], name='vulnerabili_created_e25ff7_idx')],
},
),
migrations.CreateModel(
name='VulnerabilitySnapshot',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.CharField(help_text='漏洞所在的URL', max_length=2000)),
('vuln_type', models.CharField(help_text='漏洞类型(如 xss, sqli', max_length=100)),
('severity', models.CharField(choices=[('unknown', '未知'), ('info', '信息'), ('low', ''), ('medium', ''), ('high', ''), ('critical', '危急')], default='unknown', help_text='严重性(未知/信息/低/中/高/危急)', max_length=20)),
('source', models.CharField(blank=True, default='', help_text='来源工具(如 dalfox, nuclei, crlfuzz', max_length=50)),
('cvss_score', models.DecimalField(blank=True, decimal_places=1, help_text='CVSS 评分0.0-10.0', max_digits=3, null=True)),
('description', models.TextField(blank=True, default='', help_text='漏洞描述')),
('raw_output', models.JSONField(blank=True, default=dict, help_text='工具原始输出')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('scan', models.ForeignKey(help_text='所属的扫描任务', on_delete=django.db.models.deletion.CASCADE, related_name='vulnerability_snapshots', to='scan.scan')),
],
options={
'verbose_name': '漏洞快照',
'verbose_name_plural': '漏洞快照',
'db_table': 'vulnerability_snapshot',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['scan'], name='vulnerabili_scan_id_7b81c9_idx'), models.Index(fields=['url'], name='vulnerabili_url_11a707_idx'), models.Index(fields=['vuln_type'], name='vulnerabili_vuln_ty_6b90ee_idx'), models.Index(fields=['severity'], name='vulnerabili_severit_4eae0d_idx'), models.Index(fields=['source'], name='vulnerabili_source_968b1f_idx'), models.Index(fields=['-created_at'], name='vulnerabili_created_53a12e_idx')],
},
),
migrations.CreateModel(
name='WebSite',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.TextField(help_text='最终访问的完整URL')),
('host', models.CharField(blank=True, default='', help_text='主机名域名或IP地址', max_length=253)),
('location', models.TextField(blank=True, default='', help_text='重定向地址HTTP 3xx 响应头 Location')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('title', models.TextField(blank=True, default='', help_text='网页标题HTML <title> 标签内容)')),
('webserver', models.TextField(blank=True, default='', help_text='服务器类型HTTP 响应头 Server 值)')),
('response_body', models.TextField(blank=True, default='', help_text='HTTP响应体')),
('content_type', models.TextField(blank=True, default='', help_text='响应类型HTTP Content-Type 响应头)')),
('tech', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='技术栈(服务器/框架/语言等)', size=None)),
('status_code', models.IntegerField(blank=True, help_text='HTTP状态码', null=True)),
('content_length', models.IntegerField(blank=True, help_text='响应体大小(单位字节)', null=True)),
('vhost', models.BooleanField(blank=True, help_text='是否支持虚拟主机', null=True)),
('response_headers', models.TextField(blank=True, default='', help_text='原始HTTP响应头')),
('target', models.ForeignKey(help_text='所属的扫描目标(主关联字段,表示所属关系,不能为空)', on_delete=django.db.models.deletion.CASCADE, related_name='websites', to='targets.target')),
],
options={
'verbose_name': '站点',
'verbose_name_plural': '站点',
'db_table': 'website',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['-created_at'], name='website_created_c9cfd2_idx'), models.Index(fields=['url'], name='website_url_b18883_idx'), models.Index(fields=['host'], name='website_host_996b50_idx'), models.Index(fields=['target'], name='website_target__2a353b_idx'), models.Index(fields=['title'], name='website_title_c2775b_idx'), models.Index(fields=['status_code'], name='website_status__51663d_idx'), django.contrib.postgres.indexes.GinIndex(fields=['tech'], name='website_tech_e3f0cb_gin'), django.contrib.postgres.indexes.GinIndex(fields=['response_headers'], name='website_resp_headers_trgm_idx', opclasses=['gin_trgm_ops']), django.contrib.postgres.indexes.GinIndex(fields=['url'], name='website_url_trgm_idx', opclasses=['gin_trgm_ops']), django.contrib.postgres.indexes.GinIndex(fields=['title'], name='website_title_trgm_idx', opclasses=['gin_trgm_ops'])],
'constraints': [models.UniqueConstraint(fields=('url', 'target'), name='unique_website_url_target')],
},
),
migrations.CreateModel(
name='WebsiteSnapshot',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('url', models.TextField(help_text='站点URL')),
('host', models.CharField(blank=True, default='', help_text='主机名域名或IP地址', max_length=253)),
('title', models.TextField(blank=True, default='', help_text='页面标题')),
('status_code', models.IntegerField(blank=True, help_text='HTTP状态码', null=True)),
('content_length', models.BigIntegerField(blank=True, help_text='内容长度', null=True)),
('location', models.TextField(blank=True, default='', help_text='重定向位置')),
('webserver', models.TextField(blank=True, default='', help_text='Web服务器')),
('content_type', models.TextField(blank=True, default='', help_text='内容类型')),
('tech', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='技术栈', size=None)),
('response_body', models.TextField(blank=True, default='', help_text='HTTP响应体')),
('vhost', models.BooleanField(blank=True, help_text='虚拟主机标志', null=True)),
('response_headers', models.TextField(blank=True, default='', help_text='原始HTTP响应头')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
('scan', models.ForeignKey(help_text='所属的扫描任务', on_delete=django.db.models.deletion.CASCADE, related_name='website_snapshots', to='scan.scan')),
],
options={
'verbose_name': '网站快照',
'verbose_name_plural': '网站快照',
'db_table': 'website_snapshot',
'ordering': ['-created_at'],
'indexes': [models.Index(fields=['scan'], name='website_sna_scan_id_26b6dc_idx'), models.Index(fields=['url'], name='website_sna_url_801a70_idx'), models.Index(fields=['host'], name='website_sna_host_348fe1_idx'), models.Index(fields=['title'], name='website_sna_title_b1a5ee_idx'), models.Index(fields=['-created_at'], name='website_sna_created_2c149a_idx'), django.contrib.postgres.indexes.GinIndex(fields=['tech'], name='website_sna_tech_3d6d2f_gin'), django.contrib.postgres.indexes.GinIndex(fields=['response_headers'], name='ws_snap_resp_hdr_trgm', opclasses=['gin_trgm_ops']), django.contrib.postgres.indexes.GinIndex(fields=['url'], name='ws_snap_url_trgm', opclasses=['gin_trgm_ops']), django.contrib.postgres.indexes.GinIndex(fields=['title'], name='ws_snap_title_trgm', opclasses=['gin_trgm_ops'])],
'constraints': [models.UniqueConstraint(fields=('scan', 'url'), name='unique_website_per_scan_snapshot')],
},
),
]

View File

@@ -0,0 +1,187 @@
"""
创建资产搜索 IMMV增量维护物化视图
使用 pg_ivm 扩展创建 IMMV数据变更时自动增量更新无需手动刷新。
包含:
1. asset_search_view - Website 搜索视图
2. endpoint_search_view - Endpoint 搜索视图
"""
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('asset', '0001_initial'),
]
operations = [
# 1. 确保 pg_ivm 扩展已启用
migrations.RunSQL(
sql="CREATE EXTENSION IF NOT EXISTS pg_ivm;",
reverse_sql="-- pg_ivm extension kept for other uses"
),
# ==================== Website IMMV ====================
# 2. 创建 asset_search_view IMMV
migrations.RunSQL(
sql="""
SELECT pgivm.create_immv('asset_search_view', $$
SELECT
w.id,
w.url,
w.host,
w.title,
w.tech,
w.status_code,
w.response_headers,
w.response_body,
w.content_type,
w.content_length,
w.webserver,
w.location,
w.vhost,
w.created_at,
w.target_id
FROM website w
$$);
""",
reverse_sql="SELECT pgivm.drop_immv('asset_search_view');"
),
# 3. 创建 asset_search_view 索引
migrations.RunSQL(
sql="""
-- 唯一索引
CREATE UNIQUE INDEX IF NOT EXISTS asset_search_view_id_idx
ON asset_search_view (id);
-- host 模糊搜索索引
CREATE INDEX IF NOT EXISTS asset_search_view_host_trgm_idx
ON asset_search_view USING gin (host gin_trgm_ops);
-- title 模糊搜索索引
CREATE INDEX IF NOT EXISTS asset_search_view_title_trgm_idx
ON asset_search_view USING gin (title gin_trgm_ops);
-- url 模糊搜索索引
CREATE INDEX IF NOT EXISTS asset_search_view_url_trgm_idx
ON asset_search_view USING gin (url gin_trgm_ops);
-- response_headers 模糊搜索索引
CREATE INDEX IF NOT EXISTS asset_search_view_headers_trgm_idx
ON asset_search_view USING gin (response_headers gin_trgm_ops);
-- response_body 模糊搜索索引
CREATE INDEX IF NOT EXISTS asset_search_view_body_trgm_idx
ON asset_search_view USING gin (response_body gin_trgm_ops);
-- tech 数组索引
CREATE INDEX IF NOT EXISTS asset_search_view_tech_idx
ON asset_search_view USING gin (tech);
-- status_code 索引
CREATE INDEX IF NOT EXISTS asset_search_view_status_idx
ON asset_search_view (status_code);
-- created_at 排序索引
CREATE INDEX IF NOT EXISTS asset_search_view_created_idx
ON asset_search_view (created_at DESC);
""",
reverse_sql="""
DROP INDEX IF EXISTS asset_search_view_id_idx;
DROP INDEX IF EXISTS asset_search_view_host_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_title_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_url_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_headers_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_body_trgm_idx;
DROP INDEX IF EXISTS asset_search_view_tech_idx;
DROP INDEX IF EXISTS asset_search_view_status_idx;
DROP INDEX IF EXISTS asset_search_view_created_idx;
"""
),
# ==================== Endpoint IMMV ====================
# 4. 创建 endpoint_search_view IMMV
migrations.RunSQL(
sql="""
SELECT pgivm.create_immv('endpoint_search_view', $$
SELECT
e.id,
e.url,
e.host,
e.title,
e.tech,
e.status_code,
e.response_headers,
e.response_body,
e.content_type,
e.content_length,
e.webserver,
e.location,
e.vhost,
e.matched_gf_patterns,
e.created_at,
e.target_id
FROM endpoint e
$$);
""",
reverse_sql="SELECT pgivm.drop_immv('endpoint_search_view');"
),
# 5. 创建 endpoint_search_view 索引
migrations.RunSQL(
sql="""
-- 唯一索引
CREATE UNIQUE INDEX IF NOT EXISTS endpoint_search_view_id_idx
ON endpoint_search_view (id);
-- host 模糊搜索索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_host_trgm_idx
ON endpoint_search_view USING gin (host gin_trgm_ops);
-- title 模糊搜索索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_title_trgm_idx
ON endpoint_search_view USING gin (title gin_trgm_ops);
-- url 模糊搜索索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_url_trgm_idx
ON endpoint_search_view USING gin (url gin_trgm_ops);
-- response_headers 模糊搜索索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_headers_trgm_idx
ON endpoint_search_view USING gin (response_headers gin_trgm_ops);
-- response_body 模糊搜索索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_body_trgm_idx
ON endpoint_search_view USING gin (response_body gin_trgm_ops);
-- tech 数组索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_tech_idx
ON endpoint_search_view USING gin (tech);
-- status_code 索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_status_idx
ON endpoint_search_view (status_code);
-- created_at 排序索引
CREATE INDEX IF NOT EXISTS endpoint_search_view_created_idx
ON endpoint_search_view (created_at DESC);
""",
reverse_sql="""
DROP INDEX IF EXISTS endpoint_search_view_id_idx;
DROP INDEX IF EXISTS endpoint_search_view_host_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_title_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_url_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_headers_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_body_trgm_idx;
DROP INDEX IF EXISTS endpoint_search_view_tech_idx;
DROP INDEX IF EXISTS endpoint_search_view_status_idx;
DROP INDEX IF EXISTS endpoint_search_view_created_idx;
"""
),
]

View File

@@ -65,28 +65,25 @@ class Endpoint(models.Model):
help_text='所属的扫描目标(主关联字段,表示所属关系,不能为空)'
)
url = models.CharField(max_length=2000, help_text='最终访问的完整URL')
url = models.TextField(help_text='最终访问的完整URL')
host = models.CharField(
max_length=253,
blank=True,
default='',
help_text='主机名域名或IP地址'
)
location = models.CharField(
max_length=1000,
location = models.TextField(
blank=True,
default='',
help_text='重定向地址HTTP 3xx 响应头 Location'
)
created_at = models.DateTimeField(auto_now_add=True, help_text='创建时间')
title = models.CharField(
max_length=1000,
title = models.TextField(
blank=True,
default='',
help_text='网页标题HTML <title> 标签内容)'
)
webserver = models.CharField(
max_length=200,
webserver = models.TextField(
blank=True,
default='',
help_text='服务器类型HTTP 响应头 Server 值)'
@@ -96,8 +93,7 @@ class Endpoint(models.Model):
default='',
help_text='HTTP响应体'
)
content_type = models.CharField(
max_length=200,
content_type = models.TextField(
blank=True,
default='',
help_text='响应类型HTTP Content-Type 响应头)'
@@ -188,28 +184,25 @@ class WebSite(models.Model):
help_text='所属的扫描目标(主关联字段,表示所属关系,不能为空)'
)
url = models.CharField(max_length=2000, help_text='最终访问的完整URL')
url = models.TextField(help_text='最终访问的完整URL')
host = models.CharField(
max_length=253,
blank=True,
default='',
help_text='主机名域名或IP地址'
)
location = models.CharField(
max_length=1000,
location = models.TextField(
blank=True,
default='',
help_text='重定向地址HTTP 3xx 响应头 Location'
)
created_at = models.DateTimeField(auto_now_add=True, help_text='创建时间')
title = models.CharField(
max_length=1000,
title = models.TextField(
blank=True,
default='',
help_text='网页标题HTML <title> 标签内容)'
)
webserver = models.CharField(
max_length=200,
webserver = models.TextField(
blank=True,
default='',
help_text='服务器类型HTTP 响应头 Server 值)'
@@ -219,8 +212,7 @@ class WebSite(models.Model):
default='',
help_text='HTTP响应体'
)
content_type = models.CharField(
max_length=200,
content_type = models.TextField(
blank=True,
default='',
help_text='响应类型HTTP Content-Type 响应头)'

View File

@@ -61,14 +61,14 @@ class WebsiteSnapshot(models.Model):
)
# 扫描结果数据
url = models.CharField(max_length=2000, help_text='站点URL')
url = models.TextField(help_text='站点URL')
host = models.CharField(max_length=253, blank=True, default='', help_text='主机名域名或IP地址')
title = models.CharField(max_length=500, blank=True, default='', help_text='页面标题')
status = models.IntegerField(null=True, blank=True, help_text='HTTP状态码')
title = models.TextField(blank=True, default='', help_text='页面标题')
status_code = models.IntegerField(null=True, blank=True, help_text='HTTP状态码')
content_length = models.BigIntegerField(null=True, blank=True, help_text='内容长度')
location = models.CharField(max_length=1000, blank=True, default='', help_text='重定向位置')
web_server = models.CharField(max_length=200, blank=True, default='', help_text='Web服务器')
content_type = models.CharField(max_length=200, blank=True, default='', help_text='内容类型')
location = models.TextField(blank=True, default='', help_text='重定向位置')
webserver = models.TextField(blank=True, default='', help_text='Web服务器')
content_type = models.TextField(blank=True, default='', help_text='内容类型')
tech = ArrayField(
models.CharField(max_length=100),
blank=True,
@@ -267,19 +267,19 @@ class EndpointSnapshot(models.Model):
)
# 扫描结果数据
url = models.CharField(max_length=2000, help_text='端点URL')
url = models.TextField(help_text='端点URL')
host = models.CharField(
max_length=253,
blank=True,
default='',
help_text='主机名域名或IP地址'
)
title = models.CharField(max_length=1000, blank=True, default='', help_text='页面标题')
title = models.TextField(blank=True, default='', help_text='页面标题')
status_code = models.IntegerField(null=True, blank=True, help_text='HTTP状态码')
content_length = models.IntegerField(null=True, blank=True, help_text='内容长度')
location = models.CharField(max_length=1000, blank=True, default='', help_text='重定向位置')
webserver = models.CharField(max_length=200, blank=True, default='', help_text='Web服务器')
content_type = models.CharField(max_length=200, blank=True, default='', help_text='内容类型')
location = models.TextField(blank=True, default='', help_text='重定向位置')
webserver = models.TextField(blank=True, default='', help_text='Web服务器')
content_type = models.TextField(blank=True, default='', help_text='内容类型')
tech = ArrayField(
models.CharField(max_length=100),
blank=True,

View File

@@ -46,10 +46,10 @@ class DjangoWebsiteSnapshotRepository:
url=item.url,
host=item.host,
title=item.title,
status=item.status,
status_code=item.status_code,
content_length=item.content_length,
location=item.location,
web_server=item.web_server,
webserver=item.webserver,
content_type=item.content_type,
tech=item.tech if item.tech else [],
response_body=item.response_body,
@@ -99,27 +99,12 @@ class DjangoWebsiteSnapshotRepository:
WebsiteSnapshot.objects
.filter(scan_id=scan_id)
.values(
'url', 'host', 'location', 'title', 'status',
'content_length', 'content_type', 'web_server', 'tech',
'url', 'host', 'location', 'title', 'status_code',
'content_length', 'content_type', 'webserver', 'tech',
'response_body', 'response_headers', 'vhost', 'created_at'
)
.order_by('url')
)
for row in qs.iterator(chunk_size=batch_size):
# 重命名字段以匹配 CSV 表头
yield {
'url': row['url'],
'host': row['host'],
'location': row['location'],
'title': row['title'],
'status_code': row['status'],
'content_length': row['content_length'],
'content_type': row['content_type'],
'webserver': row['web_server'],
'tech': row['tech'],
'response_body': row['response_body'],
'response_headers': row['response_headers'],
'vhost': row['vhost'],
'created_at': row['created_at'],
}
yield row

View File

@@ -217,8 +217,6 @@ class WebsiteSnapshotSerializer(serializers.ModelSerializer):
"""网站快照序列化器(用于扫描历史)"""
subdomain_name = serializers.CharField(source='subdomain.name', read_only=True)
webserver = serializers.CharField(source='web_server', read_only=True) # 映射字段名
status_code = serializers.IntegerField(source='status', read_only=True) # 映射字段名
responseHeaders = serializers.CharField(source='response_headers', read_only=True) # 原始HTTP响应头
class Meta:
@@ -228,9 +226,9 @@ class WebsiteSnapshotSerializer(serializers.ModelSerializer):
'url',
'location',
'title',
'webserver', # 使用映射后的字段名
'webserver',
'content_type',
'status_code', # 使用映射后的字段名
'status_code',
'content_length',
'response_body',
'tech',

View File

@@ -27,7 +27,7 @@ class EndpointService:
'url': 'url',
'host': 'host',
'title': 'title',
'status': 'status_code',
'status_code': 'status_code',
'tech': 'tech',
}

View File

@@ -19,7 +19,7 @@ class WebSiteService:
'url': 'url',
'host': 'host',
'title': 'title',
'status': 'status_code',
'status_code': 'status_code',
'tech': 'tech',
}

View File

@@ -0,0 +1,439 @@
"""
资产搜索服务
提供资产搜索的核心业务逻辑:
- 从物化视图查询数据
- 支持表达式语法解析
- 支持 =(模糊)、==(精确)、!=(不等于)操作符
- 支持 && (AND) 和 || (OR) 逻辑组合
- 支持 Website 和 Endpoint 两种资产类型
"""
import logging
import re
from typing import Optional, List, Dict, Any, Tuple, Literal
from django.db import connection
logger = logging.getLogger(__name__)

# Public search-field name (as typed by the user) -> column name in the
# search views.
FIELD_MAPPING = {
    'host': 'host',
    'url': 'url',
    'title': 'title',
    'tech': 'tech',
    'status': 'status_code',
    'body': 'response_body',
    'header': 'response_headers',
}

# Columns stored as PostgreSQL arrays; these need unnest()/ANY() SQL
# instead of plain ILIKE / = comparisons.
ARRAY_FIELDS = {'tech'}

# Asset type -> name of the backing IMMV (materialized view).
VIEW_MAPPING = {
    'website': 'asset_search_view',
    'endpoint': 'endpoint_search_view',
}

# Asset types accepted by the search API.
VALID_ASSET_TYPES = {'website', 'endpoint'}

# Columns selected for Website results.
WEBSITE_SELECT_FIELDS = """
    id,
    url,
    host,
    title,
    tech,
    status_code,
    response_headers,
    response_body,
    content_type,
    content_length,
    webserver,
    location,
    vhost,
    created_at,
    target_id
"""

# Columns selected for Endpoint results (adds matched_gf_patterns).
ENDPOINT_SELECT_FIELDS = """
    id,
    url,
    host,
    title,
    tech,
    status_code,
    response_headers,
    response_body,
    content_type,
    content_length,
    webserver,
    location,
    vhost,
    matched_gf_patterns,
    created_at,
    target_id
"""


class SearchQueryParser:
    """
    Parser for the asset-search expression language.

    Supported syntax:
        field="value"   fuzzy match (ILIKE %value%)
        field=="value"  exact match
        field!="value"  not equal
        &&              AND
        ||              OR
        ()              grouping (nesting is not supported)

    Examples:
        host="api" && tech="nginx"
        tech="vue" || tech="react"
        status=="200" && host!="test"

    A query with no recognizable operator is treated as a fuzzy `host`
    search. All generated SQL uses %s placeholders; values are always
    passed as bound parameters, never interpolated.
    """

    # One condition: field="value" / field=="value" / field!="value".
    # '==' and '!=' are listed before '=' so the longest operator wins.
    CONDITION_PATTERN = re.compile(r'(\w+)\s*(==|!=|=)\s*"([^"]*)"')

    @classmethod
    def parse(cls, query: str) -> Tuple[str, List[Any]]:
        """
        Parse a search expression into a SQL WHERE clause.

        Args:
            query: raw search expression entered by the user.

        Returns:
            (where_clause, params): the clause uses %s placeholders and
            params is the matching positional parameter list. An empty or
            unparseable query yields ("1=1", []).
        """
        if not query or not query.strip():
            return "1=1", []
        query = query.strip()

        # No operator syntax at all: treat the bare text as a fuzzy host
        # search so "example.com" behaves like host="example.com".
        if not cls.CONDITION_PATTERN.search(query):
            return "host ILIKE %s", [f"%{query}%"]

        # Split into OR groups first; AND binds tighter than OR.
        or_groups = cls._split_by_or(query)
        if len(or_groups) == 1:
            return cls._parse_and_group(or_groups[0])

        or_clauses: List[str] = []
        all_params: List[Any] = []
        for group in or_groups:
            clause, params = cls._parse_and_group(group)
            if clause and clause != "1=1":
                or_clauses.append(f"({clause})")
                all_params.extend(params)
        if not or_clauses:
            return "1=1", []
        return " OR ".join(or_clauses), all_params

    @classmethod
    def _split_by_or(cls, query: str) -> List[str]:
        """Split on ||, ignoring any || that appears inside double quotes."""
        parts: List[str] = []
        current = ""
        in_quotes = False
        i = 0
        while i < len(query):
            char = query[i]
            if char == '"':
                in_quotes = not in_quotes
                current += char
            elif not in_quotes and i + 1 < len(query) and query[i:i + 2] == '||':
                if current.strip():
                    parts.append(current.strip())
                current = ""
                i += 1  # skip the second '|'
            else:
                current += char
            i += 1
        if current.strip():
            parts.append(current.strip())
        return parts if parts else [query]

    @classmethod
    def _parse_and_group(cls, group: str) -> Tuple[str, List[Any]]:
        """Parse one AND group (conditions joined with &&)."""
        group = group.strip()
        # Drop a matching pair of outer parentheses around the whole group.
        if group.startswith('(') and group.endswith(')'):
            group = group[1:-1].strip()

        and_clauses: List[str] = []
        all_params: List[Any] = []
        for part in cls._split_by_and(group):
            clause, params = cls._parse_condition(part.strip())
            if clause:
                and_clauses.append(clause)
                all_params.extend(params)
        if not and_clauses:
            return "1=1", []
        return " AND ".join(and_clauses), all_params

    @classmethod
    def _split_by_and(cls, query: str) -> List[str]:
        """Split on &&, ignoring any && that appears inside double quotes."""
        parts: List[str] = []
        current = ""
        in_quotes = False
        i = 0
        while i < len(query):
            char = query[i]
            if char == '"':
                in_quotes = not in_quotes
                current += char
            elif not in_quotes and i + 1 < len(query) and query[i:i + 2] == '&&':
                if current.strip():
                    parts.append(current.strip())
                current = ""
                i += 1  # skip the second '&'
            else:
                current += char
            i += 1
        if current.strip():
            parts.append(current.strip())
        return parts if parts else [query]

    @classmethod
    def _parse_condition(cls, condition: str) -> Tuple[Optional[str], List[Any]]:
        """
        Parse a single field/operator/value condition.

        Returns:
            (sql_clause, params), or (None, []) when the condition cannot
            be parsed or references an unknown field.
        """
        condition = condition.strip()
        # Strip stray parentheses left over from group splitting.
        # Bug fix: a query like '(a="1") && (b="2")' splits into the parts
        # '(a="1")' ... oh rather 'a="1")' and '(b="2"' once the matching
        # outer pair is removed; the previous code only removed a *balanced*
        # pair, so '(b="2"' failed to match and the condition was silently
        # dropped. Quoted values are unaffected because they always end
        # with '"', never with a bare ')'.
        while condition.startswith('('):
            condition = condition[1:].strip()
        while condition.endswith(')'):
            condition = condition[:-1].strip()

        match = cls.CONDITION_PATTERN.match(condition)
        if not match:
            logger.warning(f"无法解析条件: {condition}")
            return None, []

        field, operator, value = match.groups()
        field = field.lower()
        if field not in FIELD_MAPPING:
            logger.warning(f"未知字段: {field}")
            return None, []

        db_field = FIELD_MAPPING[field]
        is_array = field in ARRAY_FIELDS

        if operator == '=':
            return cls._build_like_condition(db_field, value, is_array)
        elif operator == '==':
            return cls._build_exact_condition(db_field, value, is_array)
        elif operator == '!=':
            return cls._build_not_equal_condition(db_field, value, is_array)
        return None, []

    @classmethod
    def _build_like_condition(cls, field: str, value: str, is_array: bool) -> Tuple[str, List[Any]]:
        """Build a fuzzy-match (ILIKE) condition."""
        if is_array:
            # Array column: match if any element contains the value.
            return f"EXISTS (SELECT 1 FROM unnest({field}) AS t WHERE t ILIKE %s)", [f"%{value}%"]
        elif field == 'status_code':
            # Integer column: a numeric value becomes an exact match,
            # otherwise fall back to a textual ILIKE on the cast value.
            try:
                return f"{field} = %s", [int(value)]
            except ValueError:
                return f"{field}::text ILIKE %s", [f"%{value}%"]
        else:
            return f"{field} ILIKE %s", [f"%{value}%"]

    @classmethod
    def _build_exact_condition(cls, field: str, value: str, is_array: bool) -> Tuple[str, List[Any]]:
        """Build an exact-match condition."""
        if is_array:
            # Array column: match if the array contains this exact value.
            return f"%s = ANY({field})", [value]
        elif field == 'status_code':
            try:
                return f"{field} = %s", [int(value)]
            except ValueError:
                return f"{field}::text = %s", [value]
        else:
            return f"{field} = %s", [value]

    @classmethod
    def _build_not_equal_condition(cls, field: str, value: str, is_array: bool) -> Tuple[str, List[Any]]:
        """Build a not-equal condition (NULL values are treated as 'not equal')."""
        if is_array:
            # Array column: match if the array does not contain this value.
            return f"NOT (%s = ANY({field}))", [value]
        elif field == 'status_code':
            try:
                return f"({field} IS NULL OR {field} != %s)", [int(value)]
            except ValueError:
                return f"({field} IS NULL OR {field}::text != %s)", [value]
        else:
            return f"({field} IS NULL OR {field} != %s)", [value]
AssetType = Literal['website', 'endpoint']


class AssetSearchService:
    """
    Asset search service backed by the IMMV search views.

    Every method parses the user expression with SearchQueryParser and runs
    raw SQL against asset_search_view / endpoint_search_view. View names and
    column lists come from module constants, and all user values are bound
    parameters, so no user input is ever interpolated into the SQL text.
    """

    def search(
        self,
        query: str,
        asset_type: AssetType = 'website',
        limit: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """
        Search assets.

        Args:
            query: search expression string.
            asset_type: asset type ('website' or 'endpoint'); unknown values
                fall back to the website view.
            limit: optional cap on the number of returned rows (ignored when
                None or <= 0).

        Returns:
            List[Dict]: result rows as column-name -> value dicts, newest first.
        """
        where_clause, params = SearchQueryParser.parse(query)
        view_name = VIEW_MAPPING.get(asset_type, 'asset_search_view')
        select_fields = ENDPOINT_SELECT_FIELDS if asset_type == 'endpoint' else WEBSITE_SELECT_FIELDS
        sql = f"""
            SELECT {select_fields}
            FROM {view_name}
            WHERE {where_clause}
            ORDER BY created_at DESC
        """
        if limit is not None and limit > 0:
            # Bind LIMIT as a parameter instead of interpolating it into SQL.
            sql += " LIMIT %s"
            params = list(params) + [int(limit)]
        try:
            with connection.cursor() as cursor:
                cursor.execute(sql, params)
                columns = [col[0] for col in cursor.description]
                return [dict(zip(columns, row)) for row in cursor.fetchall()]
        except Exception as e:
            logger.error(f"搜索查询失败: {e}, SQL: {sql}, params: {params}")
            raise

    def count(self, query: str, asset_type: AssetType = 'website') -> int:
        """
        Count the number of search results.

        Args:
            query: search expression string.
            asset_type: asset type ('website' or 'endpoint').

        Returns:
            int: total number of matching rows.
        """
        where_clause, params = SearchQueryParser.parse(query)
        view_name = VIEW_MAPPING.get(asset_type, 'asset_search_view')
        sql = f"SELECT COUNT(*) FROM {view_name} WHERE {where_clause}"
        try:
            with connection.cursor() as cursor:
                cursor.execute(sql, params)
                return cursor.fetchone()[0]
        except Exception as e:
            logger.error(f"统计查询失败: {e}")
            raise

    def search_iter(
        self,
        query: str,
        asset_type: AssetType = 'website',
        batch_size: int = 1000
    ):
        """
        Stream search results (server-side cursor, memory friendly).

        Args:
            query: search expression string.
            asset_type: asset type ('website' or 'endpoint').
            batch_size: number of rows fetched from the server per round trip.

        Yields:
            Dict: one result row at a time.
        """
        where_clause, params = SearchQueryParser.parse(query)
        view_name = VIEW_MAPPING.get(asset_type, 'asset_search_view')
        select_fields = ENDPOINT_SELECT_FIELDS if asset_type == 'endpoint' else WEBSITE_SELECT_FIELDS
        sql = f"""
            SELECT {select_fields}
            FROM {view_name}
            WHERE {where_clause}
            ORDER BY created_at DESC
        """
        try:
            # Bug fix: Django's connection.cursor() takes no arguments, so the
            # previous connection.cursor(name='export_cursor') raised
            # TypeError. chunked_cursor() is Django's public API for a
            # server-side (named) cursor on PostgreSQL, which streams rows
            # instead of buffering the full result set in memory.
            cursor = connection.chunked_cursor()
            try:
                # Set the psycopg fetch batch size on the underlying cursor;
                # attribute writes on the Django wrapper are not forwarded.
                getattr(cursor, 'cursor', cursor).itersize = batch_size
                cursor.execute(sql, params)
                columns = [col[0] for col in cursor.description]
                for row in cursor:
                    yield dict(zip(columns, row))
            finally:
                cursor.close()
        except Exception as e:
            logger.error(f"流式搜索查询失败: {e}, SQL: {sql}, params: {params}")
            raise

View File

@@ -72,7 +72,7 @@ class EndpointSnapshotsService:
'url': 'url',
'host': 'host',
'title': 'title',
'status': 'status_code',
'status_code': 'status_code',
'webserver': 'webserver',
'tech': 'tech',
}

View File

@@ -73,8 +73,8 @@ class WebsiteSnapshotsService:
'url': 'url',
'host': 'host',
'title': 'title',
'status': 'status',
'webserver': 'web_server',
'status_code': 'status_code',
'webserver': 'webserver',
'tech': 'tech',
}

View File

@@ -0,0 +1,7 @@
"""
Asset 应用的任务模块
注意:物化视图刷新已移至 APScheduler 定时任务apps.engine.scheduler
"""
__all__ = []

View File

@@ -10,6 +10,8 @@ from .views import (
DirectoryViewSet,
VulnerabilityViewSet,
AssetStatisticsViewSet,
AssetSearchView,
AssetSearchExportView,
)
# 创建 DRF 路由器
@@ -25,4 +27,6 @@ router.register(r'statistics', AssetStatisticsViewSet, basename='asset-statistic
urlpatterns = [
path('assets/', include(router.urls)),
path('assets/search/', AssetSearchView.as_view(), name='asset-search'),
path('assets/search/export/', AssetSearchExportView.as_view(), name='asset-search-export'),
]

View File

@@ -0,0 +1,40 @@
"""
Asset 应用视图模块
重新导出所有视图类以保持向后兼容
"""
from .asset_views import (
AssetStatisticsViewSet,
SubdomainViewSet,
WebSiteViewSet,
DirectoryViewSet,
EndpointViewSet,
HostPortMappingViewSet,
VulnerabilityViewSet,
SubdomainSnapshotViewSet,
WebsiteSnapshotViewSet,
DirectorySnapshotViewSet,
EndpointSnapshotViewSet,
HostPortMappingSnapshotViewSet,
VulnerabilitySnapshotViewSet,
)
from .search_views import AssetSearchView, AssetSearchExportView
__all__ = [
'AssetStatisticsViewSet',
'SubdomainViewSet',
'WebSiteViewSet',
'DirectoryViewSet',
'EndpointViewSet',
'HostPortMappingViewSet',
'VulnerabilityViewSet',
'SubdomainSnapshotViewSet',
'WebsiteSnapshotViewSet',
'DirectorySnapshotViewSet',
'EndpointSnapshotViewSet',
'HostPortMappingSnapshotViewSet',
'VulnerabilitySnapshotViewSet',
'AssetSearchView',
'AssetSearchExportView',
]

View File

@@ -10,17 +10,17 @@ from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import DatabaseError, IntegrityError, OperationalError
from django.http import StreamingHttpResponse
from .serializers import (
from ..serializers import (
SubdomainListSerializer, WebSiteSerializer, DirectorySerializer,
VulnerabilitySerializer, EndpointListSerializer, IPAddressAggregatedSerializer,
SubdomainSnapshotSerializer, WebsiteSnapshotSerializer, DirectorySnapshotSerializer,
EndpointSnapshotSerializer, VulnerabilitySnapshotSerializer
)
from .services import (
from ..services import (
SubdomainService, WebSiteService, DirectoryService,
VulnerabilityService, AssetStatisticsService, EndpointService, HostPortMappingService
)
from .services.snapshot import (
from ..services.snapshot import (
SubdomainSnapshotsService, WebsiteSnapshotsService, DirectorySnapshotsService,
EndpointSnapshotsService, HostPortMappingSnapshotsService, VulnerabilitySnapshotsService
)

View File

@@ -0,0 +1,364 @@
"""
资产搜索 API 视图
提供资产搜索的 REST API 接口:
- GET /api/assets/search/ - 搜索资产
- GET /api/assets/search/export/ - 导出搜索结果为 CSV
搜索语法:
- field="value" 模糊匹配ILIKE %value%
- field=="value" 精确匹配
- field!="value" 不等于
- && AND 连接
- || OR 连接
支持的字段:
- host: 主机名
- url: URL
- title: 标题
- tech: 技术栈
- status: 状态码
- body: 响应体
- header: 响应头
支持的资产类型:
- website: 站点(默认)
- endpoint: 端点
"""
import logging
import json
from datetime import datetime
from urllib.parse import urlparse, urlunparse
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.request import Request
from django.http import StreamingHttpResponse
from django.db import connection
from apps.common.response_helpers import success_response, error_response
from apps.common.error_codes import ErrorCodes
from apps.asset.services.search_service import AssetSearchService, VALID_ASSET_TYPES
logger = logging.getLogger(__name__)
class AssetSearchView(APIView):
    """
    Asset search API.

    GET /api/assets/search/

    Query Parameters:
        q: search query expression
        asset_type: asset type ('website' or 'endpoint', default 'website')
        page: page number (1-based, default 1)
        pageSize: items per page (default 10, max 100)

    Example queries:
        ?q=host="api" && tech="nginx"
        ?q=tech="vue" || tech="react"&asset_type=endpoint
        ?q=status=="200" && host!="test"

    Response:
        {
            "results": [...],
            "total": 100,
            "page": 1,
            "pageSize": 10,
            "totalPages": 10,
            "assetType": "website"
        }
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # One stateless search-service instance per view instance.
        self.service = AssetSearchService()

    def _parse_headers(self, headers_data) -> dict:
        """Parse stored response headers into a dict.

        Accepts either a JSON-encoded mapping or a raw "Key: Value" header
        block; falls back to line-by-line parsing when JSON decoding fails.
        """
        if not headers_data:
            return {}
        try:
            return json.loads(headers_data)
        except (json.JSONDecodeError, TypeError):
            result = {}
            for line in str(headers_data).split('\n'):
                if ':' in line:
                    key, value = line.split(':', 1)
                    result[key.strip()] = value.strip()
            return result

    @staticmethod
    def _base_url(url: str) -> str:
        """Strip query string and fragment, keeping scheme://netloc/path."""
        parsed = urlparse(url)
        return urlunparse((parsed.scheme, parsed.netloc, parsed.path, '', '', ''))

    def _format_result(self, result: dict, vulnerabilities_by_url: dict, asset_type: str) -> dict:
        """Format a single raw search row into the camelCase API shape."""
        url = result.get('url', '')
        vulns = vulnerabilities_by_url.get(url, [])
        # Base fields shared by Website and Endpoint assets.
        formatted = {
            'id': result.get('id'),
            'url': url,
            'host': result.get('host', ''),
            'title': result.get('title', ''),
            'technologies': result.get('tech', []) or [],
            'statusCode': result.get('status_code'),
            'contentLength': result.get('content_length'),
            'contentType': result.get('content_type', ''),
            'webserver': result.get('webserver', ''),
            'location': result.get('location', ''),
            'vhost': result.get('vhost'),
            'responseHeaders': self._parse_headers(result.get('response_headers')),
            'responseBody': result.get('response_body', ''),
            'createdAt': result.get('created_at').isoformat() if result.get('created_at') else None,
            'targetId': result.get('target_id'),
        }
        # Website-only field: associated vulnerabilities.
        if asset_type == 'website':
            formatted['vulnerabilities'] = [
                {
                    'id': v.get('id'),
                    'name': v.get('vuln_type', ''),
                    'vulnType': v.get('vuln_type', ''),
                    'severity': v.get('severity', 'info'),
                }
                for v in vulns
            ]
        # Endpoint-only field: matched gf patterns.
        if asset_type == 'endpoint':
            formatted['matchedGfPatterns'] = result.get('matched_gf_patterns', []) or []
        return formatted

    def _get_vulnerabilities_by_url_prefix(self, website_urls: list) -> dict:
        """
        Batch-load vulnerabilities whose URL is a sub-path of a website URL.

        A vulnerability URL is a child path of its website URL, so prefix
        matching (with the query string stripped from the website URL) is used:
          - website.url:       https://example.com/path?query=1
          - vulnerability.url: https://example.com/path/api/users

        Args:
            website_urls: list of (url, target_id) tuples.

        Returns:
            dict: {website_url: [vulnerability_dict, ...]}
        """
        if not website_urls:
            return {}
        try:
            # Precompute (original_url, target_id, base_url) once so URLs are
            # not re-parsed inside the matching loop below (previously the
            # mapping was built and then discarded, and every vuln/website
            # pair re-ran urlparse).
            entries = []
            for url, target_id in website_urls:
                if not url or target_id is None:
                    continue
                entries.append((url, target_id, self._base_url(url)))
            if not entries:
                return {}

            # SQL is built from parameter placeholders only; values are bound.
            conditions = []
            params = []
            for _url, target_id, base_url in entries:
                conditions.append("(v.url LIKE %s AND v.target_id = %s)")
                params.extend([base_url + '%', target_id])
            where_clause = " OR ".join(conditions)
            sql = f"""
                SELECT v.id, v.vuln_type, v.severity, v.url, v.target_id
                FROM vulnerability v
                WHERE {where_clause}
                ORDER BY
                    CASE v.severity
                        WHEN 'critical' THEN 1
                        WHEN 'high' THEN 2
                        WHEN 'medium' THEN 3
                        WHEN 'low' THEN 4
                        ELSE 5
                    END
            """
            with connection.cursor() as cursor:
                cursor.execute(sql, params)
                all_vulns = [
                    {
                        'id': row[0],
                        'vuln_type': row[1],
                        'name': row[1],
                        'severity': row[2],
                        'url': row[3],
                        'target_id': row[4],
                    }
                    for row in cursor.fetchall()
                ]

            # Group by original website URL. Sort candidates by descending
            # base-URL length so each vulnerability attaches to the most
            # specific website (true longest-prefix match; previously the
            # first match in arbitrary input order won).
            entries.sort(key=lambda e: len(e[2]), reverse=True)
            result = {url: [] for url, _ in website_urls}
            for vuln in all_vulns:
                vuln_url = vuln['url']
                for website_url, target_id, base_url in entries:
                    if vuln_url.startswith(base_url) and vuln['target_id'] == target_id:
                        result[website_url].append(vuln)
                        break
            return result
        except Exception as e:
            # Vulnerability enrichment is best-effort: on failure the search
            # results are still returned, just without vulnerability data.
            logger.error(f"批量查询漏洞失败: {e}")
            return {}

    def get(self, request: Request):
        """Search assets and return a paginated, formatted result set."""
        # Search query is mandatory.
        query = request.query_params.get('q', '').strip()
        if not query:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='Search query (q) is required',
                status_code=status.HTTP_400_BAD_REQUEST
            )
        # Validate asset type.
        asset_type = request.query_params.get('asset_type', 'website').strip().lower()
        if asset_type not in VALID_ASSET_TYPES:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message=f'Invalid asset_type. Must be one of: {", ".join(VALID_ASSET_TYPES)}',
                status_code=status.HTTP_400_BAD_REQUEST
            )
        # Pagination parameters; malformed values fall back to defaults.
        try:
            page = int(request.query_params.get('page', 1))
            page_size = int(request.query_params.get('pageSize', 10))
        except (ValueError, TypeError):
            page = 1
            page_size = 10
        # Clamp to sane bounds.
        page = max(1, page)
        page_size = min(max(1, page_size), 100)
        # Total count and the requested page slice.
        total = self.service.count(query, asset_type)
        total_pages = (total + page_size - 1) // page_size if total > 0 else 1
        offset = (page - 1) * page_size
        # NOTE(review): the full result set is materialized and sliced in
        # memory; consider pushing offset/limit down into the service.
        all_results = self.service.search(query, asset_type)
        results = all_results[offset:offset + page_size]
        # Vulnerability enrichment applies to Website assets only.
        vulnerabilities_by_url = {}
        if asset_type == 'website':
            website_urls = [(r.get('url'), r.get('target_id')) for r in results if r.get('url') and r.get('target_id')]
            vulnerabilities_by_url = self._get_vulnerabilities_by_url_prefix(website_urls) if website_urls else {}
        # Format results for the API response.
        formatted_results = [self._format_result(r, vulnerabilities_by_url, asset_type) for r in results]
        return success_response(data={
            'results': formatted_results,
            'total': total,
            'page': page,
            'pageSize': page_size,
            'totalPages': total_pages,
            'assetType': asset_type,
        })
class AssetSearchExportView(APIView):
    """
    Asset search export API.

    GET /api/assets/search/export/

    Query Parameters:
        q: search query expression
        asset_type: asset type ('website' or 'endpoint', default 'website')

    Response:
        Streamed CSV file (server-side cursor; suitable for large exports).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # One stateless search-service instance per view instance.
        self.service = AssetSearchService()

    def _get_headers_and_formatters(self, asset_type: str):
        """Return the CSV column headers and per-column value formatters."""
        from apps.common.utils import format_datetime, format_list_field
        if asset_type == 'website':
            headers = ['url', 'host', 'title', 'status_code', 'content_type', 'content_length',
                       'webserver', 'location', 'tech', 'vhost', 'created_at']
        else:
            # Endpoint exports additionally carry matched gf patterns.
            headers = ['url', 'host', 'title', 'status_code', 'content_type', 'content_length',
                       'webserver', 'location', 'tech', 'matched_gf_patterns', 'vhost', 'created_at']
        formatters = {
            'created_at': format_datetime,
            'tech': lambda x: format_list_field(x, separator='; '),
            'matched_gf_patterns': lambda x: format_list_field(x, separator='; '),
            # Tri-state: True -> 'true', False -> 'false', None/missing -> ''.
            'vhost': lambda x: 'true' if x else ('false' if x is False else ''),
        }
        return headers, formatters

    def get(self, request: Request):
        """Export search results as a streamed CSV (no row-count limit)."""
        from apps.common.utils import generate_csv_rows
        # Search query is mandatory.
        query = request.query_params.get('q', '').strip()
        if not query:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message='Search query (q) is required',
                status_code=status.HTTP_400_BAD_REQUEST
            )
        # Validate asset type.
        asset_type = request.query_params.get('asset_type', 'website').strip().lower()
        if asset_type not in VALID_ASSET_TYPES:
            return error_response(
                code=ErrorCodes.VALIDATION_ERROR,
                message=f'Invalid asset_type. Must be one of: {", ".join(VALID_ASSET_TYPES)}',
                status_code=status.HTTP_400_BAD_REQUEST
            )
        # Fast count check so an empty result set never produces an empty file.
        total = self.service.count(query, asset_type)
        if total == 0:
            return error_response(
                code=ErrorCodes.NOT_FOUND,
                message='No results to export',
                status_code=status.HTTP_404_NOT_FOUND
            )
        # Column headers and formatters for this asset type.
        headers, formatters = self._get_headers_and_formatters(asset_type)
        # Streaming iterator over the full result set.
        data_iterator = self.service.search_iter(query, asset_type)
        # Timestamped download name, e.g. search_website_20260103_120000.csv.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f'search_{asset_type}_{timestamp}.csv'
        response = StreamingHttpResponse(
            generate_csv_rows(data_iterator, headers, formatters),
            content_type='text/csv; charset=utf-8'
        )
        # BUG FIX: the computed filename was never interpolated into the
        # header, so every download got a literal placeholder name.
        response['Content-Disposition'] = f'attachment; filename="{filename}"'
        return response

View File

@@ -0,0 +1,213 @@
# Generated by Django 5.2.7 on 2026-01-02 04:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the engine app.

    Creates the fingerprint libraries (ARL, EHole, FingerPrintHub, Fingers,
    Goby, Wappalyzer), the Nuclei template repository registry, scan-engine
    configurations, wordlist files and worker nodes.

    Auto-generated by Django; do not edit field definitions by hand.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Registry of Nuclei template Git repositories synced to workers.
        migrations.CreateModel(
            name='NucleiTemplateRepo',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='仓库名称,用于前端展示和配置引用', max_length=200, unique=True)),
                ('repo_url', models.CharField(help_text='Git 仓库地址', max_length=500)),
                ('local_path', models.CharField(blank=True, default='', help_text='本地工作目录绝对路径', max_length=500)),
                ('commit_hash', models.CharField(blank=True, default='', help_text='最后同步的 Git commit hash用于 Worker 版本校验', max_length=40)),
                ('last_synced_at', models.DateTimeField(blank=True, help_text='最后一次成功同步时间', null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
                ('updated_at', models.DateTimeField(auto_now=True, help_text='更新时间')),
            ],
            options={
                'verbose_name': 'Nuclei 模板仓库',
                'verbose_name_plural': 'Nuclei 模板仓库',
                'db_table': 'nuclei_template_repo',
            },
        ),
        # ARL fingerprint rules (name + rule expression).
        migrations.CreateModel(
            name='ARLFingerprint',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='指纹名称', max_length=300, unique=True)),
                ('rule', models.TextField(help_text='匹配规则表达式')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'ARL 指纹',
                'verbose_name_plural': 'ARL 指纹',
                'db_table': 'arl_fingerprint',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['name'], name='arl_fingerp_name_c3a305_idx'), models.Index(fields=['-created_at'], name='arl_fingerp_created_ed1060_idx')],
            },
        ),
        # EHole fingerprints; uniqueness is on (cms, method, location).
        migrations.CreateModel(
            name='EholeFingerprint',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cms', models.CharField(help_text='产品/CMS名称', max_length=200)),
                ('method', models.CharField(default='keyword', help_text='匹配方式', max_length=200)),
                ('location', models.CharField(default='body', help_text='匹配位置', max_length=200)),
                ('keyword', models.JSONField(default=list, help_text='关键词列表')),
                ('is_important', models.BooleanField(default=False, help_text='是否重点资产')),
                ('type', models.CharField(blank=True, default='-', help_text='分类', max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'EHole 指纹',
                'verbose_name_plural': 'EHole 指纹',
                'db_table': 'ehole_fingerprint',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['cms'], name='ehole_finge_cms_72ca2c_idx'), models.Index(fields=['method'], name='ehole_finge_method_17f0db_idx'), models.Index(fields=['location'], name='ehole_finge_locatio_7bb82b_idx'), models.Index(fields=['type'], name='ehole_finge_type_ca2bce_idx'), models.Index(fields=['is_important'], name='ehole_finge_is_impo_d56e64_idx'), models.Index(fields=['-created_at'], name='ehole_finge_created_d862b0_idx')],
                'constraints': [models.UniqueConstraint(fields=('cms', 'method', 'location'), name='unique_ehole_fingerprint')],
            },
        ),
        # FingerPrintHub fingerprints, keyed by upstream fp_id.
        migrations.CreateModel(
            name='FingerPrintHubFingerprint',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fp_id', models.CharField(help_text='指纹ID', max_length=200, unique=True)),
                ('name', models.CharField(help_text='指纹名称', max_length=300)),
                ('author', models.CharField(blank=True, default='', help_text='作者', max_length=200)),
                ('tags', models.CharField(blank=True, default='', help_text='标签', max_length=500)),
                ('severity', models.CharField(blank=True, default='info', help_text='严重程度', max_length=50)),
                ('metadata', models.JSONField(blank=True, default=dict, help_text='元数据')),
                ('http', models.JSONField(default=list, help_text='HTTP 匹配规则')),
                ('source_file', models.CharField(blank=True, default='', help_text='来源文件', max_length=500)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'FingerPrintHub 指纹',
                'verbose_name_plural': 'FingerPrintHub 指纹',
                'db_table': 'fingerprinthub_fingerprint',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['fp_id'], name='fingerprint_fp_id_df467f_idx'), models.Index(fields=['name'], name='fingerprint_name_95b6fb_idx'), models.Index(fields=['author'], name='fingerprint_author_80f54b_idx'), models.Index(fields=['severity'], name='fingerprint_severit_f70422_idx'), models.Index(fields=['-created_at'], name='fingerprint_created_bec16c_idx')],
            },
        ),
        # Fingers fingerprints (rule/tag arrays with optional default ports).
        migrations.CreateModel(
            name='FingersFingerprint',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='指纹名称', max_length=300, unique=True)),
                ('link', models.URLField(blank=True, default='', help_text='相关链接', max_length=500)),
                ('rule', models.JSONField(default=list, help_text='匹配规则数组')),
                ('tag', models.JSONField(default=list, help_text='标签数组')),
                ('focus', models.BooleanField(default=False, help_text='是否重点关注')),
                ('default_port', models.JSONField(blank=True, default=list, help_text='默认端口数组')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'Fingers 指纹',
                'verbose_name_plural': 'Fingers 指纹',
                'db_table': 'fingers_fingerprint',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['name'], name='fingers_fin_name_952de0_idx'), models.Index(fields=['link'], name='fingers_fin_link_4c6b7f_idx'), models.Index(fields=['focus'], name='fingers_fin_focus_568c7f_idx'), models.Index(fields=['-created_at'], name='fingers_fin_created_46fc91_idx')],
            },
        ),
        # Goby fingerprints (logic expression over a rule array).
        migrations.CreateModel(
            name='GobyFingerprint',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='产品名称', max_length=300, unique=True)),
                ('logic', models.CharField(help_text='逻辑表达式', max_length=500)),
                ('rule', models.JSONField(default=list, help_text='规则数组')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'Goby 指纹',
                'verbose_name_plural': 'Goby 指纹',
                'db_table': 'goby_fingerprint',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['name'], name='goby_finger_name_82084c_idx'), models.Index(fields=['logic'], name='goby_finger_logic_a63226_idx'), models.Index(fields=['-created_at'], name='goby_finger_created_50e000_idx')],
            },
        ),
        # Scan engine definitions (YAML configuration per engine).
        migrations.CreateModel(
            name='ScanEngine',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(help_text='引擎名称', max_length=200, unique=True)),
                ('configuration', models.CharField(blank=True, default='', help_text='引擎配置yaml 格式', max_length=10000)),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
                ('updated_at', models.DateTimeField(auto_now=True, help_text='更新时间')),
            ],
            options={
                'verbose_name': '扫描引擎',
                'verbose_name_plural': '扫描引擎',
                'db_table': 'scan_engine',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['-created_at'], name='scan_engine_created_da4870_idx')],
            },
        ),
        # Wappalyzer technology fingerprints (detection rules per channel).
        migrations.CreateModel(
            name='WappalyzerFingerprint',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='应用名称', max_length=300, unique=True)),
                ('cats', models.JSONField(default=list, help_text='分类 ID 数组')),
                ('cookies', models.JSONField(blank=True, default=dict, help_text='Cookie 检测规则')),
                ('headers', models.JSONField(blank=True, default=dict, help_text='HTTP Header 检测规则')),
                ('script_src', models.JSONField(blank=True, default=list, help_text='脚本 URL 正则数组')),
                ('js', models.JSONField(blank=True, default=list, help_text='JavaScript 变量检测规则')),
                ('implies', models.JSONField(blank=True, default=list, help_text='依赖关系数组')),
                ('meta', models.JSONField(blank=True, default=dict, help_text='HTML meta 标签检测规则')),
                ('html', models.JSONField(blank=True, default=list, help_text='HTML 内容正则数组')),
                ('description', models.TextField(blank=True, default='', help_text='应用描述')),
                ('website', models.URLField(blank=True, default='', help_text='官网链接', max_length=500)),
                ('cpe', models.CharField(blank=True, default='', help_text='CPE 标识符', max_length=300)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'Wappalyzer 指纹',
                'verbose_name_plural': 'Wappalyzer 指纹',
                'db_table': 'wappalyzer_fingerprint',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['name'], name='wappalyzer__name_63c669_idx'), models.Index(fields=['website'], name='wappalyzer__website_88de1c_idx'), models.Index(fields=['cpe'], name='wappalyzer__cpe_30c761_idx'), models.Index(fields=['-created_at'], name='wappalyzer__created_8e6c21_idx')],
            },
        ),
        # Wordlist files stored on disk; hash used for worker-side caching.
        migrations.CreateModel(
            name='Wordlist',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(help_text='字典名称,唯一', max_length=200, unique=True)),
                ('description', models.CharField(blank=True, default='', help_text='字典描述', max_length=200)),
                ('file_path', models.CharField(help_text='后端保存的字典文件绝对路径', max_length=500)),
                ('file_size', models.BigIntegerField(default=0, help_text='文件大小(字节)')),
                ('line_count', models.IntegerField(default=0, help_text='字典行数')),
                ('file_hash', models.CharField(blank=True, default='', help_text='文件 SHA-256 哈希,用于缓存校验', max_length=64)),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
                ('updated_at', models.DateTimeField(auto_now=True, help_text='更新时间')),
            ],
            options={
                'verbose_name': '字典文件',
                'verbose_name_plural': '字典文件',
                'db_table': 'wordlist',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['-created_at'], name='wordlist_created_4afb02_idx')],
            },
        ),
        # Worker nodes reached over SSH; remote IPs unique, names unique.
        migrations.CreateModel(
            name='WorkerNode',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='节点名称', max_length=100)),
                ('ip_address', models.GenericIPAddressField(help_text='IP 地址(本地节点为 127.0.0.1')),
                ('ssh_port', models.IntegerField(default=22, help_text='SSH 端口')),
                ('username', models.CharField(default='root', help_text='SSH 用户名', max_length=50)),
                ('password', models.CharField(blank=True, default='', help_text='SSH 密码', max_length=200)),
                ('is_local', models.BooleanField(default=False, help_text='是否为本地节点Docker 容器内)')),
                ('status', models.CharField(choices=[('pending', '待部署'), ('deploying', '部署中'), ('online', '在线'), ('offline', '离线'), ('updating', '更新中'), ('outdated', '版本过低')], default='pending', help_text='状态: pending/deploying/online/offline', max_length=20)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Worker 节点',
                'db_table': 'worker_node',
                'ordering': ['-created_at'],
                'constraints': [models.UniqueConstraint(condition=models.Q(('is_local', False)), fields=('ip_address',), name='unique_remote_worker_ip'), models.UniqueConstraint(fields=('name',), name='unique_worker_name')],
            },
        ),
    ]

View File

@@ -88,6 +88,8 @@ def _register_scheduled_jobs(scheduler: BackgroundScheduler):
replace_existing=True,
)
logger.info(" - 已注册: 扫描结果清理(每天 03:00")
# 注意:搜索物化视图刷新已迁移到 pg_ivm 增量维护,无需定时任务
def _trigger_scheduled_scans():

View File

@@ -204,14 +204,13 @@ def _run_scans_sequentially(
# 流式执行扫描并实时保存结果
result = run_and_stream_save_websites_task(
cmd=command,
tool_name=tool_name, # 新增:工具名称
tool_name=tool_name,
scan_id=scan_id,
target_id=target_id,
cwd=str(site_scan_dir),
shell=True,
batch_size=1000,
timeout=timeout,
log_file=str(log_file) # 新增:日志文件路径
log_file=str(log_file)
)
tool_stats[tool_name] = {

View File

@@ -212,7 +212,6 @@ def _validate_and_stream_save_urls(
target_id=target_id,
cwd=str(url_fetch_dir),
shell=True,
batch_size=500,
timeout=timeout,
log_file=str(log_file)
)

View File

@@ -162,6 +162,8 @@ def on_initiate_scan_flow_completed(flow: Flow, flow_run: FlowRun, state: State)
# 执行状态更新并获取统计数据
stats = _update_completed_status()
# 注意:物化视图刷新已迁移到 pg_ivm 增量维护,无需手动标记刷新
# 发送通知(包含统计摘要)
logger.info("准备发送扫描完成通知 - Scan ID: %s, Target: %s", scan_id, target_name)
try:

View File

@@ -0,0 +1,119 @@
# Generated by Django 5.2.7 on 2026-01-02 04:45
import django.contrib.postgres.fields
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the scan/notification app.

    Creates notification settings and in-app notifications, scan tasks
    (with cached result statistics) and cron-driven scheduled scans.
    Depends on the engine and targets initial migrations for FK targets.

    Auto-generated by Django; do not edit field definitions by hand.
    """

    initial = True

    dependencies = [
        ('engine', '0001_initial'),
        ('targets', '0001_initial'),
    ]

    operations = [
        # Singleton-style notification channel configuration.
        migrations.CreateModel(
            name='NotificationSettings',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('discord_enabled', models.BooleanField(default=False, help_text='是否启用 Discord 通知')),
                ('discord_webhook_url', models.URLField(blank=True, default='', help_text='Discord Webhook URL')),
                ('categories', models.JSONField(default=dict, help_text='各分类通知开关,如 {"scan": true, "vulnerability": true, "asset": true, "system": false}')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': '通知设置',
                'verbose_name_plural': '通知设置',
                'db_table': 'notification_settings',
            },
        ),
        # In-app notification records with category/level/read-state indexes.
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('category', models.CharField(choices=[('scan', '扫描任务'), ('vulnerability', '漏洞发现'), ('asset', '资产发现'), ('system', '系统消息')], db_index=True, default='system', help_text='通知分类', max_length=20)),
                ('level', models.CharField(choices=[('low', ''), ('medium', ''), ('high', ''), ('critical', '严重')], db_index=True, default='low', help_text='通知级别', max_length=20)),
                ('title', models.CharField(help_text='通知标题', max_length=200)),
                ('message', models.CharField(help_text='通知内容', max_length=2000)),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
                ('is_read', models.BooleanField(default=False, help_text='是否已读')),
                ('read_at', models.DateTimeField(blank=True, help_text='阅读时间', null=True)),
            ],
            options={
                'verbose_name': '通知',
                'verbose_name_plural': '通知',
                'db_table': 'notification',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['-created_at'], name='notificatio_created_c430f0_idx'), models.Index(fields=['category', '-created_at'], name='notificatio_categor_df0584_idx'), models.Index(fields=['level', '-created_at'], name='notificatio_level_0e5d12_idx'), models.Index(fields=['is_read', '-created_at'], name='notificatio_is_read_518ce0_idx')],
            },
        ),
        # Scan tasks; cached_* columns denormalize result counts for lists.
        migrations.CreateModel(
            name='Scan',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('engine_ids', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), default=list, help_text='引擎 ID 列表', size=None)),
                ('engine_names', models.JSONField(default=list, help_text='引擎名称列表,如 ["引擎A", "引擎B"]')),
                ('merged_configuration', models.TextField(default='', help_text='合并后的 YAML 配置')),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='任务创建时间')),
                ('stopped_at', models.DateTimeField(blank=True, help_text='扫描结束时间', null=True)),
                ('status', models.CharField(choices=[('cancelled', '已取消'), ('completed', '已完成'), ('failed', '失败'), ('initiated', '初始化'), ('running', '运行中')], db_index=True, default='initiated', help_text='任务状态', max_length=20)),
                ('results_dir', models.CharField(blank=True, default='', help_text='结果存储目录', max_length=100)),
                ('container_ids', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='容器 ID 列表Docker Container ID', size=None)),
                ('error_message', models.CharField(blank=True, default='', help_text='错误信息', max_length=2000)),
                ('deleted_at', models.DateTimeField(blank=True, db_index=True, help_text='删除时间NULL表示未删除', null=True)),
                ('progress', models.IntegerField(default=0, help_text='扫描进度 0-100')),
                ('current_stage', models.CharField(blank=True, default='', help_text='当前扫描阶段', max_length=50)),
                ('stage_progress', models.JSONField(default=dict, help_text='各阶段进度详情')),
                ('cached_subdomains_count', models.IntegerField(default=0, help_text='缓存的子域名数量')),
                ('cached_websites_count', models.IntegerField(default=0, help_text='缓存的网站数量')),
                ('cached_endpoints_count', models.IntegerField(default=0, help_text='缓存的端点数量')),
                ('cached_ips_count', models.IntegerField(default=0, help_text='缓存的IP地址数量')),
                ('cached_directories_count', models.IntegerField(default=0, help_text='缓存的目录数量')),
                ('cached_vulns_total', models.IntegerField(default=0, help_text='缓存的漏洞总数')),
                ('cached_vulns_critical', models.IntegerField(default=0, help_text='缓存的严重漏洞数量')),
                ('cached_vulns_high', models.IntegerField(default=0, help_text='缓存的高危漏洞数量')),
                ('cached_vulns_medium', models.IntegerField(default=0, help_text='缓存的中危漏洞数量')),
                ('cached_vulns_low', models.IntegerField(default=0, help_text='缓存的低危漏洞数量')),
                ('stats_updated_at', models.DateTimeField(blank=True, help_text='统计数据最后更新时间', null=True)),
                ('target', models.ForeignKey(help_text='扫描目标', on_delete=django.db.models.deletion.CASCADE, related_name='scans', to='targets.target')),
                ('worker', models.ForeignKey(blank=True, help_text='执行扫描的 Worker 节点', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='scans', to='engine.workernode')),
            ],
            options={
                'verbose_name': '扫描任务',
                'verbose_name_plural': '扫描任务',
                'db_table': 'scan',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['-created_at'], name='scan_created_0bb6c7_idx'), models.Index(fields=['target'], name='scan_target__718b9d_idx'), models.Index(fields=['deleted_at', '-created_at'], name='scan_deleted_eb17e8_idx')],
            },
        ),
        # Cron-scheduled scans targeting either one target or a whole org.
        migrations.CreateModel(
            name='ScheduledScan',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(help_text='任务名称', max_length=200)),
                ('engine_ids', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), default=list, help_text='引擎 ID 列表', size=None)),
                ('engine_names', models.JSONField(default=list, help_text='引擎名称列表,如 ["引擎A", "引擎B"]')),
                ('merged_configuration', models.TextField(default='', help_text='合并后的 YAML 配置')),
                ('cron_expression', models.CharField(default='0 2 * * *', help_text='Cron 表达式,格式:分 时 日 月 周', max_length=100)),
                ('is_enabled', models.BooleanField(db_index=True, default=True, help_text='是否启用')),
                ('run_count', models.IntegerField(default=0, help_text='已执行次数')),
                ('last_run_time', models.DateTimeField(blank=True, help_text='上次执行时间', null=True)),
                ('next_run_time', models.DateTimeField(blank=True, help_text='下次执行时间', null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
                ('updated_at', models.DateTimeField(auto_now=True, help_text='更新时间')),
                ('organization', models.ForeignKey(blank=True, help_text='扫描组织(设置后执行时动态获取组织下所有目标)', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='scheduled_scans', to='targets.organization')),
                ('target', models.ForeignKey(blank=True, help_text='扫描单个目标(与 organization 二选一)', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='scheduled_scans', to='targets.target')),
            ],
            options={
                'verbose_name': '定时扫描任务',
                'verbose_name_plural': '定时扫描任务',
                'db_table': 'scheduled_scan',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['-created_at'], name='scheduled_s_created_9b9c2e_idx'), models.Index(fields=['is_enabled', '-created_at'], name='scheduled_s_is_enab_23d660_idx'), models.Index(fields=['name'], name='scheduled_s_name_bf332d_idx')],
            },
        ),
    ]

View File

@@ -341,9 +341,9 @@ def _process_batch(
url=record['url'],
host=host,
title=record.get('title', '') or '',
status=record.get('status_code'),
status_code=record.get('status_code'),
content_length=record.get('content_length'),
web_server=record.get('server', '') or '',
webserver=record.get('server', '') or '',
tech=record.get('techs', []),
)
snapshot_dtos.append(dto)

View File

@@ -30,7 +30,6 @@ from typing import Generator, Optional, Dict, Any, TYPE_CHECKING
from django.db import IntegrityError, OperationalError, DatabaseError
from dataclasses import dataclass
from urllib.parse import urlparse, urlunparse
from dateutil.parser import parse as parse_datetime
from psycopg2 import InterfaceError
from apps.asset.dtos.snapshot import WebsiteSnapshotDTO
@@ -62,6 +61,18 @@ class ServiceSet:
)
def _sanitize_string(value: str) -> str:
"""
清理字符串中的 NUL 字符和其他不可打印字符
PostgreSQL 不允许字符串字段包含 NUL (0x00) 字符
"""
if not value:
return value
# 移除 NUL 字符
return value.replace('\x00', '')
def normalize_url(url: str) -> str:
"""
标准化 URL移除默认端口号
@@ -117,70 +128,50 @@ def normalize_url(url: str) -> str:
return url
def _extract_hostname(url: str) -> str:
"""
从 URL 提取主机名
Args:
url: URL 字符串
Returns:
str: 提取的主机名(小写)
"""
try:
if url:
parsed = urlparse(url)
if parsed.hostname:
return parsed.hostname
# 降级方案:手动提取
return url.replace('http://', '').replace('https://', '').split('/')[0].split(':')[0]
return ''
except Exception as e:
logger.debug("提取主机名失败: %s", e)
return ''
class HttpxRecord:
"""httpx 扫描记录数据类"""
def __init__(self, data: Dict[str, Any]):
self.url = data.get('url', '')
self.input = data.get('input', '')
self.title = data.get('title', '')
self.status_code = data.get('status_code')
self.content_length = data.get('content_length')
self.content_type = data.get('content_type', '')
self.location = data.get('location', '')
self.webserver = data.get('webserver', '')
self.response_body = data.get('body', '') # 从 body 字段获取完整响应体
self.tech = data.get('tech', [])
self.vhost = data.get('vhost')
self.failed = data.get('failed', False)
self.timestamp = data.get('timestamp')
self.response_headers = data.get('raw_header', '') # 从 raw_header 字段获取原始响应头字符串
self.url = _sanitize_string(data.get('url', ''))
self.input = _sanitize_string(data.get('input', ''))
self.title = _sanitize_string(data.get('title', ''))
self.status_code = data.get('status_code') # int不需要清理
self.content_length = data.get('content_length') # int不需要清理
self.content_type = _sanitize_string(data.get('content_type', ''))
self.location = _sanitize_string(data.get('location', ''))
self.webserver = _sanitize_string(data.get('webserver', ''))
self.response_body = _sanitize_string(data.get('body', ''))
self.tech = [_sanitize_string(t) for t in data.get('tech', []) if isinstance(t, str)] # 列表中的字符串也需要清理
self.vhost = data.get('vhost') # bool不需要清理
self.failed = data.get('failed', False) # bool不需要清理
self.response_headers = _sanitize_string(data.get('raw_header', ''))
# 从 URL 中提取主机名
self.host = self._extract_hostname()
def _extract_hostname(self) -> str:
"""
从 URL 或 input 字段提取主机名
优先级:
1. 使用 urlparse 解析 URL 获取 hostname
2. 从 input 字段提取(处理可能包含协议的情况)
3. 从 URL 字段手动提取(降级方案)
Returns:
str: 提取的主机名(小写)
"""
try:
# 方法 1: 使用 urlparse 解析 URL
if self.url:
parsed = urlparse(self.url)
if parsed.hostname:
return parsed.hostname
# 方法 2: 从 input 字段提取
if self.input:
host = self.input.strip().lower()
# 移除协议前缀
if host.startswith(('http://', 'https://')):
host = host.split('//', 1)[1].split('/')[0]
return host
# 方法 3: 从 URL 手动提取(降级方案)
if self.url:
return self.url.replace('http://', '').replace('https://', '').split('/')[0]
# 兜底:返回空字符串
return ''
except Exception as e:
# 异常处理:尽力从 input 或 URL 提取
logger.debug("提取主机名失败: %s,使用降级方案", e)
if self.input:
return self.input.strip().lower()
if self.url:
return self.url.replace('http://', '').replace('https://', '').split('/')[0]
return ''
# 从 URL 中提取主机名(优先使用 httpx 返回的 host否则自动提取
httpx_host = _sanitize_string(data.get('host', ''))
self.host = httpx_host if httpx_host else _extract_hostname(self.url)
def _save_batch_with_retry(
@@ -228,39 +219,31 @@ def _save_batch_with_retry(
}
except (OperationalError, DatabaseError, InterfaceError) as e:
# 数据库连接/操作错误,可重试
# 数据库级错误(连接中断、表结构不匹配等):按指数退避重试,最终失败时抛出异常让 Flow 失败
if attempt < max_retries - 1:
wait_time = 2 ** attempt # 指数退避: 1s, 2s, 4s
wait_time = 2 ** attempt
logger.warning(
"批次 %d 保存失败(第 %d 次尝试),%d秒后重试: %s",
batch_num, attempt + 1, wait_time, str(e)[:100]
)
time.sleep(wait_time)
else:
logger.error("批次 %d 保存失败(已重试 %d 次): %s", batch_num, max_retries, e)
return {
'success': False,
'created_websites': 0,
'skipped_failed': 0
}
except Exception as e:
# 其他未知错误 - 检查是否为连接问题
error_str = str(e).lower()
if 'connection' in error_str and attempt < max_retries - 1:
logger.warning(
"批次 %d 连接相关错误(尝试 %d/%d: %sRepository 装饰器会自动重连",
batch_num, attempt + 1, max_retries, str(e)
logger.error(
"批次 %d 保存失败(已重试 %d 次),将终止任务: %s",
batch_num,
max_retries,
e,
exc_info=True,
)
time.sleep(2)
else:
logger.error("批次 %d 未知错误: %s", batch_num, e, exc_info=True)
return {
'success': False,
'created_websites': 0,
'skipped_failed': 0
}
# 让上层 Task 感知失败,从而标记整个扫描为失败
raise
except Exception as e:
# 其他未知异常也不再吞掉,直接抛出以便 Flow 标记为失败
logger.error("批次 %d 未知错误: %s", batch_num, e, exc_info=True)
raise
# 理论上不会走到这里,保留兜底返回值以满足类型约束
return {
'success': False,
'created_websites': 0,
@@ -328,43 +311,39 @@ def _save_batch(
skipped_failed += 1
continue
# 解析时间戳
created_at = None
if hasattr(record, 'timestamp') and record.timestamp:
try:
created_at = parse_datetime(record.timestamp)
except (ValueError, TypeError) as e:
logger.warning(f"无法解析时间戳 {record.timestamp}: {e}")
# 使用 input 字段(原始扫描的 URL而不是 url 字段(重定向后的 URL
# 原因:避免多个不同的输入 URL 重定向到同一个 URL 时产生唯一约束冲突
# 例如http://example.com 和 https://example.com 都重定向到 https://example.com
# 如果使用 record.url两条记录会有相同的 url导致数据库冲突
# 如果使用 record.input两条记录保留原始输入不会冲突
normalized_url = normalize_url(record.input)
# 提取 host 字段域名或IP地址
host = record.host if record.host else ''
# 创建 WebsiteSnapshot DTO
snapshot_dto = WebsiteSnapshotDTO(
scan_id=scan_id,
target_id=target_id, # 主关联字段
url=normalized_url, # 保存原始输入 URL归一化后
host=host, # 主机名域名或IP地址
location=record.location, # location 字段保存重定向信息
title=record.title[:1000] if record.title else '',
web_server=record.webserver[:200] if record.webserver else '',
response_body=record.response_body if record.response_body else '',
content_type=record.content_type[:200] if record.content_type else '',
tech=record.tech if isinstance(record.tech, list) else [],
status=record.status_code,
content_length=record.content_length,
vhost=record.vhost,
response_headers=record.response_headers if record.response_headers else '',
)
snapshot_items.append(snapshot_dto)
try:
# 使用 input 字段(原始扫描的 URL而不是 url 字段(重定向后的 URL
# 原因:避免多个不同的输入 URL 重定向到同一个 URL 时产生唯一约束冲突
# 例如http://example.com 和 https://example.com 都重定向到 https://example.com
# 如果使用 record.url两条记录会有相同的 url导致数据库冲突
# 如果使用 record.input两条记录保留原始输入不会冲突
normalized_url = normalize_url(record.input) if record.input else normalize_url(record.url)
# 提取 host 字段域名或IP地址
host = record.host if record.host else ''
# 创建 WebsiteSnapshot DTO
snapshot_dto = WebsiteSnapshotDTO(
scan_id=scan_id,
target_id=target_id, # 主关联字段
url=normalized_url, # 保存原始输入 URL归一化后
host=host, # 主机名域名或IP地址
location=record.location if record.location else '',
title=record.title if record.title else '',
webserver=record.webserver if record.webserver else '',
response_body=record.response_body if record.response_body else '',
content_type=record.content_type if record.content_type else '',
tech=record.tech if isinstance(record.tech, list) else [],
status_code=record.status_code,
content_length=record.content_length,
vhost=record.vhost,
response_headers=record.response_headers if record.response_headers else '',
)
snapshot_items.append(snapshot_dto)
except Exception as e:
logger.error("处理记录失败: %s,错误: %s", record.url, e)
continue
# ========== Step 3: 保存快照并同步到资产表(通过快照 Service==========
if snapshot_items:
@@ -386,28 +365,31 @@ def _parse_and_validate_line(line: str) -> Optional[HttpxRecord]:
Optional[HttpxRecord]: 有效的 httpx 扫描记录,或 None 如果验证失败
验证步骤:
1. 解析 JSON 格式
2. 验证数据类型为字典
3. 创建 HttpxRecord 对象
4. 验证必要字段url
1. 清理 NUL 字符
2. 解析 JSON 格式
3. 验证数据类型为字典
4. 创建 HttpxRecord 对象
5. 验证必要字段url
"""
try:
# 步骤 1: 解析 JSON
# 步骤 1: 清理 NUL 字符后再解析 JSON
line = _sanitize_string(line)
# 步骤 2: 解析 JSON
try:
line_data = json.loads(line, strict=False)
except json.JSONDecodeError:
# logger.info("跳过非 JSON 行: %s", line)
return None
# 步骤 2: 验证数据类型
# 步骤 3: 验证数据类型
if not isinstance(line_data, dict):
logger.info("跳过非字典数据")
return None
# 步骤 3: 创建记录
# 步骤 4: 创建记录
record = HttpxRecord(line_data)
# 步骤 4: 验证必要字段
# 步骤 5: 验证必要字段
if not record.url:
logger.info("URL 为空,跳过 - 数据: %s", str(line_data)[:200])
return None
@@ -416,7 +398,7 @@ def _parse_and_validate_line(line: str) -> Optional[HttpxRecord]:
return record
except Exception:
logger.info("跳过无法解析的行: %s", line[:100])
logger.info("跳过无法解析的行: %s", line[:100] if line else 'empty')
return None
@@ -464,8 +446,8 @@ def _parse_httpx_stream_output(
# yield 一条有效记录
yield record
# 每处理 1000 条记录输出一次进度
if valid_records % 1000 == 0:
# 每处理 5 条记录输出一次进度
if valid_records % 5 == 0:
logger.info("已解析 %d 条有效记录...", valid_records)
except subprocess.TimeoutExpired as e:
@@ -604,8 +586,8 @@ def _process_records_in_batches(
_process_batch(batch, scan_id, target_id, batch_num, total_stats, failed_batches, services)
batch = [] # 清空批次
# 每20个批次输出进度
if batch_num % 20 == 0:
# 每 2 个批次输出进度
if batch_num % 2 == 0:
logger.info("进度: 已处理 %d 批次,%d 条记录", batch_num, total_records)
# 保存最后一批
@@ -676,11 +658,7 @@ def _cleanup_resources(data_generator) -> None:
logger.error("关闭生成器时出错: %s", gen_close_error)
@task(
name='run_and_stream_save_websites',
retries=0,
log_prints=True
)
@task(name='run_and_stream_save_websites', retries=0)
def run_and_stream_save_websites_task(
cmd: str,
tool_name: str,
@@ -688,7 +666,7 @@ def run_and_stream_save_websites_task(
target_id: int,
cwd: Optional[str] = None,
shell: bool = False,
batch_size: int = 1000,
batch_size: int = 10,
timeout: Optional[int] = None,
log_file: Optional[str] = None
) -> dict:

View File

@@ -23,10 +23,11 @@ import subprocess
import time
from pathlib import Path
from prefect import task
from typing import Generator, Optional
from typing import Generator, Optional, Dict, Any
from django.db import IntegrityError, OperationalError, DatabaseError
from psycopg2 import InterfaceError
from dataclasses import dataclass
from urllib.parse import urlparse
from apps.asset.services.snapshot import EndpointSnapshotsService
from apps.scan.utils import execute_stream
@@ -63,7 +64,53 @@ def _sanitize_string(value: str) -> str:
return value.replace('\x00', '')
def _parse_and_validate_line(line: str) -> Optional[dict]:
def _extract_hostname(url: str) -> str:
"""
从 URL 提取主机名
Args:
url: URL 字符串
Returns:
str: 提取的主机名(小写)
"""
try:
if url:
parsed = urlparse(url)
if parsed.hostname:
return parsed.hostname
# 降级方案:手动提取
return url.replace('http://', '').replace('https://', '').split('/')[0].split(':')[0]
return ''
except Exception as e:
logger.debug("提取主机名失败: %s", e)
return ''
class HttpxRecord:
    """httpx scan record.

    Wraps one parsed JSON line of httpx output, stripping NUL bytes
    from string fields via ``_sanitize_string`` and deriving ``host``
    when httpx did not report one.
    """

    def __init__(self, data: Dict[str, Any]):
        self.url = _sanitize_string(data.get('url', ''))
        self.input = _sanitize_string(data.get('input', ''))
        self.title = _sanitize_string(data.get('title', ''))
        self.status_code = data.get('status_code')  # int, no sanitizing needed
        self.content_length = data.get('content_length')  # int, no sanitizing needed
        self.content_type = _sanitize_string(data.get('content_type', ''))
        self.location = _sanitize_string(data.get('location', ''))
        self.webserver = _sanitize_string(data.get('webserver', ''))
        self.response_body = _sanitize_string(data.get('body', ''))
        # Robustness fix: data.get('tech', []) returns None when the key is
        # present with a null value, which made the comprehension raise and
        # caused the whole record to be dropped upstream. Only iterate when
        # tech is actually a list (mirrors the downstream
        # `isinstance(record.tech, list)` guard).
        tech_raw = data.get('tech')
        self.tech = (
            [_sanitize_string(t) for t in tech_raw if isinstance(t, str)]
            if isinstance(tech_raw, list) else []
        )
        self.vhost = data.get('vhost')  # bool, no sanitizing needed
        self.failed = data.get('failed', False)  # bool, no sanitizing needed
        self.response_headers = _sanitize_string(data.get('raw_header', ''))
        # Prefer the host reported by httpx; otherwise derive it from the URL.
        httpx_host = _sanitize_string(data.get('host', ''))
        self.host = httpx_host if httpx_host else _extract_hostname(self.url)
def _parse_and_validate_line(line: str) -> Optional[HttpxRecord]:
"""
解析并验证单行 httpx JSON 输出
@@ -71,9 +118,7 @@ def _parse_and_validate_line(line: str) -> Optional[dict]:
line: 单行输出数据
Returns:
Optional[dict]: 有效的 httpx 记录,或 None 如果验证失败
保存所有有效 URL不再过滤状态码安全扫描中 403/404/500 等也有分析价值)
Optional[HttpxRecord]: 有效的 httpx 记录,或 None 如果验证失败
"""
try:
# 清理 NUL 字符后再解析 JSON
@@ -83,7 +128,6 @@ def _parse_and_validate_line(line: str) -> Optional[dict]:
try:
line_data = json.loads(line, strict=False)
except json.JSONDecodeError:
# logger.info("跳过非 JSON 行: %s", line)
return None
# 验证数据类型
@@ -91,29 +135,15 @@ def _parse_and_validate_line(line: str) -> Optional[dict]:
logger.info("跳过非字典数据")
return None
# 获取必要字段
url = line_data.get('url', '').strip()
status_code = line_data.get('status_code')
# 创建记录
record = HttpxRecord(line_data)
if not url:
# 验证必要字段
if not record.url:
logger.info("URL 为空,跳过 - 数据: %s", str(line_data)[:200])
return None
# 保存所有有效 URL不再过滤状态码
return {
'url': _sanitize_string(url),
'host': _sanitize_string(line_data.get('host', '')),
'status_code': status_code,
'title': _sanitize_string(line_data.get('title', '')),
'content_length': line_data.get('content_length', 0),
'content_type': _sanitize_string(line_data.get('content_type', '')),
'webserver': _sanitize_string(line_data.get('webserver', '')),
'location': _sanitize_string(line_data.get('location', '')),
'tech': line_data.get('tech', []),
'response_body': _sanitize_string(line_data.get('body', '')),
'vhost': line_data.get('vhost', False),
'response_headers': _sanitize_string(line_data.get('raw_header', '')),
}
return record
except Exception:
logger.info("跳过无法解析的行: %s", line[:100] if line else 'empty')
@@ -127,7 +157,7 @@ def _parse_httpx_stream_output(
shell: bool = False,
timeout: Optional[int] = None,
log_file: Optional[str] = None
) -> Generator[dict, None, None]:
) -> Generator[HttpxRecord, None, None]:
"""
流式解析 httpx 命令输出
@@ -140,7 +170,7 @@ def _parse_httpx_stream_output(
log_file: 日志文件路径
Yields:
dict: 每次 yield 一条存活的 URL 记录
HttpxRecord: 每次 yield 一条存活的 URL 记录
"""
logger.info("开始流式解析 httpx 输出 - 命令: %s", cmd)
@@ -170,8 +200,8 @@ def _parse_httpx_stream_output(
# yield 一条有效记录(存活的 URL
yield record
# 每处理 500 条记录输出一次进度
if valid_records % 500 == 0:
# 每处理 100 条记录输出一次进度
if valid_records % 100 == 0:
logger.info("已解析 %d 条存活的 URL...", valid_records)
except subprocess.TimeoutExpired as e:
@@ -188,6 +218,78 @@ def _parse_httpx_stream_output(
)
def _validate_task_parameters(cmd: str, target_id: int, scan_id: int, cwd: Optional[str]) -> None:
"""
验证任务参数的有效性
Args:
cmd: 扫描命令
target_id: 目标ID
scan_id: 扫描ID
cwd: 工作目录
Raises:
ValueError: 参数验证失败
"""
if not cmd or not cmd.strip():
raise ValueError("扫描命令不能为空")
if target_id is None:
raise ValueError("target_id 不能为 None必须指定目标ID")
if scan_id is None:
raise ValueError("scan_id 不能为 None必须指定扫描ID")
# 验证工作目录(如果指定)
if cwd and not Path(cwd).exists():
raise ValueError(f"工作目录不存在: {cwd}")
def _build_final_result(stats: dict) -> dict:
    """Build the final result dict and emit the summary log.

    Args:
        stats: Processing statistics accumulated during the run.

    Returns:
        dict: Final result with processed/created/skipped counters.
    """
    logger.info(
        "✓ URL 验证任务完成 - 处理记录: %d%d 批次),创建端点: %d,跳过(失败): %d",
        stats['processed_records'], stats['batch_count'], stats['created_endpoints'],
        stats['skipped_failed']
    )
    # Surface a hint when nothing at all was persisted.
    if stats['created_endpoints'] == 0:
        logger.warning(
            "⚠️ 没有创建任何端点记录可能原因1) 命令输出格式问题 2) 重复数据被忽略 3) 所有请求都失败"
        )
    return {
        key: stats[key]
        for key in ('processed_records', 'created_endpoints', 'skipped_failed')
    }
def _cleanup_resources(data_generator) -> None:
    """Close the streaming data generator; errors are logged, not raised.

    Args:
        data_generator: Generator to close; ``None`` is a no-op.
    """
    if data_generator is None:
        return
    try:
        data_generator.close()
        logger.debug("已关闭数据生成器")
    except Exception as gen_close_error:
        logger.error("关闭生成器时出错: %s", gen_close_error)
def _save_batch_with_retry(
batch: list,
scan_id: int,
@@ -208,14 +310,19 @@ def _save_batch_with_retry(
max_retries: 最大重试次数
Returns:
dict: {'success': bool, 'saved_count': int}
dict: {
'success': bool,
'created_endpoints': int,
'skipped_failed': int
}
"""
for attempt in range(max_retries):
try:
count = _save_batch(batch, scan_id, target_id, batch_num, services)
stats = _save_batch(batch, scan_id, target_id, batch_num, services)
return {
'success': True,
'saved_count': count
'created_endpoints': stats.get('created_endpoints', 0),
'skipped_failed': stats.get('skipped_failed', 0)
}
except IntegrityError as e:
@@ -223,7 +330,8 @@ def _save_batch_with_retry(
logger.error("批次 %d 数据完整性错误,跳过: %s", batch_num, str(e)[:100])
return {
'success': False,
'saved_count': 0
'created_endpoints': 0,
'skipped_failed': 0
}
except (OperationalError, DatabaseError, InterfaceError) as e:
@@ -254,7 +362,8 @@ def _save_batch_with_retry(
# 理论上不会走到这里,保留兜底返回值以满足类型约束
return {
'success': False,
'saved_count': 0
'created_endpoints': 0,
'skipped_failed': 0
}
@@ -264,50 +373,72 @@ def _save_batch(
target_id: int,
batch_num: int,
services: ServiceSet
) -> int:
) -> dict:
"""
保存一个批次的数据到数据库
Args:
batch: 数据批次list of dict
batch: 数据批次list of HttpxRecord
scan_id: 扫描任务 ID
target_id: 目标 ID
batch_num: 批次编号
services: Service 集合
Returns:
int: 创建的记录数
dict: 包含创建和跳过记录的统计信息
"""
# 参数验证
if not isinstance(batch, list):
raise TypeError(f"batch 必须是 list 类型,实际: {type(batch).__name__}")
if not batch:
logger.debug("批次 %d 为空,跳过处理", batch_num)
return 0
return {
'created_endpoints': 0,
'skipped_failed': 0
}
# 统计变量
skipped_failed = 0
# 批量构造 Endpoint 快照 DTO
from apps.asset.dtos.snapshot import EndpointSnapshotDTO
snapshots = []
for record in batch:
# 跳过失败的请求
if record.failed:
skipped_failed += 1
continue
try:
# Endpoint URL 直接使用原始值,不做标准化
# 原因Endpoint URL 来自 waymore/katana包含路径和参数标准化可能改变含义
url = record.input if record.input else record.url
# 提取 host 字段域名或IP地址
host = record.host if record.host else ''
dto = EndpointSnapshotDTO(
scan_id=scan_id,
url=record['url'],
host=record.get('host', ''),
title=record.get('title', ''),
status_code=record.get('status_code'),
content_length=record.get('content_length', 0),
location=record.get('location', ''),
webserver=record.get('webserver', ''),
content_type=record.get('content_type', ''),
tech=record.get('tech', []),
response_body=record.get('response_body', ''),
vhost=record.get('vhost', False),
matched_gf_patterns=[],
target_id=target_id,
response_headers=record.get('response_headers', ''),
url=url,
host=host,
title=record.title if record.title else '',
status_code=record.status_code,
content_length=record.content_length,
location=record.location if record.location else '',
webserver=record.webserver if record.webserver else '',
content_type=record.content_type if record.content_type else '',
tech=record.tech if isinstance(record.tech, list) else [],
response_body=record.response_body if record.response_body else '',
vhost=record.vhost if record.vhost else False,
matched_gf_patterns=[],
response_headers=record.response_headers if record.response_headers else '',
)
snapshots.append(dto)
except Exception as e:
logger.error("处理记录失败: %s,错误: %s", record.get('url', 'Unknown'), e)
logger.error("处理记录失败: %s,错误: %s", record.url, e)
continue
if snapshots:
@@ -316,15 +447,69 @@ def _save_batch(
services.snapshot.save_and_sync(snapshots)
count = len(snapshots)
logger.info(
"批次 %d: 保存了 %d 个存活的 URL%d 个)",
batch_num, count, len(batch)
"批次 %d: 保存了 %d 个存活的 URL%d,跳过失败: %d",
batch_num, count, len(batch), skipped_failed
)
return count
return {
'created_endpoints': count,
'skipped_failed': skipped_failed
}
except Exception as e:
logger.error("批次 %d 批量保存失败: %s", batch_num, e)
raise
return 0
return {
'created_endpoints': 0,
'skipped_failed': skipped_failed
}
def _accumulate_batch_stats(total_stats: dict, batch_result: dict) -> None:
"""
累加批次统计信息
Args:
total_stats: 总统计信息字典
batch_result: 批次结果字典
"""
total_stats['created_endpoints'] += batch_result.get('created_endpoints', 0)
total_stats['skipped_failed'] += batch_result.get('skipped_failed', 0)
def _process_batch(
    batch: list,
    scan_id: int,
    target_id: int,
    batch_num: int,
    total_stats: dict,
    failed_batches: list,
    services: ServiceSet
) -> None:
    """Save one batch with retry and record its outcome.

    Args:
        batch: Records for this batch.
        scan_id: Scan ID.
        target_id: Target ID.
        batch_num: Batch number.
        total_stats: Running totals, updated in place.
        failed_batches: Collects the numbers of failed batches.
        services: Service set (required, dependency injection).
    """
    outcome = _save_batch_with_retry(
        batch, scan_id, target_id, batch_num, services
    )
    # Accumulate even on failure: part of the batch may have been persisted.
    _accumulate_batch_stats(total_stats, outcome)
    if outcome['success']:
        return
    failed_batches.append(batch_num)
    logger.warning(
        "批次 %d 保存失败,但已累计统计信息:创建端点=%d",
        batch_num, outcome.get('created_endpoints', 0)
    )
def _process_records_in_batches(
@@ -335,7 +520,7 @@ def _process_records_in_batches(
services: ServiceSet
) -> dict:
"""
分批处理记录并保存到数据库
流式处理记录并分批保存
Args:
data_generator: 数据生成器
@@ -345,14 +530,23 @@ def _process_records_in_batches(
services: Service 集合
Returns:
dict: 处理统计结果
dict: 处理统计信息
Raises:
RuntimeError: 存在失败批次时抛出
"""
batch = []
batch_num = 0
total_records = 0
total_saved = 0
batch_num = 0
failed_batches = []
batch = []
# 统计信息
total_stats = {
'created_endpoints': 0,
'skipped_failed': 0
}
# 流式读取生成器并分批保存
for record in data_generator:
batch.append(record)
total_records += 1
@@ -360,46 +554,35 @@ def _process_records_in_batches(
# 达到批次大小,执行保存
if len(batch) >= batch_size:
batch_num += 1
result = _save_batch_with_retry(
batch, scan_id, target_id, batch_num, services
)
if result['success']:
total_saved += result['saved_count']
else:
failed_batches.append(batch_num)
_process_batch(batch, scan_id, target_id, batch_num, total_stats, failed_batches, services)
batch = [] # 清空批次
# 每 10 个批次输出进度
if batch_num % 10 == 0:
logger.info(
"进度: 已处理 %d 批次,%d 条记录,保存 %d",
batch_num, total_records, total_saved
)
logger.info("进度: 已处理 %d 批次,%d 条记录", batch_num, total_records)
# 保存最后一批
if batch:
batch_num += 1
result = _save_batch_with_retry(
batch, scan_id, target_id, batch_num, services
_process_batch(batch, scan_id, target_id, batch_num, total_stats, failed_batches, services)
# 检查失败批次
if failed_batches:
error_msg = (
f"流式保存 URL 验证结果时出现失败批次,处理记录: {total_records}"
f"失败批次: {failed_batches}"
)
if result['success']:
total_saved += result['saved_count']
else:
failed_batches.append(batch_num)
logger.warning(error_msg)
raise RuntimeError(error_msg)
return {
'processed_records': total_records,
'saved_urls': total_saved,
'failed_urls': total_records - total_saved,
'batch_count': batch_num,
'failed_batches': failed_batches
**total_stats
}
@task(name="run_and_stream_save_urls", retries=3, retry_delay_seconds=10)
@task(name="run_and_stream_save_urls", retries=0)
def run_and_stream_save_urls_task(
cmd: str,
tool_name: str,
@@ -407,7 +590,7 @@ def run_and_stream_save_urls_task(
target_id: int,
cwd: Optional[str] = None,
shell: bool = False,
batch_size: int = 500,
batch_size: int = 100,
timeout: Optional[int] = None,
log_file: Optional[str] = None
) -> dict:
@@ -415,17 +598,18 @@ def run_and_stream_save_urls_task(
执行 httpx 验证并流式保存存活的 URL
该任务将:
1. 执行 httpx 命令验证 URL 存活
2. 流式处理输出,实时解析
3. 批量保存存活的 URL 到 Endpoint 表
1. 验证输入参数
2. 初始化资源(缓存、生成器)
3. 流式处理记录并分批保存
4. 构建并返回结果统计
Args:
cmd: httpx 命令
tool_name: 工具名称('httpx'
scan_id: 扫描任务 ID
target_id: 目标 ID
cwd: 工作目录
shell: 是否使用 shell 执行
cwd: 工作目录(可选)
shell: 是否使用 shell 执行(默认 False
batch_size: 批次大小(默认 500
timeout: 超时时间(秒)
log_file: 日志文件路径
@@ -433,11 +617,14 @@ def run_and_stream_save_urls_task(
Returns:
dict: {
'processed_records': int, # 处理的记录总数
'saved_urls': int, # 保存的存活 URL
'failed_urls': int, # 失败/死链
'batch_count': int, # 批次数
'failed_batches': list # 失败的批次号
'created_endpoints': int, # 创建的端点记录
'skipped_failed': int, # 因请求失败跳过的记录
}
Raises:
ValueError: 参数验证失败
RuntimeError: 命令执行或数据库操作失败
subprocess.TimeoutExpired: 命令执行超时
"""
logger.info(
"开始执行流式 URL 验证任务 - target_id=%s, 超时=%s秒, 命令: %s",
@@ -447,33 +634,30 @@ def run_and_stream_save_urls_task(
data_generator = None
try:
# 1. 初始化资源
# 1. 验证参数
_validate_task_parameters(cmd, target_id, scan_id, cwd)
# 2. 初始化资源
data_generator = _parse_httpx_stream_output(
cmd, tool_name, cwd, shell, timeout, log_file
)
services = ServiceSet.create_default()
# 2. 流式处理记录并分批保存
# 3. 流式处理记录并分批保存
stats = _process_records_in_batches(
data_generator, scan_id, target_id, batch_size, services
)
# 3. 输出最终统计
logger.info(
"✓ URL 验证任务完成 - 处理: %d, 存活: %d, 失败: %d",
stats['processed_records'],
stats['saved_urls'],
stats['failed_urls']
)
return stats
# 4. 构建最终结果
return _build_final_result(stats)
except subprocess.TimeoutExpired:
# 超时异常直接向上传播,保留异常类型
logger.warning(
"⚠️ URL 验证任务超时 - target_id=%s, 超时=%s",
target_id, timeout
)
raise
raise # 直接重新抛出,不包装
except Exception as e:
error_msg = f"流式执行 URL 验证任务失败: {e}"
@@ -481,12 +665,5 @@ def run_and_stream_save_urls_task(
raise RuntimeError(error_msg) from e
finally:
# 清理资源
if data_generator is not None:
try:
# 确保生成器被正确关闭
data_generator.close()
except (GeneratorExit, StopIteration):
pass
except Exception as e:
logger.warning("关闭数据生成器时出错: %s", e)
# 5. 清理资源
_cleanup_resources(data_generator)

View File

@@ -0,0 +1,52 @@
# Generated by Django 5.2.7 on 2026-01-02 04:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for this app: Target and Organization tables.
    # Both models are soft-deletable via `deleted_at`, and the unique
    # name constraints only apply to rows that are not deleted
    # (condition deleted_at IS NULL), so names can be reused after deletion.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Target',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # Target identifier: a domain, IP, or CIDR string.
                ('name', models.CharField(blank=True, default='', help_text='目标标识(域名/IP/CIDR', max_length=300)),
                ('type', models.CharField(choices=[('domain', '域名'), ('ip', 'IP地址'), ('cidr', 'CIDR范围')], db_index=True, default='domain', help_text='目标类型', max_length=20)),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
                ('last_scanned_at', models.DateTimeField(blank=True, help_text='最后扫描时间', null=True)),
                # Soft-delete marker: NULL means the row is active.
                ('deleted_at', models.DateTimeField(blank=True, db_index=True, help_text='删除时间NULL表示未删除', null=True)),
            ],
            options={
                'verbose_name': '扫描目标',
                'verbose_name_plural': '扫描目标',
                'db_table': 'target',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['type'], name='target_type_36a73c_idx'), models.Index(fields=['-created_at'], name='target_created_67f489_idx'), models.Index(fields=['deleted_at', '-created_at'], name='target_deleted_9fc9da_idx'), models.Index(fields=['deleted_at', 'type'], name='target_deleted_306a89_idx'), models.Index(fields=['name'], name='target_name_f1c641_idx')],
                # Unique name among non-deleted targets only.
                'constraints': [models.UniqueConstraint(condition=models.Q(('deleted_at__isnull', True)), fields=('name',), name='unique_target_name_active')],
            },
        ),
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(blank=True, default='', help_text='组织名称', max_length=300)),
                ('description', models.CharField(blank=True, default='', help_text='组织描述', max_length=1000)),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='创建时间')),
                ('deleted_at', models.DateTimeField(blank=True, db_index=True, help_text='删除时间NULL表示未删除', null=True)),
                # Many-to-many: an organization groups multiple targets.
                ('targets', models.ManyToManyField(blank=True, help_text='所属目标列表', related_name='organizations', to='targets.target')),
            ],
            options={
                'verbose_name': '组织',
                'verbose_name_plural': '组织',
                'db_table': 'organization',
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['-created_at'], name='organizatio_created_012eac_idx'), models.Index(fields=['deleted_at', '-created_at'], name='organizatio_deleted_2c604f_idx'), models.Index(fields=['name'], name='organizatio_name_bcc2ee_idx')],
                # Unique name among non-deleted organizations only.
                'constraints': [models.UniqueConstraint(condition=models.Q(('deleted_at__isnull', True)), fields=('name',), name='unique_organization_name_active')],
            },
        ),
    ]

View File

@@ -260,6 +260,12 @@ class TestDataGenerator:
def clear_data(self):
"""清除所有测试数据"""
cur = self.conn.cursor()
# 先删除 IMMV避免 pg_ivm 的 anyarray bug
print(" 删除 IMMV...")
cur.execute("DROP TABLE IF EXISTS asset_search_view CASCADE")
self.conn.commit()
tables = [
# 指纹表
'ehole_fingerprint', 'goby_fingerprint', 'wappalyzer_fingerprint',
@@ -276,6 +282,26 @@ class TestDataGenerator:
for table in tables:
cur.execute(f"DELETE FROM {table}")
self.conn.commit()
# 重建 IMMV
print(" 重建 IMMV...")
cur.execute("""
SELECT pgivm.create_immv('asset_search_view', $$
SELECT
w.id,
w.url,
w.host,
w.title,
w.tech,
w.status_code,
w.response_headers,
w.response_body,
w.created_at,
w.target_id
FROM website w
$$)
""")
self.conn.commit()
print(" ✓ 数据清除完成\n")
def create_workers(self) -> list:
@@ -1248,77 +1274,79 @@ class TestDataGenerator:
print(f" ✓ 创建了 {count} 个主机端口映射\n")
def create_vulnerabilities(self, target_ids: list):
"""创建漏洞"""
"""创建漏洞(基于 website URL 前缀)"""
print("🐛 创建漏洞...")
cur = self.conn.cursor()
vuln_types = [
'sql-injection-authentication-bypass-vulnerability-', # 50 chars
'cross-site-scripting-xss-stored-persistent-attack-', # 50 chars
'cross-site-request-forgery-csrf-token-validation--', # 50 chars
'server-side-request-forgery-ssrf-internal-access--', # 50 chars
'xml-external-entity-xxe-injection-vulnerability---', # 50 chars
'remote-code-execution-rce-command-injection-flaw--', # 50 chars
'local-file-inclusion-lfi-path-traversal-exploit---', # 50 chars
'directory-traversal-arbitrary-file-read-access----', # 50 chars
'authentication-bypass-session-management-flaw-----', # 50 chars
'insecure-direct-object-reference-idor-access-ctrl-', # 50 chars
'sensitive-data-exposure-information-disclosure----', # 50 chars
'security-misconfiguration-default-credentials-----', # 50 chars
'broken-access-control-privilege-escalation-vuln---', # 50 chars
'cors-misconfiguration-cross-origin-data-leakage---', # 50 chars
'subdomain-takeover-dns-misconfiguration-exploit---', # 50 chars
'exposed-admin-panel-unauthorized-access-control---', # 50 chars
'default-credentials-weak-authentication-bypass----', # 50 chars
'information-disclosure-sensitive-data-exposure----', # 50 chars
'command-injection-os-command-execution-exploit----', # 50 chars
'ldap-injection-directory-service-manipulation-----', # 50 chars
'xpath-injection-xml-query-manipulation-attack-----', # 50 chars
'nosql-injection-mongodb-query-manipulation--------', # 50 chars
'template-injection-ssti-server-side-execution-----', # 50 chars
'deserialization-vulnerability-object-injection----', # 50 chars
'jwt-vulnerability-token-forgery-authentication----', # 50 chars
'open-redirect-url-redirection-phishing-attack-----', # 50 chars
'http-request-smuggling-cache-poisoning-attack-----', # 50 chars
'host-header-injection-password-reset-poisoning----', # 50 chars
'clickjacking-ui-redressing-frame-injection--------', # 50 chars
'session-fixation-authentication-session-attack----', # 50 chars
'sql-injection-authentication-bypass-vulnerability-',
'cross-site-scripting-xss-stored-persistent-attack-',
'cross-site-request-forgery-csrf-token-validation--',
'server-side-request-forgery-ssrf-internal-access--',
'xml-external-entity-xxe-injection-vulnerability---',
'remote-code-execution-rce-command-injection-flaw--',
'local-file-inclusion-lfi-path-traversal-exploit---',
'directory-traversal-arbitrary-file-read-access----',
'authentication-bypass-session-management-flaw-----',
'insecure-direct-object-reference-idor-access-ctrl-',
'sensitive-data-exposure-information-disclosure----',
'security-misconfiguration-default-credentials-----',
'broken-access-control-privilege-escalation-vuln---',
'cors-misconfiguration-cross-origin-data-leakage---',
'subdomain-takeover-dns-misconfiguration-exploit---',
'exposed-admin-panel-unauthorized-access-control---',
'default-credentials-weak-authentication-bypass----',
'information-disclosure-sensitive-data-exposure----',
'command-injection-os-command-execution-exploit----',
'ldap-injection-directory-service-manipulation-----',
]
sources = [
'nuclei-vulnerability-scanner--', # 30 chars
'dalfox-xss-parameter-analysis-', # 30 chars
'sqlmap-sql-injection-testing--', # 30 chars
'crlfuzz-crlf-injection-finder-', # 30 chars
'httpx-web-probe-fingerprint---', # 30 chars
'manual-penetration-testing----', # 30 chars
'burp-suite-professional-scan--', # 30 chars
'owasp-zap-security-scanner----', # 30 chars
'nmap-network-service-scanner--', # 30 chars
'nikto-web-server-scanner------', # 30 chars
'wpscan-wordpress-vuln-scan----', # 30 chars
'dirsearch-directory-brute-----', # 30 chars
'ffuf-web-fuzzer-content-disc--', # 30 chars
'amass-subdomain-enumeration---', # 30 chars
'subfinder-passive-subdomain---', # 30 chars
'masscan-port-scanner-fast-----', # 30 chars
'nessus-vulnerability-assess---', # 30 chars
'qualys-cloud-security-scan----', # 30 chars
'acunetix-web-vuln-scanner-----', # 30 chars
'semgrep-static-code-analysis--', # 30 chars
'nuclei-vulnerability-scanner--',
'dalfox-xss-parameter-analysis-',
'sqlmap-sql-injection-testing--',
'crlfuzz-crlf-injection-finder-',
'httpx-web-probe-fingerprint---',
'manual-penetration-testing----',
'burp-suite-professional-scan--',
'owasp-zap-security-scanner----',
]
severities = ['unknown', 'info', 'low', 'medium', 'high', 'critical']
# 获取域名目标
cur.execute("SELECT id, name FROM target WHERE type = 'domain' AND deleted_at IS NULL LIMIT 80")
domain_targets = cur.fetchall()
# 漏洞路径后缀(会追加到 website URL 后面)
vuln_paths = [
'/api/users?id=1',
'/api/admin/config',
'/api/v1/auth/login',
'/api/v2/data/export',
'/admin/settings',
'/debug/console',
'/backup/db.sql',
'/.env',
'/.git/config',
'/wp-admin/',
'/phpmyadmin/',
'/api/graphql',
'/swagger.json',
'/actuator/health',
'/metrics',
]
# 获取所有 website 的 URL 和 target_id
cur.execute("SELECT id, url, target_id FROM website LIMIT 500")
websites = cur.fetchall()
if not websites:
print(" ⚠ 没有 website 数据,跳过漏洞生成\n")
return
count = 0
batch_data = []
for target_id, target_name in domain_targets:
num = random.randint(30, 80)
for website_id, website_url, target_id in websites:
# 每个 website 生成 1-5 个漏洞
num_vulns = random.randint(1, 5)
for idx in range(num):
for idx in range(num_vulns):
severity = random.choice(severities)
cvss_ranges = {
'critical': (9.0, 10.0), 'high': (7.0, 8.9), 'medium': (4.0, 6.9),
@@ -1327,22 +1355,22 @@ class TestDataGenerator:
cvss_range = cvss_ranges.get(severity, (0.0, 10.0))
cvss_score = round(random.uniform(*cvss_range), 1)
# 生成固定 245 长度的 URL
url = generate_fixed_length_url(target_name, length=245, path_hint=f'vuln/{idx:04d}')
# 漏洞 URL = website URL + 漏洞路径
# 先移除 website URL 中的查询参数
base_url = website_url.split('?')[0]
vuln_url = base_url + random.choice(vuln_paths)
# 生成固定 300 长度的描述
description = generate_fixed_length_text(length=300, text_type='description')
raw_output = json.dumps({
'template': f'CVE-2024-{random.randint(10000, 99999)}',
'matcher_name': 'default',
'severity': severity,
'host': target_name,
'matched_at': url,
'matched_at': vuln_url,
})
batch_data.append((
target_id, url, random.choice(vuln_types), severity,
target_id, vuln_url, random.choice(vuln_types), severity,
random.choice(sources), cvss_score, description, raw_output
))
count += 1
@@ -1488,7 +1516,7 @@ class TestDataGenerator:
if batch_data:
execute_values(cur, """
INSERT INTO website_snapshot (
scan_id, url, host, title, web_server, tech, status,
scan_id, url, host, title, webserver, tech, status_code,
content_length, content_type, location, response_body,
response_headers, created_at
) VALUES %s

View File

@@ -27,10 +27,50 @@ BLUE='\033[0;34m'
# ANSI color codes (interpreted by `echo -e` below).
RED='\033[0;31m'
NC='\033[0m'
# Original logging helpers with a "[XingRin]" prefix.
# NOTE: these are redefined below; in shell the later definition wins.
log_info() { echo -e "${BLUE}[XingRin]${NC} $1"; }
log_success() { echo -e "${GREEN}[XingRin]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[XingRin]${NC} $1"; }
log_error() { echo -e "${RED}[XingRin]${NC} $1"; }
# Extra colors / attributes for the banner output.
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
BOLD='\033[1m'
DIM='\033[2m'
# Active logging helpers: colored status prefix, message in $1.
log_info() { echo -e "${CYAN}${NC} $1"; }
log_success() { echo -e "${GREEN}${NC} $1"; }
log_warn() { echo -e "${YELLOW}${NC} $1"; }
log_error() { echo -e "${RED}${NC} $1"; }
# 炫酷 Banner
# Print the XingRin ASCII-art startup banner (uses the color vars above).
# Purely cosmetic; writes to stdout, no side effects.
show_banner() {
  echo -e ""
  echo -e "${CYAN}${BOLD} ██╗ ██╗██╗███╗ ██╗ ██████╗ ██████╗ ██╗███╗ ██╗${NC}"
  echo -e "${CYAN} ╚██╗██╔╝██║████╗ ██║██╔════╝ ██╔══██╗██║████╗ ██║${NC}"
  echo -e "${BLUE}${BOLD} ╚███╔╝ ██║██╔██╗ ██║██║ ███╗██████╔╝██║██╔██╗ ██║${NC}"
  echo -e "${BLUE} ██╔██╗ ██║██║╚██╗██║██║ ██║██╔══██╗██║██║╚██╗██║${NC}"
  echo -e "${MAGENTA}${BOLD} ██╔╝ ██╗██║██║ ╚████║╚██████╔╝██║ ██║██║██║ ╚████║${NC}"
  echo -e "${MAGENTA} ╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝${NC}"
  echo -e ""
  echo -e "${DIM} ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
  echo -e "${BOLD} 🚀 分布式安全扫描平台 │ Worker 节点部署${NC}"
  echo -e "${DIM} ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
  echo -e ""
}
# 完成 Banner
# Print the "DONE!" completion banner after a successful install.
# Purely cosmetic; writes to stdout, no side effects.
show_complete() {
  echo -e ""
  echo -e "${GREEN}${BOLD} ╔═══════════════════════════════════════════════════╗${NC}"
  echo -e "${GREEN}${BOLD} ║ ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ██████╗ ██████╗ ███╗ ██╗███████╗██╗ ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ██╔══██╗██╔═══██╗████╗ ██║██╔════╝██║ ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ██║ ██║██║ ██║██╔██╗ ██║█████╗ ██║ ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ██║ ██║██║ ██║██║╚██╗██║██╔══╝ ╚═╝ ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ██████╔╝╚██████╔╝██║ ╚████║███████╗██╗ ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝╚═╝ ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ✨ XingRin Worker 节点部署完成! ║${NC}"
  echo -e "${GREEN}${BOLD} ║ ║${NC}"
  echo -e "${GREEN}${BOLD} ╚═══════════════════════════════════════════════════╝${NC}"
  echo -e ""
}
# 等待 apt 锁释放
wait_for_apt_lock() {
@@ -150,9 +190,7 @@ pull_image() {
# 主流程
main() {
log_info "=========================================="
log_info " XingRin 节点安装"
log_info "=========================================="
show_banner
detect_os
install_docker
@@ -162,9 +200,7 @@ main() {
touch "$DOCKER_MARKER"
log_success "=========================================="
log_success " ✓ 安装完成"
log_success "=========================================="
show_complete
}
main "$@"

View File

@@ -2,9 +2,13 @@ services:
# PostgreSQL可选使用远程数据库时不启动
# 本地模式: docker compose --profile local-db up -d
# 远程模式: docker compose up -d需配置 DB_HOST 为远程地址)
# 使用自定义镜像,预装 pg_ivm 扩展
postgres:
profiles: ["local-db"]
image: postgres:15
build:
context: ./postgres
dockerfile: Dockerfile
image: ${DOCKER_USER:-yyhuni}/xingrin-postgres:${IMAGE_TAG:-dev}
restart: always
environment:
POSTGRES_DB: ${DB_NAME}
@@ -15,6 +19,9 @@ services:
- ./postgres/init-user-db.sh:/docker-entrypoint-initdb.d/init-user-db.sh
ports:
- "${DB_PORT}:5432"
command: >
postgres
-c shared_preload_libraries=pg_ivm
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USER}"]
interval: 5s

View File

@@ -8,9 +8,13 @@
services:
# PostgreSQL可选使用远程数据库时不启动
# 使用自定义镜像,预装 pg_ivm 扩展
postgres:
profiles: ["local-db"]
image: postgres:15
build:
context: ./postgres
dockerfile: Dockerfile
image: ${DOCKER_USER:-yyhuni}/xingrin-postgres:${IMAGE_TAG:?IMAGE_TAG is required}
restart: always
environment:
POSTGRES_DB: ${DB_NAME}
@@ -21,6 +25,9 @@ services:
- ./postgres/init-user-db.sh:/docker-entrypoint-initdb.d/init-user-db.sh
ports:
- "${DB_PORT}:5432"
command: >
postgres
-c shared_preload_libraries=pg_ivm
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USER}"]
interval: 5s

View File

@@ -0,0 +1,19 @@
# Custom PostgreSQL 15 image with the pg_ivm (incremental materialized
# view maintenance) extension compiled in.
FROM postgres:15
# Build toolchain needed to compile the extension from source
RUN apt-get update && apt-get install -y \
build-essential \
postgresql-server-dev-15 \
git \
&& rm -rf /var/lib/apt/lists/*
# Clone, build, and install pg_ivm, then drop the source tree
RUN git clone https://github.com/sraoss/pg_ivm.git /tmp/pg_ivm \
&& cd /tmp/pg_ivm \
&& make \
&& make install \
&& rm -rf /tmp/pg_ivm
# Preload pg_ivm by default. The sample config is copied into the data
# directory by initdb on first container start, so this only affects
# freshly initialized databases — existing volumes keep their own config.
RUN echo "shared_preload_libraries = 'pg_ivm'" >> /usr/share/postgresql/postgresql.conf.sample

129
docker/scripts/install-pg-ivm.sh Executable file
View File

@@ -0,0 +1,129 @@
#!/bin/bash
# One-shot pg_ivm installer for a remote, self-hosted PostgreSQL server.
# Requirement: PostgreSQL 13+.
set -e
echo "=========================================="
echo "pg_ivm 一键安装脚本"
echo "要求: PostgreSQL 13+ 版本"
echo "=========================================="
echo ""
# Must run as root: installs packages, edits config, restarts the service.
if [ "$EUID" -ne 0 ]; then
echo "错误: 请使用 sudo 运行此脚本"
exit 1
fi
# Best-effort detection of the PostgreSQL major version.
# Precedence: version reported by psql on PATH → $PG_VERSION env var →
# fallback "15". Prints the major version (first run of digits) to stdout.
detect_pg_version() {
    local banner
    if command -v psql &> /dev/null; then
        banner=$(psql --version)
        grep -oP '\d+' <<<"$banner" | head -1
    elif [ -n "$PG_VERSION" ]; then
        echo "$PG_VERSION"
    else
        echo "15"
    fi
}
# Explicit PG_VERSION env var wins; otherwise auto-detect.
PG_VERSION=${PG_VERSION:-$(detect_pg_version)}
# Abort if PostgreSQL is not installed at all
if ! command -v psql &> /dev/null; then
echo "错误: 未检测到 PostgreSQL请先安装 PostgreSQL"
exit 1
fi
echo "检测到 PostgreSQL 版本: $PG_VERSION"
# pg_ivm requires PostgreSQL 13 or newer
if [ "$PG_VERSION" -lt 13 ]; then
echo "错误: pg_ivm 要求 PostgreSQL 13+ 版本,当前版本: $PG_VERSION"
exit 1
fi
# [1/4] Install build dependencies via apt or yum
echo ""
echo "[1/4] 安装编译依赖..."
if command -v apt-get &> /dev/null; then
apt-get update -qq
apt-get install -y -qq build-essential postgresql-server-dev-${PG_VERSION} git
elif command -v yum &> /dev/null; then
yum install -y gcc make git postgresql${PG_VERSION}-devel
else
echo "错误: 不支持的包管理器,请手动安装编译依赖"
exit 1
fi
echo "✓ 编译依赖安装完成"
# [2/4] Build and install pg_ivm from source, then remove the checkout
echo ""
echo "[2/4] 编译安装 pg_ivm..."
rm -rf /tmp/pg_ivm
git clone --quiet https://github.com/sraoss/pg_ivm.git /tmp/pg_ivm
cd /tmp/pg_ivm
make -s
make install -s
rm -rf /tmp/pg_ivm
echo "✓ pg_ivm 编译安装完成"
# [3/4] Ensure shared_preload_libraries includes pg_ivm.
# Probes the common Debian/RHEL/container config locations in order.
echo ""
echo "[3/4] 配置 shared_preload_libraries..."
PG_CONF_DIRS=(
"/etc/postgresql/${PG_VERSION}/main"
"/var/lib/pgsql/${PG_VERSION}/data"
"/var/lib/postgresql/data"
)
PG_CONF_DIR=""
for dir in "${PG_CONF_DIRS[@]}"; do
if [ -d "$dir" ]; then
PG_CONF_DIR="$dir"
break
fi
done
if [ -z "$PG_CONF_DIR" ]; then
echo "警告: 未找到 PostgreSQL 配置目录,请手动配置 shared_preload_libraries"
echo "在 postgresql.conf 中添加: shared_preload_libraries = 'pg_ivm'"
else
# Already configured? (matches pg_ivm anywhere in the setting)
if grep -q "shared_preload_libraries.*pg_ivm" "$PG_CONF_DIR/postgresql.conf" 2>/dev/null; then
echo "✓ shared_preload_libraries 已配置"
else
# Prefer a conf.d drop-in when the directory exists
if [ -d "$PG_CONF_DIR/conf.d" ]; then
echo "shared_preload_libraries = 'pg_ivm'" > "$PG_CONF_DIR/conf.d/pg_ivm.conf"
echo "✓ 配置已写入 $PG_CONF_DIR/conf.d/pg_ivm.conf"
else
# Append to an existing single-quoted value, or add the setting fresh.
# NOTE(review): the sed assumes the value is single-quoted with no
# trailing comment on the line — confirm against target configs.
if grep -q "^shared_preload_libraries" "$PG_CONF_DIR/postgresql.conf"; then
sed -i "s/^shared_preload_libraries = '\(.*\)'/shared_preload_libraries = '\1,pg_ivm'/" "$PG_CONF_DIR/postgresql.conf"
else
echo "shared_preload_libraries = 'pg_ivm'" >> "$PG_CONF_DIR/postgresql.conf"
fi
echo "✓ 配置已写入 $PG_CONF_DIR/postgresql.conf"
fi
fi
fi
# [4/4] Restart PostgreSQL so the preload library takes effect.
# Tries the generic unit name first, then the versioned (RHEL-style) one.
echo ""
echo "[4/4] 重启 PostgreSQL..."
if systemctl is-active --quiet postgresql; then
systemctl restart postgresql
echo "✓ PostgreSQL 已重启"
elif systemctl is-active --quiet postgresql-${PG_VERSION}; then
systemctl restart postgresql-${PG_VERSION}
echo "✓ PostgreSQL 已重启"
else
echo "警告: 无法自动重启 PostgreSQL请手动重启"
fi
echo ""
echo "=========================================="
echo "✓ pg_ivm 安装完成"
echo "=========================================="
echo ""
echo "验证安装:"
echo " psql -U postgres -c \"CREATE EXTENSION IF NOT EXISTS pg_ivm;\""
echo ""

126
docker/scripts/test-pg-ivm.sh Executable file
View File

@@ -0,0 +1,126 @@
#!/bin/bash
# pg_ivm installation verification test.
# Exercises the install-pg-ivm.sh flow inside a throwaway Docker container.
set -e
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# $$ makes the container name unique per test run
CONTAINER_NAME="pg_ivm_test_$$"
IMAGE_NAME="postgres:15"
echo "=========================================="
echo "pg_ivm 安装验证测试"
echo "=========================================="
# Cleanup: always remove the test container, even on failure (trap below).
cleanup() {
echo ""
echo "[清理] 删除测试容器..."
docker rm -f "$CONTAINER_NAME" 2>/dev/null || true
}
trap cleanup EXIT
# [1/5] Start a temporary PostgreSQL container
echo ""
echo "[1/5] 启动临时 PostgreSQL 容器..."
docker run -d --name "$CONTAINER_NAME" \
-e POSTGRES_PASSWORD=test \
-e POSTGRES_USER=postgres \
-e POSTGRES_DB=testdb \
-e PG_VERSION=15 \
"$IMAGE_NAME"
echo "等待 PostgreSQL 启动..."
# NOTE(review): fixed sleeps are race-prone on slow hosts — polling
# `pg_isready` in a loop would be more robust. TODO confirm before changing.
sleep 10
if ! docker ps | grep -q "$CONTAINER_NAME"; then
echo "错误: 容器启动失败"
exit 1
fi
# [2/5] Copy the install script in, then run the equivalent install steps
# inline (the copied script itself is not executed here).
echo ""
echo "[2/5] 执行 pg_ivm 安装脚本..."
docker cp "$SCRIPT_DIR/install-pg-ivm.sh" "$CONTAINER_NAME:/tmp/install-pg-ivm.sh"
# Simulate the install inside the container (skips systemctl; the container
# is restarted manually in step 3 instead).
docker exec "$CONTAINER_NAME" bash -c "
set -e
export PG_VERSION=15
echo '安装编译依赖...'
apt-get update -qq
apt-get install -y -qq build-essential postgresql-server-dev-15 git
echo '编译安装 pg_ivm...'
rm -rf /tmp/pg_ivm
git clone --quiet https://github.com/sraoss/pg_ivm.git /tmp/pg_ivm
cd /tmp/pg_ivm
make -s
make install -s
rm -rf /tmp/pg_ivm
echo '✓ pg_ivm 编译安装完成'
"
# [3/5] Enable shared_preload_libraries and restart the container
echo ""
echo "[3/5] 配置 shared_preload_libraries..."
docker exec "$CONTAINER_NAME" bash -c "
echo \"shared_preload_libraries = 'pg_ivm'\" >> /var/lib/postgresql/data/postgresql.conf
"
echo "重启 PostgreSQL..."
docker restart "$CONTAINER_NAME"
sleep 8
# [4/5] Verify the extension can be created and is registered in pg_extension
echo ""
echo "[4/5] 验证 pg_ivm 扩展..."
docker exec "$CONTAINER_NAME" psql -U postgres -d testdb -c "CREATE EXTENSION IF NOT EXISTS pg_ivm;" > /dev/null 2>&1
EXTENSION_EXISTS=$(docker exec "$CONTAINER_NAME" psql -U postgres -d testdb -t -c "SELECT COUNT(*) FROM pg_extension WHERE extname = 'pg_ivm';")
if [ "$(echo $EXTENSION_EXISTS | tr -d ' ')" != "1" ]; then
echo "错误: pg_ivm 扩展未正确加载"
exit 1
fi
echo "✓ pg_ivm 扩展已加载"
# [5/5] Functional test: INSERTs into the base table must appear in the IMMV
echo ""
echo "[5/5] 测试 IMMV 增量更新功能..."
docker exec "$CONTAINER_NAME" psql -U postgres -d testdb -c "
CREATE TABLE test_table (id SERIAL PRIMARY KEY, name TEXT, value INTEGER);
SELECT pgivm.create_immv('test_immv', 'SELECT id, name, value FROM test_table');
INSERT INTO test_table (name, value) VALUES ('test1', 100);
INSERT INTO test_table (name, value) VALUES ('test2', 200);
" > /dev/null 2>&1
IMMV_COUNT=$(docker exec "$CONTAINER_NAME" psql -U postgres -d testdb -t -c "SELECT COUNT(*) FROM test_immv;")
if [ "$(echo $IMMV_COUNT | tr -d ' ')" != "2" ]; then
echo "错误: IMMV 增量更新失败,期望 2 行,实际 $(echo $IMMV_COUNT | tr -d ' ')"
exit 1
fi
echo "✓ IMMV 增量更新正常 (2 行数据)"
# UPDATE on the base table must propagate to the IMMV
docker exec "$CONTAINER_NAME" psql -U postgres -d testdb -c "UPDATE test_table SET value = 150 WHERE name = 'test1';" > /dev/null 2>&1
UPDATED_VALUE=$(docker exec "$CONTAINER_NAME" psql -U postgres -d testdb -t -c "SELECT value FROM test_immv WHERE name = 'test1';")
if [ "$(echo $UPDATED_VALUE | tr -d ' ')" != "150" ]; then
echo "错误: IMMV 更新同步失败"
exit 1
fi
echo "✓ IMMV 更新同步正常"
# DELETE must propagate as well
docker exec "$CONTAINER_NAME" psql -U postgres -d testdb -c "DELETE FROM test_table WHERE name = 'test2';" > /dev/null 2>&1
IMMV_COUNT_AFTER=$(docker exec "$CONTAINER_NAME" psql -U postgres -d testdb -t -c "SELECT COUNT(*) FROM test_immv;")
if [ "$(echo $IMMV_COUNT_AFTER | tr -d ' ')" != "1" ]; then
echo "错误: IMMV 删除同步失败"
exit 1
fi
echo "✓ IMMV 删除同步正常"
echo ""
echo "=========================================="
echo "✓ 所有测试通过"
echo "=========================================="
echo ""
echo "pg_ivm 安装验证成功,可以继续构建自定义 PostgreSQL 镜像"

View File

@@ -1,4 +1,4 @@
FROM python:3.10-slim
FROM python:3.10-slim-bookworm
WORKDIR /app
@@ -11,7 +11,16 @@ RUN apt-get update && apt-get install -y \
&& rm -rf /var/lib/apt/lists/*
# 安装 Docker CLI用于本地 Worker 任务分发)
RUN curl -fsSL https://get.docker.com | sh
# 只安装 docker-ce-cli避免安装完整 Docker 引擎
RUN apt-get update && \
apt-get install -y ca-certificates gnupg && \
install -m 0755 -d /etc/apt/keyrings && \
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg && \
chmod a+r /etc/apt/keyrings/docker.gpg && \
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian bookworm stable" > /etc/apt/sources.list.d/docker.list && \
apt-get update && \
apt-get install -y docker-ce-cli && \
rm -rf /var/lib/apt/lists/*
# 安装 uv超快的 Python 包管理器)
RUN pip install uv

View File

@@ -15,10 +15,12 @@ NC='\033[0m'
# 解析参数
WITH_FRONTEND=true
DEV_MODE=false
QUIET_MODE=false
for arg in "$@"; do
case $arg in
--no-frontend) WITH_FRONTEND=false ;;
--dev) DEV_MODE=true ;;
--quiet) QUIET_MODE=true ;;
esac
done
@@ -155,6 +157,11 @@ echo -e "${GREEN}[OK]${NC} 服务已启动"
# 数据初始化
./scripts/init-data.sh
# 静默模式下不显示结果(由调用方显示)
if [ "$QUIET_MODE" = true ]; then
exit 0
fi
# 获取访问地址
PUBLIC_HOST=$(grep "^PUBLIC_HOST=" .env 2>/dev/null | cut -d= -f2)
if [ -n "$PUBLIC_HOST" ] && [ "$PUBLIC_HOST" != "server" ]; then

View File

@@ -0,0 +1,5 @@
import { SearchPage } from "@/components/search"
// Route entry for /search — all page logic lives in the shared SearchPage component.
export default function Search() {
return <SearchPage />
}

View File

@@ -44,7 +44,6 @@
--font-sans: 'Noto Sans SC', system-ui, -apple-system, PingFang SC, sans-serif;
--font-mono: 'JetBrains Mono', 'Fira Code', Consolas, monospace;
--font-serif: Georgia, 'Noto Serif SC', serif;
--radius: 0.625rem;
--tracking-tighter: calc(var(--tracking-normal) - 0.05em);
--tracking-tight: calc(var(--tracking-normal) - 0.025em);
--tracking-wide: calc(var(--tracking-normal) + 0.025em);

View File

@@ -16,6 +16,7 @@ import {
IconTerminal2, // Terminal icon
IconBug, // Vulnerability icon
IconMessageReport, // Feedback icon
IconSearch, // Search icon
} from "@tabler/icons-react"
// Import internationalization hook
import { useTranslations } from 'next-intl'
@@ -76,6 +77,11 @@ export function AppSidebar({ ...props }: React.ComponentProps<typeof Sidebar>) {
url: "/dashboard/",
icon: IconDashboard,
},
{
title: t('search'),
url: "/search/",
icon: IconSearch,
},
{
title: t('organization'),
url: "/organization/",

View File

@@ -67,6 +67,45 @@ const DEFAULT_FIELDS: FilterField[] = [
PREDEFINED_FIELDS.host,
]
// History storage key
// localStorage key under which per-field filter value history is stored.
const FILTER_HISTORY_KEY = 'smart_filter_history'
// Cap on remembered values per field (most recent first).
const MAX_HISTORY_PER_FIELD = 10

// Read the remembered values for one field; [] during SSR or on parse errors.
function getFieldHistory(field: string): string[] {
  if (typeof window === 'undefined') return []
  try {
    const stored = JSON.parse(localStorage.getItem(FILTER_HISTORY_KEY) || '{}')
    return stored[field] || []
  } catch {
    return []
  }
}

// Remember `value` for `field`: de-duplicated, most-recent-first, capped.
function saveFieldHistory(field: string, value: string) {
  if (typeof window === 'undefined' || !value.trim()) return
  try {
    const stored = JSON.parse(localStorage.getItem(FILTER_HISTORY_KEY) || '{}')
    const withoutDup = (stored[field] || []).filter((v: string) => v !== value)
    stored[field] = [value, ...withoutDup].slice(0, MAX_HISTORY_PER_FIELD)
    localStorage.setItem(FILTER_HISTORY_KEY, JSON.stringify(stored))
  } catch {
    // best effort — storage may be unavailable or full
  }
}

// Pull every field="value" pair out of a query string and remember each one.
function saveQueryHistory(query: string) {
  const pairPattern = /(\w+)(==|!=|=)"([^"]+)"/g
  for (const match of query.matchAll(pairPattern)) {
    saveFieldHistory(match[1], match[3])
  }
}
// Parse filter expression (FOFA style)
interface ParsedFilter {
field: string
@@ -115,10 +154,114 @@ export function SmartFilterInput({
const [open, setOpen] = React.useState(false)
const [inputValue, setInputValue] = React.useState(value ?? "")
const inputRef = React.useRef<HTMLInputElement>(null)
const ghostRef = React.useRef<HTMLSpanElement>(null)
const listRef = React.useRef<HTMLDivElement>(null)
const savedScrollTop = React.useRef<number | null>(null)
const hasInitialized = React.useRef(false)
// Calculate ghost text suggestion
const ghostText = React.useMemo(() => {
if (!inputValue) return ""
// Get the last word/token being typed
const lastSpaceIndex = inputValue.lastIndexOf(' ')
const currentToken = lastSpaceIndex === -1 ? inputValue : inputValue.slice(lastSpaceIndex + 1)
const lowerToken = currentToken.toLowerCase()
// If empty token after space, check if previous expression is complete
if (!currentToken && inputValue.trim()) {
// Check if last expression is complete (ends with ")
if (inputValue.trimEnd().endsWith('"')) {
return '&& '
}
return ""
}
if (!currentToken) return ""
// Priority 1: Field name completion (no = in token)
if (!currentToken.includes('=') && !currentToken.includes('!')) {
// Find matching field first
const matchingField = fields.find(f =>
f.key.toLowerCase().startsWith(lowerToken) &&
f.key.toLowerCase() !== lowerToken
)
if (matchingField) {
return matchingField.key.slice(currentToken.length) + '="'
}
// If exact match of field name, suggest ="
const exactField = fields.find(f => f.key.toLowerCase() === lowerToken)
if (exactField) {
return '="'
}
// Priority 2: Logical operators (only if no field matches)
if ('&&'.startsWith(currentToken) && currentToken.startsWith('&')) {
return '&&'.slice(currentToken.length) + ' '
}
if ('||'.startsWith(currentToken) && currentToken.startsWith('|')) {
return '||'.slice(currentToken.length) + ' '
}
// 'and' / 'or' only if no field name starts with these
if (!matchingField) {
if ('and'.startsWith(lowerToken) && lowerToken.length > 0 && !fields.some(f => f.key.toLowerCase().startsWith(lowerToken))) {
return 'and'.slice(lowerToken.length) + ' '
}
if ('or'.startsWith(lowerToken) && lowerToken.length > 0 && !fields.some(f => f.key.toLowerCase().startsWith(lowerToken))) {
return 'or'.slice(lowerToken.length) + ' '
}
}
return ""
}
// Check if typing ! for != operator
if (currentToken.match(/^(\w+)!$/)) {
return '="'
}
// Check if typing = and might want ==
const singleEqMatch = currentToken.match(/^(\w+)=$/)
if (singleEqMatch) {
// Suggest " for fuzzy match (most common)
return '"'
}
// Check if typed == or != (no opening quote yet)
const doubleOpMatch = currentToken.match(/^(\w+)(==|!=)$/)
if (doubleOpMatch) {
return '"'
}
// Check if typing a value (has = and opening quote)
const eqMatch = currentToken.match(/^(\w+)(==|!=|=)"([^"]*)$/)
if (eqMatch) {
const [, field, , partialValue] = eqMatch
// Get history for this field
const history = getFieldHistory(field)
// Find matching history value
const matchingValue = history.find(v =>
v.toLowerCase().startsWith(partialValue.toLowerCase()) &&
v.toLowerCase() !== partialValue.toLowerCase()
)
if (matchingValue) {
return matchingValue.slice(partialValue.length) + '"'
}
// If value has content but no closing quote, suggest closing quote
if (partialValue.length > 0) {
return '"'
}
}
// Check if a complete expression just finished (ends with ")
if (currentToken.match(/^\w+(==|!=|=)"[^"]+"$/)) {
return ' && '
}
return ""
}, [inputValue, fields])
// Synchronize external value changes
React.useEffect(() => {
if (value !== undefined) {
@@ -189,12 +332,27 @@ export function SmartFilterInput({
// Handle search
const handleSearch = () => {
// Save query values to history
saveQueryHistory(inputValue)
onSearch?.(parsedFilters, inputValue)
setOpen(false)
}
// Accept ghost text suggestion
const acceptGhostText = () => {
if (ghostText) {
setInputValue(inputValue + ghostText)
return true
}
return false
}
// Handle keyboard events
const handleKeyDown = (e: React.KeyboardEvent) => {
if (e.key === "Tab" && ghostText) {
e.preventDefault()
acceptGhostText()
}
if (e.key === "Enter" && !e.shiftKey) {
e.preventDefault()
handleSearch()
@@ -202,6 +360,14 @@ export function SmartFilterInput({
if (e.key === "Escape") {
setOpen(false)
}
// Right arrow at end of input accepts ghost text
if (e.key === "ArrowRight" && ghostText) {
const input = inputRef.current
if (input && input.selectionStart === input.value.length) {
e.preventDefault()
acceptGhostText()
}
}
}
// Append example to input box (not overwrite), then close popover
@@ -215,36 +381,46 @@ export function SmartFilterInput({
return (
<div className={className}>
<Popover open={open} onOpenChange={setOpen} modal={false}>
<PopoverAnchor asChild>
<div className="flex items-center gap-2">
<Input
ref={inputRef}
type="text"
value={inputValue}
onChange={(e) => {
setInputValue(e.target.value)
if (!open) setOpen(true)
}}
onFocus={() => setOpen(true)}
onBlur={(e) => {
// If focus moves to inside Popover or input itself, don't close
const relatedTarget = e.relatedTarget as HTMLElement | null
if (relatedTarget?.closest('[data-radix-popper-content-wrapper]')) {
return
}
// Delay close to let CommandItem's onSelect execute first
setTimeout(() => setOpen(false), 150)
}}
onKeyDown={handleKeyDown}
placeholder={placeholder || defaultPlaceholder}
className="h-8 w-full"
/>
<Button variant="outline" size="sm" onClick={handleSearch}>
<IconSearch className="h-4 w-4" />
</Button>
</div>
</PopoverAnchor>
<div className="flex items-center gap-2">
<Popover open={open} onOpenChange={setOpen} modal={false}>
<PopoverAnchor asChild>
<div className="relative flex-1">
<Input
ref={inputRef}
type="text"
value={inputValue}
onChange={(e) => {
setInputValue(e.target.value)
if (!open) setOpen(true)
}}
onFocus={() => setOpen(true)}
onBlur={(e) => {
// If focus moves to inside Popover or input itself, don't close
const relatedTarget = e.relatedTarget as HTMLElement | null
if (relatedTarget?.closest('[data-radix-popper-content-wrapper]')) {
return
}
// Delay close to let CommandItem's onSelect execute first
setTimeout(() => setOpen(false), 150)
}}
onKeyDown={handleKeyDown}
placeholder={placeholder || defaultPlaceholder}
className="h-8 w-full font-mono text-sm"
/>
{/* Ghost text overlay */}
{ghostText && (
<div
className="absolute inset-0 flex items-center pointer-events-none overflow-hidden px-3"
aria-hidden="true"
>
<span className="font-mono text-sm">
<span className="invisible">{inputValue}</span>
<span ref={ghostRef} className="text-muted-foreground/40">{ghostText}</span>
</span>
</div>
)}
</div>
</PopoverAnchor>
<PopoverContent
className="w-[var(--radix-popover-trigger-width)] p-0"
align="start"
@@ -343,6 +519,10 @@ export function SmartFilterInput({
</Command>
</PopoverContent>
</Popover>
<Button variant="outline" size="sm" onClick={handleSearch}>
<IconSearch className="h-4 w-4" />
</Button>
</div>
</div>
)
}

View File

@@ -0,0 +1,4 @@
// Barrel exports for the search feature components.
export { SearchPage } from "./search-page"
export { SearchResultCard } from "./search-result-card"
export { SearchPagination } from "./search-pagination"
export { SearchResultsTable } from "./search-results-table"

View File

@@ -0,0 +1,492 @@
"use client"
import { useState, useCallback, useMemo, useEffect } from "react"
import { useSearchParams } from "next/navigation"
import { motion, AnimatePresence } from "framer-motion"
import { Search, AlertCircle, History, X, Download } from "lucide-react"
import { useTranslations } from "next-intl"
import { toast } from "sonner"
import { SmartFilterInput, type FilterField } from "@/components/common/smart-filter-input"
import { SearchPagination } from "./search-pagination"
import { useAssetSearch } from "@/hooks/use-search"
import { VulnerabilityDetailDialog } from "@/components/vulnerabilities/vulnerability-detail-dialog"
import { VulnerabilityService } from "@/services/vulnerability.service"
import { SearchService } from "@/services/search.service"
import type { SearchParams, SearchState, Vulnerability as SearchVuln, AssetType } from "@/types/search.types"
import type { Vulnerability } from "@/types/vulnerability.types"
import { Alert, AlertDescription } from "@/components/ui/alert"
import { Button } from "@/components/ui/button"
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
import { SearchResultsTable } from "./search-results-table"
import { SearchResultCard } from "./search-result-card"
import { Badge } from "@/components/ui/badge"
import { cn } from "@/lib/utils"
// Example queries shown for the Website asset type
const WEBSITE_SEARCH_EXAMPLES = [
'host="api"',
'title="Dashboard"',
'tech="nginx"',
'status=="200"',
'host="api" && status=="200"',
'tech="vue" || tech="react"',
'host="admin" && tech="php" && status=="200"',
'status!="404"',
]
// Example queries shown for the Endpoint asset type
const ENDPOINT_SEARCH_EXAMPLES = [
'host="api"',
'url="/api/v1"',
'title="Dashboard"',
'tech="nginx"',
'status=="200"',
'host="api" && status=="200"',
'url="/admin" && status=="200"',
'tech="vue" || tech="react"',
]
// One-click quick-search tag chips shown under the search box
const QUICK_SEARCH_TAGS = [
{ label: 'status=="200"', query: 'status=="200"' },
{ label: 'tech="nginx"', query: 'tech="nginx"' },
{ label: 'tech="php"', query: 'tech="php"' },
{ label: 'tech="vue"', query: 'tech="vue"' },
{ label: 'tech="react"', query: 'tech="react"' },
{ label: 'status=="403"', query: 'status=="403"' },
]
// localStorage key for the recent-search list.
const RECENT_SEARCHES_KEY = 'xingrin_recent_searches'
// Maximum number of recent searches kept.
const MAX_RECENT_SEARCHES = 5

// Load recent searches; [] during SSR or when storage is unreadable.
function getRecentSearches(): string[] {
  if (typeof window === 'undefined') return []
  try {
    const raw = localStorage.getItem(RECENT_SEARCHES_KEY)
    return raw ? JSON.parse(raw) : []
  } catch {
    return []
  }
}

// Push `query` to the front of the recent list (de-duplicated, capped).
function saveRecentSearch(query: string) {
  if (typeof window === 'undefined' || !query.trim()) return
  try {
    const next = [query, ...getRecentSearches().filter((s) => s !== query)]
    localStorage.setItem(
      RECENT_SEARCHES_KEY,
      JSON.stringify(next.slice(0, MAX_RECENT_SEARCHES))
    )
  } catch {
    // best effort — storage may be unavailable
  }
}

// Drop a single query from the recent list.
function removeRecentSearch(query: string) {
  if (typeof window === 'undefined') return
  try {
    const remaining = getRecentSearches().filter((s) => s !== query)
    localStorage.setItem(RECENT_SEARCHES_KEY, JSON.stringify(remaining))
  } catch {
    // best effort — storage may be unavailable
  }
}
export function SearchPage() {
const t = useTranslations('search')
const urlSearchParams = useSearchParams()
const [searchState, setSearchState] = useState<SearchState>("initial")
const [query, setQuery] = useState("")
const [assetType, setAssetType] = useState<AssetType>("website")
const [searchParams, setSearchParams] = useState<SearchParams>({})
const [page, setPage] = useState(1)
const [pageSize, setPageSize] = useState(10)
const [selectedVuln, setSelectedVuln] = useState<Vulnerability | null>(null)
const [vulnDialogOpen, setVulnDialogOpen] = useState(false)
const [, setLoadingVuln] = useState(false)
const [recentSearches, setRecentSearches] = useState<string[]>([])
const [initialQueryProcessed, setInitialQueryProcessed] = useState(false)
// 加载最近搜索记录
useEffect(() => {
setRecentSearches(getRecentSearches())
}, [])
// 处理 URL 参数中的搜索查询
useEffect(() => {
if (initialQueryProcessed) return
const q = urlSearchParams.get('q')
if (q) {
setQuery(q)
setSearchParams({ q, asset_type: assetType })
setSearchState("searching")
saveRecentSearch(q)
setRecentSearches(getRecentSearches())
}
setInitialQueryProcessed(true)
}, [urlSearchParams, assetType, initialQueryProcessed])
// 根据资产类型选择搜索示例
const searchExamples = useMemo(() => {
return assetType === 'endpoint' ? ENDPOINT_SEARCH_EXAMPLES : WEBSITE_SEARCH_EXAMPLES
}, [assetType])
// 搜索过滤字段配置
const SEARCH_FILTER_FIELDS: FilterField[] = [
{ key: "host", label: "Host", description: t('fields.host') },
{ key: "url", label: "URL", description: t('fields.url') },
{ key: "title", label: "Title", description: t('fields.title') },
{ key: "tech", label: "Tech", description: t('fields.tech') },
{ key: "status", label: "Status", description: t('fields.status') },
{ key: "body", label: "Body", description: t('fields.body') },
{ key: "header", label: "Header", description: t('fields.header') },
]
// 使用搜索 Hook
const { data, isLoading, error, isFetching } = useAssetSearch(
{ ...searchParams, page, pageSize },
{ enabled: searchState === "results" || searchState === "searching" }
)
const handleSearch = useCallback((_filters: unknown, rawQuery: string) => {
if (!rawQuery.trim()) return
setQuery(rawQuery)
setSearchParams({ q: rawQuery, asset_type: assetType })
setPage(1)
setSearchState("searching")
// 保存到最近搜索
saveRecentSearch(rawQuery)
setRecentSearches(getRecentSearches())
}, [assetType])
// 处理快捷标签点击
const handleQuickTagClick = useCallback((tagQuery: string) => {
setQuery(tagQuery)
}, [])
// 处理最近搜索点击
const handleRecentSearchClick = useCallback((recentQuery: string) => {
setQuery(recentQuery)
setSearchParams({ q: recentQuery, asset_type: assetType })
setPage(1)
setSearchState("searching")
saveRecentSearch(recentQuery)
setRecentSearches(getRecentSearches())
}, [assetType])
// 删除最近搜索
const handleRemoveRecentSearch = useCallback((e: React.MouseEvent, searchQuery: string) => {
e.stopPropagation()
removeRecentSearch(searchQuery)
setRecentSearches(getRecentSearches())
}, [])
// 导出状态
const [isExporting, setIsExporting] = useState(false)
// 导出 CSV调用后端 API 导出全部结果)
const handleExportCSV = useCallback(async () => {
if (!searchParams.q) return
setIsExporting(true)
try {
await SearchService.exportCSV(searchParams.q, assetType)
toast.success(t('exportSuccess'))
} catch (error) {
console.error('Export failed:', error)
toast.error(t('exportFailed'))
} finally {
setIsExporting(false)
}
}, [searchParams.q, assetType, t])
// 当数据加载完成时更新状态
if (searchState === "searching" && data && !isLoading) {
setSearchState("results")
}
const handleAssetTypeChange = useCallback((value: AssetType) => {
setAssetType(value)
// 清空搜索结果
if (searchState === "results") {
setSearchState("initial")
setSearchParams({})
setQuery("")
}
}, [searchState])
const handlePageChange = useCallback((newPage: number) => {
setPage(newPage)
}, [])
const handlePageSizeChange = useCallback((newPageSize: number) => {
setPageSize(newPageSize)
setPage(1)
}, [])
const handleViewVulnerability = useCallback(async (vuln: SearchVuln) => {
if (!vuln.id) return
setLoadingVuln(true)
try {
const fullVuln = await VulnerabilityService.getVulnerabilityById(vuln.id)
setSelectedVuln(fullVuln)
setVulnDialogOpen(true)
} catch {
toast.error(t('vulnLoadError'))
} finally {
setLoadingVuln(false)
}
}, [t])
// 资产类型选择器组件
const AssetTypeSelector = (
<Select value={assetType} onValueChange={handleAssetTypeChange}>
<SelectTrigger size="sm" className="w-[100px]">
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="website">{t('assetTypes.website')}</SelectItem>
<SelectItem value="endpoint">{t('assetTypes.endpoint')}</SelectItem>
</SelectContent>
</Select>
)
return (
<div className="flex-1 w-full flex flex-col">
<AnimatePresence mode="wait">
{searchState === "initial" && (
<motion.div
key="initial"
initial={{ opacity: 0, y: 20 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: -50 }}
transition={{ duration: 0.3 }}
className="flex-1 flex flex-col items-center justify-center px-4 relative overflow-hidden"
>
{/* 背景装饰 */}
<div className="absolute inset-0 -z-10 overflow-hidden pointer-events-none">
<div className="absolute left-1/2 top-1/4 -translate-x-1/2 h-[400px] w-[600px] rounded-full bg-primary/5 blur-3xl" />
<div className="absolute right-1/4 top-1/2 h-[200px] w-[300px] rounded-full bg-primary/3 blur-2xl" />
</div>
<div className="flex flex-col items-center gap-6 w-full max-w-3xl -mt-16">
{/* 标题 */}
<div className="flex flex-col items-center gap-2">
<div className="flex items-center justify-center w-16 h-16 rounded-2xl bg-primary/10 mb-2">
<Search className="h-8 w-8 text-primary" />
</div>
<h1 className="text-3xl font-semibold text-foreground">
{t('title')}
</h1>
<p className="text-sm text-muted-foreground">
{t('hint')}
</p>
</div>
{/* 搜索框 */}
<div className="flex items-center gap-3 w-full">
{AssetTypeSelector}
<SmartFilterInput
fields={SEARCH_FILTER_FIELDS}
examples={searchExamples}
placeholder='host="api" && tech="nginx" && status=="200"'
value={query}
onSearch={handleSearch}
className="flex-1"
/>
</div>
{/* 快捷搜索标签 */}
<div className="flex flex-wrap justify-center gap-2">
{QUICK_SEARCH_TAGS.map((tag) => (
<Badge
key={tag.query}
variant="outline"
className="cursor-pointer hover:bg-accent transition-colors px-3 py-1"
onClick={() => handleQuickTagClick(tag.query)}
>
{tag.label}
</Badge>
))}
</div>
{/* 最近搜索 */}
{recentSearches.length > 0 && (
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ delay: 0.3 }}
className="w-full max-w-xl mt-2"
>
<div className="flex items-center gap-2 text-xs text-muted-foreground mb-2">
<History className="h-3.5 w-3.5" />
<span>{t('recentSearches')}</span>
</div>
<div className="flex flex-wrap gap-2">
{recentSearches.map((search) => (
<Badge
key={search}
variant="secondary"
className={cn(
"cursor-pointer hover:bg-secondary/80 transition-colors",
"pl-3 pr-1.5 py-1 gap-1 group"
)}
onClick={() => handleRecentSearchClick(search)}
>
<span className="font-mono text-xs truncate max-w-[200px]">{search}</span>
<button
onClick={(e) => handleRemoveRecentSearch(e, search)}
className="ml-1 p-0.5 rounded hover:bg-muted-foreground/20 opacity-0 group-hover:opacity-100 transition-opacity"
>
<X className="h-3 w-3" />
</button>
</Badge>
))}
</div>
</motion.div>
)}
</div>
</motion.div>
)}
{searchState === "searching" && isLoading && (
<motion.div
key="searching"
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
exit={{ opacity: 0 }}
transition={{ duration: 0.2 }}
className="h-full flex flex-col items-center justify-center"
>
<div className="flex flex-col items-center gap-4">
<div className="animate-spin rounded-full h-8 w-8 border-b-2 border-primary" />
<span className="text-muted-foreground">{t('searching')}</span>
</div>
</motion.div>
)}
{(searchState === "results" || (searchState === "searching" && !isLoading)) && (
<motion.div
key="results"
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ duration: 0.3 }}
className="h-full flex flex-col"
>
{/* 顶部搜索栏 */}
<motion.div
initial={{ y: -20, opacity: 0 }}
animate={{ y: 0, opacity: 1 }}
transition={{ duration: 0.3, delay: 0.1 }}
className="sticky top-0 z-10 bg-background/95 backdrop-blur supports-[backdrop-filter]:bg-background/60 border-b px-4 py-3"
>
<div className="flex items-center gap-3">
{AssetTypeSelector}
<SmartFilterInput
fields={SEARCH_FILTER_FIELDS}
examples={searchExamples}
placeholder='host="api" && tech="nginx" && status=="200"'
value={query}
onSearch={handleSearch}
className="flex-1"
/>
<span className="text-sm text-muted-foreground whitespace-nowrap">
{isFetching ? t('loading') : t('resultsCount', { count: data?.total ?? 0 })}
</span>
<Button
variant="outline"
size="sm"
onClick={handleExportCSV}
disabled={!data?.results || data.results.length === 0 || isExporting}
>
<Download className="h-4 w-4 mr-1.5" />
{isExporting ? t('exporting') : t('export')}
</Button>
</div>
</motion.div>
{/* 错误提示 */}
{error && (
<div className="p-4 w-full">
<Alert variant="destructive">
<AlertCircle className="h-4 w-4" />
<AlertDescription>
{t('error')}
</AlertDescription>
</Alert>
</div>
)}
{/* 空结果提示 */}
{!error && data?.results.length === 0 && (
<div className="flex-1 flex flex-col items-center justify-center p-4">
<div className="text-center">
<Search className="h-12 w-12 text-muted-foreground mx-auto mb-4" />
<h3 className="text-lg font-medium mb-2">{t('noResults')}</h3>
<p className="text-sm text-muted-foreground">
{t('noResultsHint')}
</p>
</div>
</div>
)}
{/* 搜索结果 */}
{!error && data && data.results.length > 0 && (
<>
<div className="flex-1 overflow-auto p-4">
{assetType === 'website' ? (
// Website 使用卡片样式
<div className="space-y-4 max-w-4xl mx-auto">
{data.results.map((result) => (
<SearchResultCard
key={result.id}
result={result}
onViewVulnerability={handleViewVulnerability}
/>
))}
</div>
) : (
// Endpoint 使用表格样式
<SearchResultsTable
results={data.results}
assetType={assetType}
onViewVulnerability={handleViewVulnerability}
/>
)}
</div>
{/* 分页控制 */}
<div className="border-t px-4 py-3">
<SearchPagination
page={page}
pageSize={pageSize}
total={data.total}
totalPages={data.totalPages}
onPageChange={handlePageChange}
onPageSizeChange={handlePageSizeChange}
/>
</div>
</>
)}
</motion.div>
)}
</AnimatePresence>
{/* 漏洞详情弹窗 - 复用现有组件 */}
<VulnerabilityDetailDialog
vulnerability={selectedVuln}
open={vulnDialogOpen}
onOpenChange={setVulnDialogOpen}
/>
</div>
)
}

View File

@@ -0,0 +1,148 @@
"use client"
import * as React from "react"
import {
IconChevronLeft,
IconChevronRight,
IconChevronsLeft,
IconChevronsRight,
} from "@tabler/icons-react"
import { useTranslations } from 'next-intl'
import { Button } from "@/components/ui/button"
import { Label } from "@/components/ui/label"
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select"
// Props for the search-results pagination bar. `page` is 1-based.
interface SearchPaginationProps {
  page: number
  pageSize: number
  total: number
  totalPages: number
  onPageChange: (page: number) => void
  onPageSizeChange: (pageSize: number) => void
  pageSizeOptions?: number[]
}

// Fallback rows-per-page choices when the caller supplies none.
const DEFAULT_PAGE_SIZE_OPTIONS = [10, 20, 50, 100]

/**
 * Pagination bar for search results: total count on the left; rows-per-page
 * selector, current-page indicator, and first/prev/next/last buttons on the
 * right. First/last buttons are hidden below the `lg` breakpoint.
 */
export function SearchPagination({
  page,
  pageSize,
  total,
  totalPages,
  onPageChange,
  onPageSizeChange,
  pageSizeOptions = DEFAULT_PAGE_SIZE_OPTIONS,
}: SearchPaginationProps) {
  const t = useTranslations('common.pagination')

  // The Select yields a string value; callers expect a number.
  const handlePageSizeChange = React.useCallback((value: string) => {
    onPageSizeChange(Number(value))
  }, [onPageSizeChange])

  const handleFirstPage = React.useCallback(() => {
    onPageChange(1)
  }, [onPageChange])

  // Clamp to page 1 so repeated clicks at the boundary are harmless.
  const handlePreviousPage = React.useCallback(() => {
    onPageChange(Math.max(1, page - 1))
  }, [onPageChange, page])

  // Clamp to the last page for the same reason.
  const handleNextPage = React.useCallback(() => {
    onPageChange(Math.min(totalPages, page + 1))
  }, [onPageChange, page, totalPages])

  const handleLastPage = React.useCallback(() => {
    onPageChange(totalPages)
  }, [onPageChange, totalPages])

  const canPreviousPage = page > 1
  const canNextPage = page < totalPages

  return (
    <div className="flex items-center justify-between">
      {/* Total result count */}
      <div className="flex-1 text-sm text-muted-foreground">
        {t('total', { count: total })}
      </div>
      {/* Pagination controls */}
      <div className="flex items-center space-x-6 lg:space-x-8">
        {/* Rows-per-page selector */}
        <div className="flex items-center space-x-2">
          <Label htmlFor="rows-per-page" className="text-sm font-medium">
            {t('rowsPerPage')}
          </Label>
          <Select
            value={`${pageSize}`}
            onValueChange={handlePageSizeChange}
          >
            <SelectTrigger className="h-8 w-[90px]" id="rows-per-page">
              <SelectValue placeholder={pageSize} />
            </SelectTrigger>
            <SelectContent side="top">
              {pageSizeOptions.map((size) => (
                <SelectItem key={size} value={`${size}`}>
                  {size}
                </SelectItem>
              ))}
            </SelectContent>
          </Select>
        </div>
        {/* "Page X of Y" indicator (shows 1 total page when totalPages is 0) */}
        <div className="flex items-center justify-center text-sm font-medium whitespace-nowrap">
          {t('page', { current: page, total: totalPages || 1 })}
        </div>
        {/* Navigation buttons */}
        <div className="flex items-center space-x-2">
          <Button
            variant="outline"
            className="hidden h-8 w-8 p-0 lg:flex"
            onClick={handleFirstPage}
            disabled={!canPreviousPage}
          >
            <span className="sr-only">{t('first')}</span>
            <IconChevronsLeft className="h-4 w-4" />
          </Button>
          <Button
            variant="outline"
            className="h-8 w-8 p-0"
            onClick={handlePreviousPage}
            disabled={!canPreviousPage}
          >
            <span className="sr-only">{t('previous')}</span>
            <IconChevronLeft className="h-4 w-4" />
          </Button>
          <Button
            variant="outline"
            className="h-8 w-8 p-0"
            onClick={handleNextPage}
            disabled={!canNextPage}
          >
            <span className="sr-only">{t('next')}</span>
            <IconChevronRight className="h-4 w-4" />
          </Button>
          <Button
            variant="outline"
            className="hidden h-8 w-8 p-0 lg:flex"
            onClick={handleLastPage}
            disabled={!canNextPage}
          >
            <span className="sr-only">{t('last')}</span>
            <IconChevronsRight className="h-4 w-4" />
          </Button>
        </div>
      </div>
    </div>
  )
}

View File

@@ -0,0 +1,317 @@
"use client"
import { useState, useRef, useEffect } from "react"
import { ChevronDown, ChevronUp, Eye } from "lucide-react"
import { useTranslations } from "next-intl"
import { Badge } from "@/components/ui/badge"
import { Button } from "@/components/ui/button"
import { Card, CardContent } from "@/components/ui/card"
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
import {
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "@/components/ui/collapsible"
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "@/components/ui/table"
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip"
import type { SearchResult, Vulnerability, WebsiteSearchResult } from "@/types/search.types"
// Type guard: narrows SearchResult to WebsiteSearchResult.
// Presence of a `vulnerabilities` key is what distinguishes website results
// from endpoint results in this union.
function isWebsiteResult(result: SearchResult): result is WebsiteSearchResult {
  return 'vulnerabilities' in result
}

// Props for the card view of a single search result.
interface SearchResultCardProps {
  result: SearchResult
  onViewVulnerability?: (vuln: Vulnerability) => void
}
// Tailwind class map for vulnerability-severity badges (GitHub-style palette,
// with dark-mode overrides for critical/low). Unknown severities fall back to
// the `info` entry at the lookup site.
const severityColors: Record<string, string> = {
  critical: "bg-[#da3633]/10 text-[#da3633] border border-[#da3633]/20 dark:text-[#f85149]",
  high: "bg-[#d29922]/10 text-[#d29922] border border-[#d29922]/20",
  medium: "bg-[#d4a72c]/10 text-[#d4a72c] border border-[#d4a72c]/20",
  low: "bg-[#238636]/10 text-[#238636] border border-[#238636]/20 dark:text-[#3fb950]",
  info: "bg-[#848d97]/10 text-[#848d97] border border-[#848d97]/20",
}
// Map an HTTP status code to a Badge variant.
// null/0 → "outline"; 2xx → "default"; 3xx → "secondary"; >=400 → "destructive";
// any other non-null code (e.g. 1xx) → "outline".
function getStatusVariant(status: number | null): "default" | "secondary" | "destructive" | "outline" {
  if (!status) return "outline"
  const family = Math.floor(status / 100)
  if (family === 2) return "default"
  if (family === 3) return "secondary"
  return status >= 400 ? "destructive" : "outline"
}
/**
 * Card view for a single search result.
 *
 * Layout: URL + status badges on top; left panel with title/host and a
 * collapsible technology-badge list; right panel with Header/Body/Location
 * tabs; and — for website results only — a collapsible vulnerability table
 * at the bottom.
 */
export function SearchResultCard({ result, onViewVulnerability }: SearchResultCardProps) {
  const t = useTranslations('search.card')
  const [vulnOpen, setVulnOpen] = useState(false)
  const [techExpanded, setTechExpanded] = useState(false)
  const [isOverflowing, setIsOverflowing] = useState(false)
  const containerRef = useRef<HTMLDivElement>(null)

  // Render response headers as "Key: value" lines.
  const formatHeaders = (headers: Record<string, string>) => {
    return Object.entries(headers)
      .map(([key, value]) => `${key}: ${value}`)
      .join("\n")
  }

  // Human-readable byte size (B/KB/MB). Returns null for null/undefined input
  // so the surrounding content-length badge is omitted entirely.
  const formatBytes = (bytes: number | null) => {
    if (bytes === null || bytes === undefined) return null
    if (bytes < 1024) return `${bytes} B`
    if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`
    return `${(bytes / (1024 * 1024)).toFixed(1)} MB`
  }

  // Collapse threshold for the tech-badge list — approximately four rows at
  // ~26px per row.
  const maxHeight = 26 * 4

  // Detect whether the collapsed badge container overflows, so the
  // expand/collapse toggle only appears when needed. Re-checks on resize via
  // ResizeObserver; skipped while expanded (maxHeight is lifted then).
  useEffect(() => {
    const el = containerRef.current
    if (!el || techExpanded) return
    const checkOverflow = () => {
      setIsOverflowing(el.scrollHeight > maxHeight)
    }
    checkOverflow()
    const resizeObserver = new ResizeObserver(checkOverflow)
    resizeObserver.observe(el)
    return () => resizeObserver.disconnect()
  }, [result.technologies, techExpanded, maxHeight])

  // Forward a vulnerability to the optional callback (no-op when absent).
  const handleViewVulnerability = (vuln: Vulnerability) => {
    if (onViewVulnerability) {
      onViewVulnerability(vuln)
    }
  }

  return (
    <Card className="overflow-hidden py-0 gap-0">
      <CardContent className="p-0">
        {/* Top row: URL + badges */}
        <div className="px-4 py-2 bg-muted/30 border-b space-y-2">
          <h3 className="font-mono text-sm break-all">
            {result.url || result.host}
          </h3>
          {/* Badge row */}
          <div className="flex flex-wrap items-center gap-2">
            <Badge variant={getStatusVariant(result.statusCode)} className="font-mono text-xs">
              {result.statusCode ?? '-'}
            </Badge>
            {result.webserver && (
              <Badge variant="outline" className="font-mono text-xs">
                {result.webserver}
              </Badge>
            )}
            {result.contentType && (
              <Badge variant="outline" className="font-mono text-xs">
                {result.contentType.split(';')[0]}
              </Badge>
            )}
            {formatBytes(result.contentLength) && (
              <Badge variant="outline" className="font-mono text-xs">
                {formatBytes(result.contentLength)}
              </Badge>
            )}
          </div>
        </div>
        {/* Middle: left/right split (stacked on small screens) */}
        <div className="flex flex-col md:flex-row">
          {/* Left info panel */}
          <div className="w-full md:w-[320px] md:shrink-0 px-4 py-3 border-b md:border-b-0 md:border-r flex flex-col">
            <div className="space-y-1.5 text-sm">
              <div className="flex items-baseline">
                <span className="text-muted-foreground w-12 shrink-0">Title</span>
                <span className="truncate" title={result.title}>{result.title || '-'}</span>
              </div>
              <div className="flex items-baseline">
                <span className="text-muted-foreground w-12 shrink-0">Host</span>
                <span className="font-mono truncate" title={result.host}>{result.host || '-'}</span>
              </div>
            </div>
            {/* Technology badges (collapsible when overflowing) */}
            {result.technologies && result.technologies.length > 0 && (
              <div className="mt-3 flex flex-col gap-1">
                <div
                  ref={containerRef}
                  className="flex flex-wrap items-start gap-1 overflow-hidden transition-all duration-200"
                  style={{ maxHeight: techExpanded ? "none" : `${maxHeight}px` }}
                >
                  {result.technologies.map((tech, index) => (
                    <Badge
                      key={`${tech}-${index}`}
                      variant="secondary"
                      className="text-xs"
                    >
                      {tech}
                    </Badge>
                  ))}
                </div>
                {(isOverflowing || techExpanded) && (
                  <button
                    onClick={() => setTechExpanded(!techExpanded)}
                    className="inline-flex items-center gap-0.5 text-xs text-muted-foreground hover:text-foreground transition-colors self-start"
                  >
                    {techExpanded ? (
                      <>
                        <ChevronUp className="h-3 w-3" />
                        <span>{t('collapse')}</span>
                      </>
                    ) : (
                      <>
                        <ChevronDown className="h-3 w-3" />
                        <span>{t('expand')}</span>
                      </>
                    )}
                  </button>
                )}
              </div>
            )}
          </div>
          {/* Right tab panel: response header/body (+ location for redirects) */}
          <div className="w-full md:flex-1 flex flex-col">
            <Tabs defaultValue="header" className="w-full h-full flex flex-col gap-0">
              <TabsList className="h-[28px] gap-4 rounded-none border-b bg-transparent px-4 pt-1">
                <TabsTrigger
                  value="header"
                  className="h-full rounded-none border-b-2 border-transparent border-x-0 border-t-0 bg-transparent px-1 text-sm shadow-none focus-visible:ring-0 focus-visible:outline-none data-[state=active]:border-b-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
                >
                  Header
                </TabsTrigger>
                <TabsTrigger
                  value="body"
                  className="h-full rounded-none border-b-2 border-transparent border-x-0 border-t-0 bg-transparent px-1 text-sm shadow-none focus-visible:ring-0 focus-visible:outline-none data-[state=active]:border-b-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
                >
                  Body
                </TabsTrigger>
                {result.location && (
                  <TabsTrigger
                    value="location"
                    className="h-full rounded-none border-b-2 border-transparent border-x-0 border-t-0 bg-transparent px-1 text-sm shadow-none focus-visible:ring-0 focus-visible:outline-none data-[state=active]:border-b-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
                  >
                    Location
                  </TabsTrigger>
                )}
              </TabsList>
              <TabsContent value="header" className="flex-1 overflow-auto bg-muted/30 px-4 py-2 max-h-[200px]">
                <pre className="text-xs font-mono whitespace-pre-wrap">
                  {result.responseHeaders ? formatHeaders(result.responseHeaders) : '-'}
                </pre>
              </TabsContent>
              <TabsContent value="body" className="flex-1 overflow-auto bg-muted/30 px-4 py-2 max-h-[200px]">
                <pre className="text-xs font-mono whitespace-pre-wrap">
                  {result.responseBody || '-'}
                </pre>
              </TabsContent>
              {result.location && (
                <TabsContent value="location" className="flex-1 overflow-auto bg-muted/30 px-4 py-2 max-h-[200px]">
                  <pre className="text-xs font-mono whitespace-pre-wrap">
                    {result.location}
                  </pre>
                </TabsContent>
              )}
            </Tabs>
          </div>
        </div>
        {/* Bottom vulnerabilities section — website results only */}
        {isWebsiteResult(result) && result.vulnerabilities && result.vulnerabilities.length > 0 && (
          <div className="border-t">
            <Collapsible open={vulnOpen} onOpenChange={setVulnOpen}>
              <CollapsibleTrigger className="flex items-center gap-1 px-4 py-2 text-sm text-muted-foreground hover:text-foreground transition-colors w-full">
                {vulnOpen ? (
                  <ChevronDown className="size-4" />
                ) : (
                  // Closed state: ChevronUp rotated 90° renders a right-pointing chevron.
                  <ChevronUp className="size-4 rotate-90" />
                )}
                <span>{t('vulnerabilities', { count: result.vulnerabilities.length })}</span>
              </CollapsibleTrigger>
              <CollapsibleContent>
                <div className="px-4 pb-4">
                  <Table className="table-fixed">
                    <TableHeader>
                      <TableRow>
                        <TableHead className="text-xs w-[50%]">{t('vulnName')}</TableHead>
                        <TableHead className="text-xs w-[20%]">{t('vulnType')}</TableHead>
                        <TableHead className="text-xs w-[20%]">{t('severity')}</TableHead>
                        <TableHead className="text-xs w-[10%]"></TableHead>
                      </TableRow>
                    </TableHeader>
                    <TableBody>
                      {result.vulnerabilities.map((vuln, index) => (
                        <TableRow key={`${vuln.name}-${index}`}>
                          <TableCell className="text-xs font-medium">
                            <Tooltip>
                              <TooltipTrigger asChild>
                                <span className="truncate block max-w-full cursor-default">
                                  {vuln.name}
                                </span>
                              </TooltipTrigger>
                              <TooltipContent side="top" className="max-w-[400px]">
                                {vuln.name}
                              </TooltipContent>
                            </Tooltip>
                          </TableCell>
                          <TableCell className="text-xs">
                            <Tooltip>
                              <TooltipTrigger asChild>
                                <span className="truncate block max-w-full cursor-default">
                                  {vuln.vulnType}
                                </span>
                              </TooltipTrigger>
                              <TooltipContent side="top">
                                {vuln.vulnType}
                              </TooltipContent>
                            </Tooltip>
                          </TableCell>
                          <TableCell>
                            <Badge
                              variant="outline"
                              className={`text-xs ${severityColors[vuln.severity] || severityColors.info}`}
                            >
                              {vuln.severity}
                            </Badge>
                          </TableCell>
                          <TableCell className="text-right">
                            <Button
                              variant="ghost"
                              size="sm"
                              className="h-7 px-2"
                              onClick={() => handleViewVulnerability(vuln)}
                            >
                              <Eye className="h-3.5 w-3.5" />
                            </Button>
                          </TableCell>
                        </TableRow>
                      ))}
                    </TableBody>
                  </Table>
                </div>
              </CollapsibleContent>
            </Collapsible>
          </div>
        )}
      </CardContent>
    </Card>
  )
}

View File

@@ -0,0 +1,285 @@
"use client"
import { useMemo } from "react"
import { useFormatter } from "next-intl"
import type { ColumnDef } from "@tanstack/react-table"
import { Badge } from "@/components/ui/badge"
import { DataTableColumnHeader, UnifiedDataTable } from "@/components/ui/data-table"
import { ExpandableCell, ExpandableTagList } from "@/components/ui/data-table/expandable-cell"
import type { SearchResult, AssetType, Vulnerability, EndpointSearchResult } from "@/types/search.types"
// Props for the tabular search-results view.
interface SearchResultsTableProps {
  results: SearchResult[]
  assetType: AssetType
  onViewVulnerability?: (vuln: Vulnerability) => void
}

/**
 * Table view for search results (used for endpoint-type assets).
 *
 * Renders a shared set of base columns; for `assetType === 'endpoint'` a
 * "GF Patterns" column is spliced in right after the Tech column.
 */
export function SearchResultsTable({ results, assetType }: SearchResultsTableProps) {
  // NOTE(review): onViewVulnerability is declared in the props interface but
  // is not consumed here yet — confirm whether the table should wire it up.
  const format = useFormatter()

  // Memoize the date formatter: previously it was recreated on every render
  // and listed as a dependency of the `baseColumns` useMemo below, which made
  // that memoization a no-op. Stable as long as `format` is stable.
  const formatDate = useMemo(
    () =>
      (dateString: string) =>
        format.dateTime(new Date(dateString), {
          year: 'numeric',
          month: '2-digit',
          day: '2-digit',
          hour: '2-digit',
          minute: '2-digit',
        }),
    [format]
  )

  // Base columns shared by Website and Endpoint result types.
  const baseColumns: ColumnDef<SearchResult, unknown>[] = useMemo(() => [
    {
      id: "url",
      accessorKey: "url",
      meta: { title: "URL" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="URL" />
      ),
      size: 350,
      minSize: 200,
      maxSize: 600,
      cell: ({ row }) => (
        <ExpandableCell value={row.getValue("url")} />
      ),
    },
    {
      id: "host",
      accessorKey: "host",
      meta: { title: "Host" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Host" />
      ),
      size: 180,
      minSize: 100,
      maxSize: 250,
      cell: ({ row }) => (
        <ExpandableCell value={row.getValue("host")} />
      ),
    },
    {
      id: "title",
      accessorKey: "title",
      meta: { title: "Title" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Title" />
      ),
      size: 150,
      minSize: 100,
      maxSize: 300,
      cell: ({ row }) => (
        <ExpandableCell value={row.getValue("title")} />
      ),
    },
    {
      id: "statusCode",
      accessorKey: "statusCode",
      meta: { title: "Status" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Status" />
      ),
      size: 80,
      minSize: 60,
      maxSize: 100,
      cell: ({ row }) => {
        const statusCode = row.getValue("statusCode") as number | null
        if (!statusCode) return <span className="text-muted-foreground">-</span>
        // NOTE(review): this mapping (2xx → outline, 4xx → default) differs
        // from the card view's getStatusVariant (2xx → default,
        // 4xx → destructive) — confirm the difference is intentional.
        let variant: "default" | "secondary" | "destructive" | "outline" = "outline"
        if (statusCode >= 200 && statusCode < 300) {
          variant = "outline"
        } else if (statusCode >= 300 && statusCode < 400) {
          variant = "secondary"
        } else if (statusCode >= 400 && statusCode < 500) {
          variant = "default"
        } else if (statusCode >= 500) {
          variant = "destructive"
        }
        return <Badge variant={variant} className="font-mono">{statusCode}</Badge>
      },
    },
    {
      id: "technologies",
      accessorKey: "technologies",
      meta: { title: "Tech" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Tech" />
      ),
      size: 180,
      minSize: 120,
      cell: ({ row }) => {
        const tech = row.getValue("technologies") as string[] | null
        if (!tech || tech.length === 0) return <span className="text-muted-foreground">-</span>
        return <ExpandableTagList items={tech} maxLines={2} variant="outline" />
      },
    },
    {
      id: "contentLength",
      accessorKey: "contentLength",
      meta: { title: "Length" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Length" />
      ),
      size: 100,
      minSize: 80,
      maxSize: 150,
      cell: ({ row }) => {
        const len = row.getValue("contentLength") as number | null
        if (len === null || len === undefined) return <span className="text-muted-foreground">-</span>
        return <span className="font-mono tabular-nums">{new Intl.NumberFormat().format(len)}</span>
      },
    },
    {
      id: "location",
      accessorKey: "location",
      meta: { title: "Location" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Location" />
      ),
      size: 150,
      minSize: 100,
      maxSize: 300,
      cell: ({ row }) => (
        <ExpandableCell value={row.getValue("location")} />
      ),
    },
    {
      id: "webserver",
      accessorKey: "webserver",
      meta: { title: "Server" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Server" />
      ),
      size: 120,
      minSize: 80,
      maxSize: 200,
      cell: ({ row }) => (
        <ExpandableCell value={row.getValue("webserver")} />
      ),
    },
    {
      id: "contentType",
      accessorKey: "contentType",
      meta: { title: "Type" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Type" />
      ),
      size: 120,
      minSize: 80,
      maxSize: 200,
      cell: ({ row }) => (
        <ExpandableCell value={row.getValue("contentType")} />
      ),
    },
    {
      id: "responseBody",
      accessorKey: "responseBody",
      meta: { title: "Body" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Body" />
      ),
      size: 300,
      minSize: 200,
      cell: ({ row }) => (
        <ExpandableCell value={row.getValue("responseBody")} maxLines={3} />
      ),
    },
    {
      id: "responseHeaders",
      accessorKey: "responseHeaders",
      meta: { title: "Headers" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Headers" />
      ),
      size: 250,
      minSize: 150,
      maxSize: 400,
      cell: ({ row }) => {
        const headers = row.getValue("responseHeaders") as Record<string, string> | null
        if (!headers || Object.keys(headers).length === 0) {
          return <span className="text-muted-foreground">-</span>
        }
        // Flatten to "Key: value" lines for the expandable cell.
        const headersStr = Object.entries(headers)
          .map(([k, v]) => `${k}: ${v}`)
          .join('\n')
        return <ExpandableCell value={headersStr} maxLines={3} />
      },
    },
    {
      id: "vhost",
      accessorKey: "vhost",
      meta: { title: "VHost" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="VHost" />
      ),
      size: 80,
      minSize: 60,
      maxSize: 100,
      cell: ({ row }) => {
        const vhost = row.getValue("vhost") as boolean | null
        if (vhost === null || vhost === undefined) return <span className="text-muted-foreground">-</span>
        return <span className="font-mono text-sm">{vhost ? "true" : "false"}</span>
      },
    },
    {
      id: "createdAt",
      accessorKey: "createdAt",
      meta: { title: "Created" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="Created" />
      ),
      size: 150,
      minSize: 120,
      maxSize: 200,
      cell: ({ row }) => {
        const createdAt = row.getValue("createdAt") as string | null
        if (!createdAt) return <span className="text-muted-foreground">-</span>
        return <span className="text-sm">{formatDate(createdAt)}</span>
      },
    },
  ], [formatDate])

  // Endpoint-only columns.
  const endpointColumns: ColumnDef<SearchResult, unknown>[] = useMemo(() => [
    {
      id: "matchedGfPatterns",
      accessorKey: "matchedGfPatterns",
      meta: { title: "GF Patterns" },
      header: ({ column }) => (
        <DataTableColumnHeader column={column} title="GF Patterns" />
      ),
      size: 150,
      minSize: 100,
      maxSize: 250,
      cell: ({ row }) => {
        const patterns = (row.original as EndpointSearchResult).matchedGfPatterns
        if (!patterns || patterns.length === 0) return <span className="text-muted-foreground">-</span>
        return <ExpandableTagList items={patterns} maxLines={2} variant="secondary" />
      },
    },
  ], [])

  // Compose the final column set by asset type.
  const columns = useMemo(() => {
    if (assetType === 'endpoint') {
      // Insert the GF Patterns column right after "technologies".
      const techIndex = baseColumns.findIndex(col => col.id === 'technologies')
      const cols = [...baseColumns]
      cols.splice(techIndex + 1, 0, ...endpointColumns)
      return cols
    }
    return baseColumns
  }, [assetType, baseColumns, endpointColumns])

  return (
    <UnifiedDataTable
      columns={columns}
      data={results}
      getRowId={(row) => String(row.id)}
      hideToolbar
      hidePagination
      enableRowSelection={false}
    />
  )
}

View File

@@ -0,0 +1,26 @@
import { useQuery, keepPreviousData } from '@tanstack/react-query'
import { SearchService } from '@/services/search.service'
import type { SearchParams, SearchResponse } from '@/types/search.types'
/**
 * Asset search hook.
 *
 * Wraps TanStack Query around SearchService.search. The request only fires
 * when the caller has not disabled it AND `params.q` contains non-whitespace
 * text.
 *
 * @param params  search parameters (query string, pagination, etc.)
 * @param options optional `{ enabled }` flag to gate the query externally
 * @returns TanStack Query result wrapping a SearchResponse
 */
export function useAssetSearch(
  params: SearchParams,
  options?: { enabled?: boolean }
) {
  const callerEnabled = options?.enabled ?? true
  // Blank or whitespace-only queries have nothing to search for.
  const hasQuery = Boolean(params.q?.trim())
  return useQuery<SearchResponse>({
    queryKey: ['asset-search', params],
    queryFn: () => SearchService.search(params),
    enabled: callerEnabled && hasQuery,
    placeholderData: keepPreviousData, // keep the previous page visible while fetching
    staleTime: 30000, // consider results fresh for 30 seconds
  })
}

View File

@@ -296,6 +296,7 @@
"navigation": {
"mainFeatures": "Main Features",
"dashboard": "Dashboard",
"search": "Search",
"organization": "Organization",
"target": "Targets",
"vulnerabilities": "Vulnerabilities",
@@ -314,6 +315,71 @@
"help": "Get Help",
"feedback": "Feedback"
},
"search": {
"title": "Asset Search",
"hint": "Click search box to view available fields and syntax. Plain text defaults to hostname search",
"searching": "Searching...",
"loading": "Loading...",
"resultsCount": "Found {count} results",
"error": "Search failed, please try again later",
"noResults": "No matching assets found",
"noResultsHint": "Try adjusting your search criteria",
"vulnLoadError": "Failed to load vulnerability details",
"recentSearches": "Recent Searches",
"export": "Export",
"exporting": "Exporting...",
"exportSuccess": "Export successful",
"exportFailed": "Export failed",
"stats": {
"vulnerabilities": "Vulnerabilities"
},
"assetTypes": {
"website": "Website",
"endpoint": "Endpoint"
},
"fields": {
"host": "Hostname",
"url": "URL address",
"title": "Page title",
"tech": "Technology stack",
"status": "HTTP status code",
"body": "Response body content",
"header": "Response header content"
},
"table": {
"url": "URL",
"host": "Host",
"title": "Title",
"status": "Status",
"technologies": "Technologies",
"contentLength": "Content Length",
"location": "Location",
"webserver": "Web Server",
"contentType": "Content Type",
"responseBody": "Response Body",
"responseHeaders": "Response Headers",
"vhost": "VHost",
"createdAt": "Created At",
"gfPatterns": "GF Patterns"
},
"card": {
"title": "Title",
"expand": "Expand",
"collapse": "Collapse",
"vulnerabilities": "Vulnerabilities ({count})",
"vulnName": "Vulnerability Name",
"severity": "Severity",
"source": "Source",
"vulnType": "Vuln Type"
},
"vulnDetail": {
"title": "Vulnerability Details",
"name": "Vulnerability Name",
"source": "Source",
"type": "Vulnerability Type",
"url": "Vulnerability URL"
}
},
"dashboard": {
"title": "Dashboard",
"stats": {
@@ -715,6 +781,7 @@
"organizationMode": "Organization Scan",
"organizationModeHint": "In organization scan mode, all targets under this organization will be dynamically fetched at execution",
"noAvailableTarget": "No available targets",
"noEngine": "No engines available",
"selected": "Selected",
"selectedEngines": "{count} engines selected"
},
@@ -1908,6 +1975,16 @@
"formatInvalid": "Invalid format"
}
},
"globalSearch": {
"search": "Search",
"placeholder": "Search assets... (host=\"api\" && tech=\"nginx\")",
"noResults": "No results found",
"searchFor": "Search for",
"recent": "Recent Searches",
"quickSearch": "Quick Search",
"hint": "Supports FOFA-style syntax",
"toSearch": "to search"
},
"errors": {
"unknown": "Operation failed, please try again later",
"validation": "Invalid input data",

View File

@@ -296,6 +296,7 @@
"navigation": {
"mainFeatures": "主要功能",
"dashboard": "仪表盘",
"search": "搜索",
"organization": "组织",
"target": "目标",
"vulnerabilities": "漏洞",
@@ -314,6 +315,71 @@
"help": "获取帮助",
"feedback": "反馈建议"
},
"search": {
"title": "资产搜索",
"hint": "点击搜索框查看可用字段和语法,直接输入文本默认搜索主机名",
"searching": "搜索中...",
"loading": "加载中...",
"resultsCount": "找到 {count} 条结果",
"error": "搜索失败,请稍后重试",
"noResults": "未找到匹配的资产",
"noResultsHint": "请尝试调整搜索条件",
"vulnLoadError": "加载漏洞详情失败",
"recentSearches": "最近搜索",
"export": "导出",
"exporting": "导出中...",
"exportSuccess": "导出成功",
"exportFailed": "导出失败",
"stats": {
"vulnerabilities": "漏洞"
},
"assetTypes": {
"website": "网站",
"endpoint": "URL"
},
"fields": {
"host": "主机名",
"url": "URL 地址",
"title": "页面标题",
"tech": "技术栈",
"status": "HTTP 状态码",
"body": "响应体内容",
"header": "响应头内容"
},
"table": {
"url": "URL",
"host": "主机名",
"title": "标题",
"status": "状态码",
"technologies": "技术栈",
"contentLength": "内容长度",
"location": "跳转地址",
"webserver": "Web 服务器",
"contentType": "内容类型",
"responseBody": "响应体",
"responseHeaders": "响应头",
"vhost": "VHost",
"createdAt": "创建时间",
"gfPatterns": "GF 模式"
},
"card": {
"title": "标题",
"expand": "展开",
"collapse": "收起",
"vulnerabilities": "关联漏洞 ({count})",
"vulnName": "漏洞名称",
"severity": "严重程度",
"source": "来源",
"vulnType": "漏洞类型"
},
"vulnDetail": {
"title": "漏洞详情",
"name": "漏洞名称",
"source": "来源",
"type": "漏洞类型",
"url": "漏洞 URL"
}
},
"dashboard": {
"title": "仪表盘",
"stats": {
@@ -715,6 +781,7 @@
"organizationMode": "组织扫描",
"organizationModeHint": "组织扫描模式下,执行时将动态获取该组织下所有目标",
"noAvailableTarget": "暂无可用目标",
"noEngine": "暂无可用引擎",
"selected": "已选择",
"selectedEngines": "已选择 {count} 个引擎"
},
@@ -1908,6 +1975,16 @@
"formatInvalid": "格式无效"
}
},
"globalSearch": {
"search": "搜索",
"placeholder": "搜索资产... (host=\"api\" && tech=\"nginx\")",
"noResults": "未找到结果",
"searchFor": "搜索",
"recent": "最近搜索",
"quickSearch": "快捷搜索",
"hint": "支持 FOFA 风格语法",
"toSearch": "搜索"
},
"errors": {
"unknown": "操作失败,请稍后重试",
"validation": "输入数据无效",

23
frontend/mock/config.ts Normal file
View File

@@ -0,0 +1,23 @@
/**
 * Mock data configuration.
 *
 * Usage:
 * 1. Set NEXT_PUBLIC_USE_MOCK=true in .env.local to enable mock data, or
 * 2. flip FORCE_MOCK below to true.
 */
// Force mock data on (normally keep false and control via the env var)
const FORCE_MOCK = false
// Effective mock switch, read from the environment unless forced
export const USE_MOCK = FORCE_MOCK || process.env.NEXT_PUBLIC_USE_MOCK === 'true'
// Artificial latency applied to mock responses (simulated network delay)
export const MOCK_DELAY = 300 // ms
/**
 * Resolve after `ms` milliseconds to simulate network latency.
 *
 * @param ms delay in milliseconds (defaults to MOCK_DELAY)
 */
export function mockDelay(ms: number = MOCK_DELAY): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms)
  })
}

View File

@@ -0,0 +1,22 @@
import type { User, MeResponse, LoginResponse, LogoutResponse } from '@/types/auth.types'
// Canned admin user shared by all auth mocks below.
export const mockUser: User = {
  id: 1,
  username: 'admin',
  isStaff: true,
  isSuperuser: true,
}
// Response for the "who am I" endpoint when the mock session is authenticated.
export const mockMeResponse: MeResponse = {
  authenticated: true,
  user: mockUser,
}
// Successful login payload.
export const mockLoginResponse: LoginResponse = {
  message: 'Login successful',
  user: mockUser,
}
// Successful logout payload.
export const mockLogoutResponse: LogoutResponse = {
  message: 'Logout successful',
}

View File

@@ -0,0 +1,71 @@
import type { AssetStatistics, StatisticsHistoryItem, DashboardStats } from '@/types/dashboard.types'
// Headline counters for the dashboard summary cards.
export const mockDashboardStats: DashboardStats = {
  totalTargets: 156,
  totalSubdomains: 4823,
  totalEndpoints: 12456,
  totalVulnerabilities: 89,
}
// Full asset-statistics snapshot, including period-over-period deltas.
export const mockAssetStatistics: AssetStatistics = {
  totalTargets: 156,
  totalSubdomains: 4823,
  totalIps: 892,
  totalEndpoints: 12456,
  totalWebsites: 3421,
  totalVulns: 89,
  totalAssets: 21638,
  runningScans: 3,
  updatedAt: new Date().toISOString(),
  // Deltas vs. the previous period (negative = decrease)
  changeTargets: 12,
  changeSubdomains: 234,
  changeIps: 45,
  changeEndpoints: 567,
  changeWebsites: 89,
  changeVulns: -5,
  changeAssets: 942,
  // Vulnerability count broken down by severity
  vulnBySeverity: {
    critical: 3,
    high: 12,
    medium: 28,
    low: 34,
    info: 12,
  },
}
// Build `days` days of synthetic history ending today, with a mild upward
// trend (~+2% of the base values per elapsed day).
function generateHistoryData(days: number): StatisticsHistoryItem[] {
  const today = new Date()
  const items: StatisticsHistoryItem[] = []
  for (let offset = days - 1; offset >= 0; offset--) {
    const day = new Date(today)
    day.setDate(day.getDate() - offset)
    // Growth factor relative to the start of the window.
    const growth = 1 + (days - offset) * 0.02
    items.push({
      date: day.toISOString().split('T')[0],
      totalTargets: Math.floor(140 * growth),
      totalSubdomains: Math.floor(4200 * growth),
      totalIps: Math.floor(780 * growth),
      totalEndpoints: Math.floor(10800 * growth),
      totalWebsites: Math.floor(2980 * growth),
      totalVulns: Math.floor(75 * growth),
      totalAssets: Math.floor(18900 * growth),
    })
  }
  return items
}
// Precomputed windows for the most common dashboard ranges.
export const mockStatisticsHistory7Days = generateHistoryData(7)
export const mockStatisticsHistory30Days = generateHistoryData(30)
/**
 * Return mock statistics history for the requested window.
 *
 * Windows of up to 7 days reuse the cached 7-day series; a 30-day request
 * now reuses the cached 30-day series (previously it was regenerated on
 * every call, leaving mockStatisticsHistory30Days unused); any other window
 * is generated on demand.
 */
export function getMockStatisticsHistory(days: number): StatisticsHistoryItem[] {
  if (days <= 7) return mockStatisticsHistory7Days
  if (days === 30) return mockStatisticsHistory30Days
  return generateHistoryData(days)
}

View File

@@ -0,0 +1,257 @@
import type { Endpoint, GetEndpointsResponse } from '@/types/endpoint.types'
export const mockEndpoints: Endpoint[] = [
{
id: 1,
url: 'https://acme.com/',
method: 'GET',
statusCode: 200,
title: 'Acme Corporation - Home',
contentLength: 45678,
contentType: 'text/html; charset=utf-8',
responseTime: 0.234,
host: 'acme.com',
webserver: 'nginx/1.24.0',
tech: ['React', 'Next.js', 'Node.js'],
createdAt: '2024-12-28T10:00:00Z',
},
{
id: 2,
url: 'https://acme.com/login',
method: 'GET',
statusCode: 200,
title: 'Login - Acme',
contentLength: 12345,
contentType: 'text/html; charset=utf-8',
responseTime: 0.156,
host: 'acme.com',
webserver: 'nginx/1.24.0',
tech: ['React', 'Next.js'],
createdAt: '2024-12-28T10:01:00Z',
},
{
id: 3,
url: 'https://api.acme.com/v1/users',
method: 'GET',
statusCode: 200,
title: '',
contentLength: 8923,
contentType: 'application/json',
responseTime: 0.089,
host: 'api.acme.com',
webserver: 'nginx/1.24.0',
tech: ['Django', 'Python'],
gfPatterns: ['api', 'json'],
createdAt: '2024-12-28T10:02:00Z',
},
{
id: 4,
url: 'https://api.acme.com/v1/products',
method: 'GET',
statusCode: 200,
title: '',
contentLength: 23456,
contentType: 'application/json',
responseTime: 0.145,
host: 'api.acme.com',
webserver: 'nginx/1.24.0',
tech: ['Django', 'Python'],
gfPatterns: ['api', 'json'],
createdAt: '2024-12-28T10:03:00Z',
},
{
id: 5,
url: 'https://acme.io/docs',
method: 'GET',
statusCode: 200,
title: 'Documentation - Acme.io',
contentLength: 67890,
contentType: 'text/html; charset=utf-8',
responseTime: 0.312,
host: 'acme.io',
webserver: 'cloudflare',
tech: ['Vue.js', 'Vitepress'],
createdAt: '2024-12-27T14:30:00Z',
},
{
id: 6,
url: 'https://acme.io/api/config',
method: 'GET',
statusCode: 200,
title: '',
contentLength: 1234,
contentType: 'application/json',
responseTime: 0.067,
host: 'acme.io',
webserver: 'cloudflare',
tech: ['Node.js', 'Express'],
gfPatterns: ['config', 'json'],
createdAt: '2024-12-27T14:31:00Z',
},
{
id: 7,
url: 'https://techstart.io/',
method: 'GET',
statusCode: 200,
title: 'TechStart - Innovation Hub',
contentLength: 34567,
contentType: 'text/html; charset=utf-8',
responseTime: 0.278,
host: 'techstart.io',
webserver: 'Apache/2.4.54',
tech: ['WordPress', 'PHP'],
createdAt: '2024-12-26T08:45:00Z',
},
{
id: 8,
url: 'https://techstart.io/admin',
method: 'GET',
statusCode: 302,
title: '',
contentLength: 0,
contentType: 'text/html',
responseTime: 0.045,
location: 'https://techstart.io/admin/login',
host: 'techstart.io',
webserver: 'Apache/2.4.54',
tech: ['WordPress', 'PHP'],
createdAt: '2024-12-26T08:46:00Z',
},
{
id: 9,
url: 'https://globalfinance.com/',
method: 'GET',
statusCode: 200,
title: 'Global Finance - Your Financial Partner',
contentLength: 56789,
contentType: 'text/html; charset=utf-8',
responseTime: 0.456,
host: 'globalfinance.com',
webserver: 'Microsoft-IIS/10.0',
tech: ['ASP.NET', 'C#', 'jQuery'],
createdAt: '2024-12-25T16:20:00Z',
},
{
id: 10,
url: 'https://globalfinance.com/.git/config',
method: 'GET',
statusCode: 200,
title: '',
contentLength: 456,
contentType: 'text/plain',
responseTime: 0.034,
host: 'globalfinance.com',
webserver: 'Microsoft-IIS/10.0',
gfPatterns: ['git', 'config'],
createdAt: '2024-12-25T16:21:00Z',
},
{
id: 11,
url: 'https://retailmax.com/',
method: 'GET',
statusCode: 200,
title: 'RetailMax - Shop Everything',
contentLength: 89012,
contentType: 'text/html; charset=utf-8',
responseTime: 0.567,
host: 'retailmax.com',
webserver: 'nginx/1.22.0',
tech: ['React', 'Redux', 'Node.js'],
createdAt: '2024-12-21T10:45:00Z',
},
{
id: 12,
url: 'https://retailmax.com/product?id=1',
method: 'GET',
statusCode: 200,
title: 'Product Detail - RetailMax',
contentLength: 23456,
contentType: 'text/html; charset=utf-8',
responseTime: 0.234,
host: 'retailmax.com',
webserver: 'nginx/1.22.0',
tech: ['React', 'Redux'],
gfPatterns: ['param', 'id'],
createdAt: '2024-12-21T10:46:00Z',
},
{
id: 13,
url: 'https://healthcareplus.com/',
method: 'GET',
statusCode: 200,
title: 'HealthCare Plus - Digital Health',
contentLength: 45678,
contentType: 'text/html; charset=utf-8',
responseTime: 0.345,
host: 'healthcareplus.com',
webserver: 'nginx/1.24.0',
tech: ['Angular', 'TypeScript'],
createdAt: '2024-12-23T11:00:00Z',
},
{
id: 14,
url: 'https://edutech.io/',
method: 'GET',
statusCode: 200,
title: 'EduTech - Learn Anywhere',
contentLength: 67890,
contentType: 'text/html; charset=utf-8',
responseTime: 0.289,
host: 'edutech.io',
webserver: 'cloudflare',
tech: ['Vue.js', 'Nuxt.js'],
createdAt: '2024-12-22T13:30:00Z',
},
{
id: 15,
url: 'https://cloudnine.host/',
method: 'GET',
statusCode: 200,
title: 'CloudNine Hosting',
contentLength: 34567,
contentType: 'text/html; charset=utf-8',
responseTime: 0.178,
host: 'cloudnine.host',
webserver: 'LiteSpeed',
tech: ['PHP', 'Laravel'],
createdAt: '2024-12-19T16:00:00Z',
},
]
export function getMockEndpoints(params?: {
page?: number
pageSize?: number
search?: string
}): GetEndpointsResponse {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const search = params?.search?.toLowerCase() || ''
let filtered = mockEndpoints
if (search) {
filtered = mockEndpoints.filter(
ep =>
ep.url.toLowerCase().includes(search) ||
ep.title.toLowerCase().includes(search) ||
ep.host?.toLowerCase().includes(search)
)
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const endpoints = filtered.slice(start, start + pageSize)
return {
endpoints,
total,
page,
pageSize,
totalPages,
}
}
/** Look up a single mock endpoint by numeric id; undefined when absent. */
export function getMockEndpointById(id: number): Endpoint | undefined {
  for (const candidate of mockEndpoints) {
    if (candidate.id === id) {
      return candidate
    }
  }
  return undefined
}

View File

@@ -0,0 +1,78 @@
import type { ScanEngine } from '@/types/engine.types'
// Static scan-engine fixtures. Each `configuration` is a YAML document
// (embedded as a template literal) describing the engine's stages and tools.
export const mockEngines: ScanEngine[] = [
  {
    id: 1,
    name: 'Full Scan',
    configuration: `# Full reconnaissance scan
stages:
  - name: subdomain_discovery
    tools:
      - subfinder
      - amass
  - name: port_scan
    tools:
      - nmap
  - name: web_crawling
    tools:
      - httpx
      - katana
  - name: vulnerability_scan
    tools:
      - nuclei
`,
    createdAt: '2024-01-15T08:00:00Z',
    updatedAt: '2024-12-20T10:30:00Z',
  },
  {
    id: 2,
    name: 'Quick Scan',
    configuration: `# Quick scan - subdomain and web only
stages:
  - name: subdomain_discovery
    tools:
      - subfinder
  - name: web_crawling
    tools:
      - httpx
`,
    createdAt: '2024-02-10T09:00:00Z',
    updatedAt: '2024-12-18T14:00:00Z',
  },
  {
    id: 3,
    name: 'Vulnerability Only',
    configuration: `# Vulnerability scan only
stages:
  - name: vulnerability_scan
    tools:
      - nuclei
    options:
      severity: critical,high,medium
`,
    createdAt: '2024-03-05T11:00:00Z',
    updatedAt: '2024-12-15T16:20:00Z',
  },
  {
    id: 4,
    name: 'Subdomain Discovery',
    configuration: `# Subdomain enumeration only
stages:
  - name: subdomain_discovery
    tools:
      - subfinder
      - amass
      - findomain
`,
    createdAt: '2024-04-12T08:30:00Z',
    updatedAt: '2024-12-10T09:00:00Z',
  },
]
/**
 * Return the mock engine list.
 * NOTE: callers receive the shared array instance, not a copy — mutating
 * the result would mutate the fixtures.
 */
export function getMockEngines(): ScanEngine[] {
  return mockEngines
}
/** Look up a single mock engine by numeric id; undefined when absent. */
export function getMockEngineById(id: number): ScanEngine | undefined {
  for (const engine of mockEngines) {
    if (engine.id === id) {
      return engine
    }
  }
  return undefined
}

View File

@@ -0,0 +1,110 @@
import type { BackendNotification, GetNotificationsResponse } from '@/types/notification.types'
// Static notification fixtures covering each category (vulnerability, scan,
// asset, system) and level, with a mix of read and unread entries so the
// unread-count and unread-filter paths are exercised.
export const mockNotifications: BackendNotification[] = [
  {
    id: 1,
    category: 'vulnerability',
    title: 'Critical Vulnerability Found',
    message: 'SQL Injection detected in retailmax.com/product endpoint',
    level: 'critical',
    createdAt: '2024-12-29T10:30:00Z',
    isRead: false,
  },
  {
    id: 2,
    category: 'scan',
    title: 'Scan Completed',
    message: 'Scan for acme.com completed successfully with 23 vulnerabilities found',
    level: 'medium',
    createdAt: '2024-12-29T09:00:00Z',
    isRead: false,
  },
  {
    id: 3,
    category: 'vulnerability',
    title: 'High Severity Vulnerability',
    message: 'XSS vulnerability found in acme.com/search',
    level: 'high',
    createdAt: '2024-12-28T16:45:00Z',
    isRead: true,
  },
  {
    id: 4,
    category: 'scan',
    title: 'Scan Failed',
    message: 'Scan for globalfinance.com failed: Connection timeout',
    level: 'high',
    createdAt: '2024-12-28T14:20:00Z',
    isRead: true,
  },
  {
    id: 5,
    category: 'asset',
    title: 'New Subdomains Discovered',
    message: '15 new subdomains discovered for techstart.io',
    level: 'low',
    createdAt: '2024-12-27T11:00:00Z',
    isRead: true,
  },
  {
    id: 6,
    category: 'system',
    title: 'Worker Offline',
    message: 'Worker node worker-03 is now offline',
    level: 'medium',
    createdAt: '2024-12-27T08:30:00Z',
    isRead: true,
  },
  {
    id: 7,
    category: 'scan',
    title: 'Scheduled Scan Started',
    message: 'Scheduled scan for Acme Corporation started',
    level: 'low',
    createdAt: '2024-12-26T06:00:00Z',
    isRead: true,
  },
  {
    id: 8,
    category: 'system',
    title: 'System Update Available',
    message: 'A new version of the scanner is available',
    level: 'low',
    createdAt: '2024-12-25T10:00:00Z',
    isRead: true,
  },
]
/**
 * Paginated view over the mock notifications.
 * When `unread` is truthy, only unread entries are included.
 */
export function getMockNotifications(params?: {
  page?: number
  pageSize?: number
  unread?: boolean
}): GetNotificationsResponse {
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10

  const pool = params?.unread
    ? mockNotifications.filter(n => !n.isRead)
    : mockNotifications

  const total = pool.length
  const offset = (page - 1) * pageSize
  return {
    results: pool.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}
/** Count the unread mock notifications. */
export function getMockUnreadCount(): { count: number } {
  const count = mockNotifications.reduce(
    (acc, n) => (n.isRead ? acc : acc + 1),
    0
  )
  return { count }
}

View File

@@ -0,0 +1,145 @@
import type { Organization, OrganizationsResponse } from '@/types/organization.types'
// Static organization fixtures. Descriptions are intentionally in Chinese
// (mirrors the product's primary locale); counts are arbitrary demo numbers.
export const mockOrganizations: Organization[] = [
  {
    id: 1,
    name: 'Acme Corporation',
    description: '全球领先的科技公司,专注于云计算和人工智能领域',
    createdAt: '2024-01-15T08:30:00Z',
    updatedAt: '2024-12-28T14:20:00Z',
    targetCount: 12,
    domainCount: 156,
    endpointCount: 2341,
    targets: [
      { id: 1, name: 'acme.com' },
      { id: 2, name: 'acme.io' },
    ],
  },
  {
    id: 2,
    name: 'TechStart Inc',
    description: '创新型初创企业,主营 SaaS 产品开发',
    createdAt: '2024-02-20T10:15:00Z',
    updatedAt: '2024-12-27T09:45:00Z',
    targetCount: 5,
    domainCount: 78,
    endpointCount: 892,
    targets: [
      { id: 3, name: 'techstart.io' },
    ],
  },
  {
    id: 3,
    name: 'Global Finance Ltd',
    description: '国际金融服务公司,提供银行和投资解决方案',
    createdAt: '2024-03-10T14:00:00Z',
    updatedAt: '2024-12-26T16:30:00Z',
    targetCount: 8,
    domainCount: 234,
    endpointCount: 1567,
    targets: [
      { id: 4, name: 'globalfinance.com' },
      { id: 5, name: 'gf-bank.net' },
    ],
  },
  {
    id: 4,
    name: 'HealthCare Plus',
    description: '医疗健康科技公司,专注于数字化医疗解决方案',
    createdAt: '2024-04-05T09:20:00Z',
    updatedAt: '2024-12-25T11:10:00Z',
    targetCount: 6,
    domainCount: 89,
    endpointCount: 723,
    targets: [
      { id: 6, name: 'healthcareplus.com' },
    ],
  },
  {
    id: 5,
    name: 'EduTech Solutions',
    description: '在线教育平台,提供 K-12 和职业培训课程',
    createdAt: '2024-05-12T11:45:00Z',
    updatedAt: '2024-12-24T13:55:00Z',
    targetCount: 4,
    domainCount: 45,
    endpointCount: 456,
    targets: [
      { id: 7, name: 'edutech.io' },
    ],
  },
  {
    id: 6,
    name: 'RetailMax',
    description: '电子商务零售平台,覆盖多品类商品销售',
    createdAt: '2024-06-08T16:30:00Z',
    updatedAt: '2024-12-23T10:20:00Z',
    targetCount: 15,
    domainCount: 312,
    endpointCount: 4521,
    targets: [
      { id: 8, name: 'retailmax.com' },
      { id: 9, name: 'retailmax.cn' },
    ],
  },
  {
    id: 7,
    name: 'CloudNine Hosting',
    description: '云托管服务提供商,提供 VPS 和专用服务器',
    createdAt: '2024-07-20T08:00:00Z',
    updatedAt: '2024-12-22T15:40:00Z',
    targetCount: 3,
    domainCount: 67,
    endpointCount: 389,
    targets: [
      { id: 10, name: 'cloudnine.host' },
    ],
  },
  {
    id: 8,
    name: 'MediaStream Corp',
    description: '流媒体内容分发平台,提供视频和音频服务',
    createdAt: '2024-08-15T12:10:00Z',
    updatedAt: '2024-12-21T08:25:00Z',
    targetCount: 7,
    domainCount: 123,
    endpointCount: 1234,
    targets: [
      { id: 11, name: 'mediastream.tv' },
    ],
  },
]
/**
 * Paginated, searchable list of mock organizations.
 * `search` is matched case-insensitively against name and description.
 */
export function getMockOrganizations(params?: {
  page?: number
  pageSize?: number
  search?: string
}): OrganizationsResponse<Organization> {
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10
  const query = params?.search?.toLowerCase() || ''

  const matched = !query
    ? mockOrganizations
    : mockOrganizations.filter(org =>
        [org.name, org.description].some(text =>
          text.toLowerCase().includes(query)
        )
      )

  const total = matched.length
  const offset = (page - 1) * pageSize
  return {
    results: matched.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}

309
frontend/mock/data/scans.ts Normal file
View File

@@ -0,0 +1,309 @@
import type { ScanRecord, GetScansResponse, ScanStatus } from '@/types/scan.types'
import type { ScanStatistics } from '@/services/scan.service'
// Static scan-record fixtures covering every lifecycle state the UI renders:
// completed, running (with per-stage progress), failed (with errorMessage),
// and initiated (workerName null, progress 0).
export const mockScans: ScanRecord[] = [
  {
    id: 1,
    target: 1,
    targetName: 'acme.com',
    workerName: 'worker-01',
    summary: {
      subdomains: 156,
      websites: 89,
      directories: 234,
      endpoints: 2341,
      ips: 45,
      vulnerabilities: {
        total: 23,
        critical: 1,
        high: 4,
        medium: 8,
        low: 10,
      },
    },
    engineIds: [1, 2, 3],
    engineNames: ['Subdomain Discovery', 'Web Crawling', 'Nuclei Scanner'],
    createdAt: '2024-12-28T10:00:00Z',
    status: 'completed',
    progress: 100,
  },
  {
    id: 2,
    target: 2,
    targetName: 'acme.io',
    workerName: 'worker-02',
    summary: {
      subdomains: 78,
      websites: 45,
      directories: 123,
      endpoints: 892,
      ips: 23,
      vulnerabilities: {
        total: 12,
        critical: 0,
        high: 2,
        medium: 5,
        low: 5,
      },
    },
    engineIds: [1, 2],
    engineNames: ['Subdomain Discovery', 'Web Crawling'],
    createdAt: '2024-12-27T14:30:00Z',
    status: 'running',
    progress: 65,
    currentStage: 'web_crawling',
    // Per-stage detail: completed stages carry duration/detail, the active
    // stage only has startedAt.
    stageProgress: {
      subdomain_discovery: {
        status: 'completed',
        order: 0,
        startedAt: '2024-12-27T14:30:00Z',
        duration: 1200,
        detail: 'Found 78 subdomains',
      },
      web_crawling: {
        status: 'running',
        order: 1,
        startedAt: '2024-12-27T14:50:00Z',
      },
    },
  },
  {
    id: 3,
    target: 3,
    targetName: 'techstart.io',
    workerName: 'worker-01',
    summary: {
      subdomains: 45,
      websites: 28,
      directories: 89,
      endpoints: 567,
      ips: 12,
      vulnerabilities: {
        total: 8,
        critical: 0,
        high: 1,
        medium: 3,
        low: 4,
      },
    },
    engineIds: [1, 2, 3],
    engineNames: ['Subdomain Discovery', 'Web Crawling', 'Nuclei Scanner'],
    createdAt: '2024-12-26T08:45:00Z',
    status: 'completed',
    progress: 100,
  },
  {
    id: 4,
    target: 4,
    targetName: 'globalfinance.com',
    workerName: 'worker-03',
    summary: {
      subdomains: 0,
      websites: 0,
      directories: 0,
      endpoints: 0,
      ips: 0,
      vulnerabilities: {
        total: 0,
        critical: 0,
        high: 0,
        medium: 0,
        low: 0,
      },
    },
    engineIds: [1],
    engineNames: ['Subdomain Discovery'],
    createdAt: '2024-12-25T16:20:00Z',
    status: 'failed',
    progress: 15,
    errorMessage: 'Connection timeout: Unable to reach target',
  },
  {
    id: 5,
    target: 6,
    targetName: 'healthcareplus.com',
    workerName: 'worker-02',
    summary: {
      subdomains: 34,
      websites: 0,
      directories: 0,
      endpoints: 0,
      ips: 8,
      vulnerabilities: {
        total: 0,
        critical: 0,
        high: 0,
        medium: 0,
        low: 0,
      },
    },
    engineIds: [1, 2, 3],
    engineNames: ['Subdomain Discovery', 'Web Crawling', 'Nuclei Scanner'],
    createdAt: '2024-12-29T09:00:00Z',
    status: 'running',
    progress: 25,
    currentStage: 'subdomain_discovery',
    stageProgress: {
      subdomain_discovery: {
        status: 'running',
        order: 0,
        startedAt: '2024-12-29T09:00:00Z',
      },
      web_crawling: {
        status: 'pending',
        order: 1,
      },
      nuclei_scan: {
        status: 'pending',
        order: 2,
      },
    },
  },
  {
    id: 6,
    target: 7,
    targetName: 'edutech.io',
    workerName: null,
    summary: {
      subdomains: 0,
      websites: 0,
      directories: 0,
      endpoints: 0,
      ips: 0,
      vulnerabilities: {
        total: 0,
        critical: 0,
        high: 0,
        medium: 0,
        low: 0,
      },
    },
    engineIds: [1, 2],
    engineNames: ['Subdomain Discovery', 'Web Crawling'],
    createdAt: '2024-12-29T10:30:00Z',
    status: 'initiated',
    progress: 0,
  },
  {
    id: 7,
    target: 8,
    targetName: 'retailmax.com',
    workerName: 'worker-01',
    summary: {
      subdomains: 89,
      websites: 56,
      directories: 178,
      endpoints: 1234,
      ips: 28,
      vulnerabilities: {
        total: 15,
        critical: 0,
        high: 3,
        medium: 6,
        low: 6,
      },
    },
    engineIds: [1, 2, 3],
    engineNames: ['Subdomain Discovery', 'Web Crawling', 'Nuclei Scanner'],
    createdAt: '2024-12-21T10:45:00Z',
    status: 'completed',
    progress: 100,
  },
  {
    id: 8,
    target: 11,
    targetName: 'mediastream.tv',
    workerName: 'worker-02',
    summary: {
      subdomains: 67,
      websites: 0,
      directories: 0,
      endpoints: 0,
      ips: 15,
      vulnerabilities: {
        total: 0,
        critical: 0,
        high: 0,
        medium: 0,
        low: 0,
      },
    },
    engineIds: [1, 2, 3],
    engineNames: ['Subdomain Discovery', 'Web Crawling', 'Nuclei Scanner'],
    createdAt: '2024-12-29T08:00:00Z',
    status: 'running',
    progress: 45,
    currentStage: 'web_crawling',
    stageProgress: {
      subdomain_discovery: {
        status: 'completed',
        order: 0,
        startedAt: '2024-12-29T08:00:00Z',
        duration: 900,
        detail: 'Found 67 subdomains',
      },
      web_crawling: {
        status: 'running',
        order: 1,
        startedAt: '2024-12-29T08:15:00Z',
      },
      nuclei_scan: {
        status: 'pending',
        order: 2,
      },
    },
  },
]
// Aggregate dashboard numbers. These are standalone demo values and are NOT
// derived from `mockScans` above — do not expect them to sum consistently.
export const mockScanStatistics: ScanStatistics = {
  total: 156,
  running: 3,
  completed: 142,
  failed: 11,
  totalVulns: 89,
  totalSubdomains: 4823,
  totalEndpoints: 12456,
  totalWebsites: 3421,
  totalAssets: 21638,
}
/**
 * Paginated scan listing with optional status filter and case-insensitive
 * target-name search.
 */
export function getMockScans(params?: {
  page?: number
  pageSize?: number
  status?: ScanStatus
  search?: string
}): GetScansResponse {
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10
  const status = params?.status
  const query = params?.search?.toLowerCase() || ''

  let matched = mockScans
  if (status) {
    matched = matched.filter(scan => scan.status === status)
  }
  if (query) {
    matched = matched.filter(scan =>
      scan.targetName.toLowerCase().includes(query)
    )
  }

  const total = matched.length
  const offset = (page - 1) * pageSize
  return {
    results: matched.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}
/** Look up a single mock scan record by numeric id; undefined when absent. */
export function getMockScanById(id: number): ScanRecord | undefined {
  for (const record of mockScans) {
    if (record.id === id) {
      return record
    }
  }
  return undefined
}

View File

@@ -0,0 +1,132 @@
import type { ScheduledScan, GetScheduledScansResponse } from '@/types/scheduled-scan.types'
// Scheduled-scan fixtures. Two mutually exclusive scan modes are represented:
// 'organization' entries set organizationId/Name and null target fields,
// 'target' entries do the opposite. Cron expressions use standard 5-field
// syntax; one entry (id 4) is disabled to exercise the isEnabled toggle.
export const mockScheduledScans: ScheduledScan[] = [
  {
    id: 1,
    name: 'Daily Acme Scan',
    engineIds: [1],
    engineNames: ['Full Scan'],
    organizationId: 1,
    organizationName: 'Acme Corporation',
    targetId: null,
    targetName: null,
    scanMode: 'organization',
    cronExpression: '0 2 * * *',
    isEnabled: true,
    nextRunTime: '2024-12-30T02:00:00Z',
    lastRunTime: '2024-12-29T02:00:00Z',
    runCount: 45,
    createdAt: '2024-11-15T08:00:00Z',
    updatedAt: '2024-12-29T02:00:00Z',
  },
  {
    id: 2,
    name: 'Weekly TechStart Vuln Scan',
    engineIds: [3],
    engineNames: ['Vulnerability Only'],
    organizationId: 2,
    organizationName: 'TechStart Inc',
    targetId: null,
    targetName: null,
    scanMode: 'organization',
    cronExpression: '0 3 * * 0',
    isEnabled: true,
    nextRunTime: '2025-01-05T03:00:00Z',
    lastRunTime: '2024-12-29T03:00:00Z',
    runCount: 12,
    createdAt: '2024-10-01T10:00:00Z',
    updatedAt: '2024-12-29T03:00:00Z',
  },
  {
    id: 3,
    name: 'Hourly API Monitoring',
    engineIds: [2],
    engineNames: ['Quick Scan'],
    organizationId: null,
    organizationName: null,
    targetId: 12,
    targetName: 'api.acme.com',
    scanMode: 'target',
    cronExpression: '0 * * * *',
    isEnabled: true,
    nextRunTime: '2024-12-29T12:00:00Z',
    lastRunTime: '2024-12-29T11:00:00Z',
    runCount: 720,
    createdAt: '2024-12-01T00:00:00Z',
    updatedAt: '2024-12-29T11:00:00Z',
  },
  {
    id: 4,
    name: 'Monthly Full Scan - Finance',
    engineIds: [1],
    engineNames: ['Full Scan'],
    organizationId: 3,
    organizationName: 'Global Finance Ltd',
    targetId: null,
    targetName: null,
    scanMode: 'organization',
    cronExpression: '0 0 1 * *',
    isEnabled: false,
    nextRunTime: '2025-01-01T00:00:00Z',
    lastRunTime: '2024-12-01T00:00:00Z',
    runCount: 6,
    createdAt: '2024-06-01T08:00:00Z',
    updatedAt: '2024-12-20T15:00:00Z',
  },
  {
    id: 5,
    name: 'RetailMax Daily Quick',
    engineIds: [2, 3],
    engineNames: ['Quick Scan', 'Vulnerability Only'],
    organizationId: null,
    organizationName: null,
    targetId: 8,
    targetName: 'retailmax.com',
    scanMode: 'target',
    cronExpression: '0 4 * * *',
    isEnabled: true,
    nextRunTime: '2024-12-30T04:00:00Z',
    lastRunTime: '2024-12-29T04:00:00Z',
    runCount: 30,
    createdAt: '2024-11-29T09:00:00Z',
    updatedAt: '2024-12-29T04:00:00Z',
  },
]
/**
 * Paginated scheduled-scan listing; `search` matches (case-insensitively)
 * against the schedule name, organization name, or target name.
 */
export function getMockScheduledScans(params?: {
  page?: number
  pageSize?: number
  search?: string
}): GetScheduledScansResponse {
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10
  const query = params?.search?.toLowerCase() || ''

  const matched = !query
    ? mockScheduledScans
    : mockScheduledScans.filter(s =>
        [s.name, s.organizationName ?? '', s.targetName ?? ''].some(text =>
          text.toLowerCase().includes(query)
        )
      )

  const total = matched.length
  const offset = (page - 1) * pageSize
  return {
    results: matched.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}
/** Look up a scheduled scan by numeric id; undefined when absent. */
export function getMockScheduledScanById(id: number): ScheduledScan | undefined {
  for (const schedule of mockScheduledScans) {
    if (schedule.id === id) {
      return schedule
    }
  }
  return undefined
}

View File

@@ -0,0 +1,78 @@
import type { Subdomain, GetAllSubdomainsResponse } from '@/types/subdomain.types'
// Flat subdomain fixtures grouped by apex domain (acme.com, acme.io, …).
// Records carry only id/name/createdAt — no organization or target linkage.
export const mockSubdomains: Subdomain[] = [
  { id: 1, name: 'acme.com', createdAt: '2024-12-28T10:00:00Z' },
  { id: 2, name: 'www.acme.com', createdAt: '2024-12-28T10:01:00Z' },
  { id: 3, name: 'api.acme.com', createdAt: '2024-12-28T10:02:00Z' },
  { id: 4, name: 'admin.acme.com', createdAt: '2024-12-28T10:03:00Z' },
  { id: 5, name: 'mail.acme.com', createdAt: '2024-12-28T10:04:00Z' },
  { id: 6, name: 'blog.acme.com', createdAt: '2024-12-28T10:05:00Z' },
  { id: 7, name: 'shop.acme.com', createdAt: '2024-12-28T10:06:00Z' },
  { id: 8, name: 'cdn.acme.com', createdAt: '2024-12-28T10:07:00Z' },
  { id: 9, name: 'static.acme.com', createdAt: '2024-12-28T10:08:00Z' },
  { id: 10, name: 'dev.acme.com', createdAt: '2024-12-28T10:09:00Z' },
  { id: 11, name: 'staging.acme.com', createdAt: '2024-12-28T10:10:00Z' },
  { id: 12, name: 'test.acme.com', createdAt: '2024-12-28T10:11:00Z' },
  { id: 13, name: 'acme.io', createdAt: '2024-12-27T14:30:00Z' },
  { id: 14, name: 'docs.acme.io', createdAt: '2024-12-27T14:31:00Z' },
  { id: 15, name: 'api.acme.io', createdAt: '2024-12-27T14:32:00Z' },
  { id: 16, name: 'status.acme.io', createdAt: '2024-12-27T14:33:00Z' },
  { id: 17, name: 'techstart.io', createdAt: '2024-12-26T08:45:00Z' },
  { id: 18, name: 'www.techstart.io', createdAt: '2024-12-26T08:46:00Z' },
  { id: 19, name: 'app.techstart.io', createdAt: '2024-12-26T08:47:00Z' },
  { id: 20, name: 'globalfinance.com', createdAt: '2024-12-25T16:20:00Z' },
  { id: 21, name: 'www.globalfinance.com', createdAt: '2024-12-25T16:21:00Z' },
  { id: 22, name: 'secure.globalfinance.com', createdAt: '2024-12-25T16:22:00Z' },
  { id: 23, name: 'portal.globalfinance.com', createdAt: '2024-12-25T16:23:00Z' },
  { id: 24, name: 'healthcareplus.com', createdAt: '2024-12-23T11:00:00Z' },
  { id: 25, name: 'www.healthcareplus.com', createdAt: '2024-12-23T11:01:00Z' },
  { id: 26, name: 'patient.healthcareplus.com', createdAt: '2024-12-23T11:02:00Z' },
  { id: 27, name: 'edutech.io', createdAt: '2024-12-22T13:30:00Z' },
  { id: 28, name: 'learn.edutech.io', createdAt: '2024-12-22T13:31:00Z' },
  { id: 29, name: 'retailmax.com', createdAt: '2024-12-21T10:45:00Z' },
  { id: 30, name: 'www.retailmax.com', createdAt: '2024-12-21T10:46:00Z' },
  { id: 31, name: 'm.retailmax.com', createdAt: '2024-12-21T10:47:00Z' },
  { id: 32, name: 'api.retailmax.com', createdAt: '2024-12-21T10:48:00Z' },
  { id: 33, name: 'cloudnine.host', createdAt: '2024-12-19T16:00:00Z' },
  { id: 34, name: 'panel.cloudnine.host', createdAt: '2024-12-19T16:01:00Z' },
  { id: 35, name: 'mediastream.tv', createdAt: '2024-12-18T09:30:00Z' },
  { id: 36, name: 'www.mediastream.tv', createdAt: '2024-12-18T09:31:00Z' },
  { id: 37, name: 'cdn.mediastream.tv', createdAt: '2024-12-18T09:32:00Z' },
  { id: 38, name: 'stream.mediastream.tv', createdAt: '2024-12-18T09:33:00Z' },
]
/**
 * Paginated subdomain listing with case-insensitive name search.
 * NOTE(review): `organizationId` is accepted for API parity but is not
 * applied — the mock Subdomain records carry no organization reference.
 */
export function getMockSubdomains(params?: {
  page?: number
  pageSize?: number
  search?: string
  organizationId?: number
}): GetAllSubdomainsResponse {
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10
  const query = params?.search?.toLowerCase() || ''

  const matched = !query
    ? mockSubdomains
    : mockSubdomains.filter(sub => sub.name.toLowerCase().includes(query))

  const total = matched.length
  const offset = (page - 1) * pageSize
  return {
    domains: matched.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}
/** Look up a subdomain by numeric id; undefined when absent. */
export function getMockSubdomainById(id: number): Subdomain | undefined {
  for (const sub of mockSubdomains) {
    if (sub.id === id) {
      return sub
    }
  }
  return undefined
}

View File

@@ -0,0 +1,205 @@
import type { Target, TargetsResponse, TargetDetail } from '@/types/target.types'
// Target fixtures spanning all three target types the UI supports:
// 'domain', 'cidr' (id 5) and 'ip' (id 9). Descriptions are in Chinese,
// matching the product's primary locale.
export const mockTargets: Target[] = [
  {
    id: 1,
    name: 'acme.com',
    type: 'domain',
    description: 'Acme Corporation 主站',
    createdAt: '2024-01-15T08:30:00Z',
    lastScannedAt: '2024-12-28T10:00:00Z',
    organizations: [{ id: 1, name: 'Acme Corporation' }],
  },
  {
    id: 2,
    name: 'acme.io',
    type: 'domain',
    description: 'Acme Corporation 开发者平台',
    createdAt: '2024-01-16T09:00:00Z',
    lastScannedAt: '2024-12-27T14:30:00Z',
    organizations: [{ id: 1, name: 'Acme Corporation' }],
  },
  {
    id: 3,
    name: 'techstart.io',
    type: 'domain',
    description: 'TechStart 官网',
    createdAt: '2024-02-20T10:15:00Z',
    lastScannedAt: '2024-12-26T08:45:00Z',
    organizations: [{ id: 2, name: 'TechStart Inc' }],
  },
  {
    id: 4,
    name: 'globalfinance.com',
    type: 'domain',
    description: 'Global Finance 主站',
    createdAt: '2024-03-10T14:00:00Z',
    lastScannedAt: '2024-12-25T16:20:00Z',
    organizations: [{ id: 3, name: 'Global Finance Ltd' }],
  },
  {
    id: 5,
    name: '192.168.1.0/24',
    type: 'cidr',
    description: '内网 IP 段',
    createdAt: '2024-03-15T11:30:00Z',
    lastScannedAt: '2024-12-24T09:15:00Z',
    organizations: [{ id: 3, name: 'Global Finance Ltd' }],
  },
  {
    id: 6,
    name: 'healthcareplus.com',
    type: 'domain',
    description: 'HealthCare Plus 官网',
    createdAt: '2024-04-05T09:20:00Z',
    lastScannedAt: '2024-12-23T11:00:00Z',
    organizations: [{ id: 4, name: 'HealthCare Plus' }],
  },
  {
    id: 7,
    name: 'edutech.io',
    type: 'domain',
    description: 'EduTech 在线教育平台',
    createdAt: '2024-05-12T11:45:00Z',
    lastScannedAt: '2024-12-22T13:30:00Z',
    organizations: [{ id: 5, name: 'EduTech Solutions' }],
  },
  {
    id: 8,
    name: 'retailmax.com',
    type: 'domain',
    description: 'RetailMax 电商主站',
    createdAt: '2024-06-08T16:30:00Z',
    lastScannedAt: '2024-12-21T10:45:00Z',
    organizations: [{ id: 6, name: 'RetailMax' }],
  },
  {
    id: 9,
    name: '10.0.0.1',
    type: 'ip',
    description: '核心服务器 IP',
    createdAt: '2024-07-01T08:00:00Z',
    lastScannedAt: '2024-12-20T14:20:00Z',
    organizations: [{ id: 7, name: 'CloudNine Hosting' }],
  },
  {
    id: 10,
    name: 'cloudnine.host',
    type: 'domain',
    description: 'CloudNine 托管服务',
    createdAt: '2024-07-20T08:00:00Z',
    lastScannedAt: '2024-12-19T16:00:00Z',
    organizations: [{ id: 7, name: 'CloudNine Hosting' }],
  },
  {
    id: 11,
    name: 'mediastream.tv',
    type: 'domain',
    description: 'MediaStream 流媒体平台',
    createdAt: '2024-08-15T12:10:00Z',
    lastScannedAt: '2024-12-18T09:30:00Z',
    organizations: [{ id: 8, name: 'MediaStream Corp' }],
  },
  {
    id: 12,
    name: 'api.acme.com',
    type: 'domain',
    description: 'Acme API 服务',
    createdAt: '2024-09-01T10:00:00Z',
    lastScannedAt: '2024-12-17T11:15:00Z',
    organizations: [{ id: 1, name: 'Acme Corporation' }],
  },
]
// Hand-crafted detail records (keyed by target id) for the first two targets.
// getMockTargetById falls back to randomly generated summaries for any id
// not present here.
export const mockTargetDetails: Record<number, TargetDetail> = {
  1: {
    ...mockTargets[0],
    summary: {
      subdomains: 156,
      websites: 89,
      endpoints: 2341,
      ips: 45,
      vulnerabilities: {
        total: 23,
        critical: 1,
        high: 4,
        medium: 8,
        low: 10,
      },
    },
  },
  2: {
    ...mockTargets[1],
    summary: {
      subdomains: 78,
      websites: 45,
      endpoints: 892,
      ips: 23,
      vulnerabilities: {
        total: 12,
        critical: 0,
        high: 2,
        medium: 5,
        low: 5,
      },
    },
  },
}
/**
 * Paginated target listing; `search` matches case-insensitively against the
 * target name and (optional) description.
 */
export function getMockTargets(params?: {
  page?: number
  pageSize?: number
  search?: string
}): TargetsResponse {
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10
  const query = params?.search?.toLowerCase() || ''

  const matched = !query
    ? mockTargets
    : mockTargets.filter(target =>
        [target.name, target.description ?? ''].some(text =>
          text.toLowerCase().includes(query)
        )
      )

  const total = matched.length
  const offset = (page - 1) * pageSize
  return {
    results: matched.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}
/**
 * Resolve a target detail record by id.
 * Prefers the hand-crafted entries in `mockTargetDetails`; otherwise
 * synthesizes a detail from the base target with randomly generated summary
 * numbers (non-deterministic between calls by design — this is mock data).
 */
export function getMockTargetById(id: number): TargetDetail | undefined {
  const crafted = mockTargetDetails[id]
  if (crafted) {
    return crafted
  }

  const base = mockTargets.find(t => t.id === id)
  if (!base) {
    return undefined
  }

  // Random integer in [min, min + span) — same ranges as the original
  // inline Math.floor(Math.random() * span) + min expressions.
  const randomInt = (span: number, min: number): number =>
    Math.floor(Math.random() * span) + min

  return {
    ...base,
    summary: {
      subdomains: randomInt(100, 10),
      websites: randomInt(50, 5),
      endpoints: randomInt(1000, 100),
      ips: randomInt(30, 5),
      vulnerabilities: {
        total: randomInt(20, 1),
        critical: randomInt(2, 0),
        high: randomInt(5, 0),
        medium: randomInt(8, 0),
        low: randomInt(10, 0),
      },
    },
  }
}

View File

@@ -0,0 +1,275 @@
import type { Vulnerability, GetVulnerabilitiesResponse, VulnerabilitySeverity } from '@/types/vulnerability.types'
// Vulnerability fixtures from two mock scanners: 'dalfox' entries carry
// dalfox-shaped rawOutput (type/inject_type/param/payload), 'nuclei' entries
// carry nuclei-shaped rawOutput (template-id/matched-at/info). Severities
// span critical → info; cvssScore is omitted on info-level findings.
export const mockVulnerabilities: Vulnerability[] = [
  {
    id: 1,
    target: 1,
    url: 'https://acme.com/search?q=test',
    vulnType: 'xss-reflected',
    severity: 'critical',
    source: 'dalfox',
    cvssScore: 9.1,
    description: 'Reflected XSS in search parameter',
    rawOutput: {
      type: 'R',
      inject_type: 'inHTML-URL',
      method: 'GET',
      data: 'https://acme.com/search?q=<script>alert(1)</script>',
      param: 'q',
      payload: '<script>alert(1)</script>',
      evidence: '<script>alert(1)</script>',
      cwe: 'CWE-79',
    },
    createdAt: '2024-12-28T10:30:00Z',
  },
  {
    id: 2,
    target: 1,
    url: 'https://api.acme.com/v1/users',
    vulnType: 'CVE-2024-1234',
    severity: 'high',
    source: 'nuclei',
    cvssScore: 8.5,
    description: 'SQL Injection in user API endpoint',
    rawOutput: {
      'template-id': 'CVE-2024-1234',
      'matched-at': 'https://api.acme.com/v1/users',
      host: 'api.acme.com',
      info: {
        name: 'SQL Injection',
        description: 'SQL injection vulnerability in user endpoint',
        severity: 'high',
        tags: ['sqli', 'cve'],
        reference: ['https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-1234'],
        classification: {
          'cve-id': 'CVE-2024-1234',
          'cwe-id': ['CWE-89'],
        },
      },
    },
    createdAt: '2024-12-28T10:45:00Z',
  },
  {
    id: 3,
    target: 1,
    url: 'https://acme.com/login',
    vulnType: 'xss-stored',
    severity: 'high',
    source: 'dalfox',
    cvssScore: 8.2,
    description: 'Stored XSS in user profile',
    rawOutput: {
      type: 'S',
      inject_type: 'inHTML-TAG',
      method: 'POST',
      param: 'bio',
      payload: '<img src=x onerror=alert(1)>',
    },
    createdAt: '2024-12-27T14:20:00Z',
  },
  {
    id: 4,
    target: 2,
    url: 'https://acme.io/api/config',
    vulnType: 'information-disclosure',
    severity: 'medium',
    source: 'nuclei',
    cvssScore: 5.3,
    description: 'Exposed configuration file',
    rawOutput: {
      'template-id': 'exposed-config',
      'matched-at': 'https://acme.io/api/config',
      host: 'acme.io',
      info: {
        name: 'Exposed Configuration',
        description: 'Configuration file accessible without authentication',
        severity: 'medium',
        tags: ['exposure', 'config'],
      },
    },
    createdAt: '2024-12-27T15:00:00Z',
  },
  {
    id: 5,
    target: 3,
    url: 'https://techstart.io/admin',
    vulnType: 'open-redirect',
    severity: 'medium',
    source: 'nuclei',
    cvssScore: 4.7,
    description: 'Open redirect vulnerability',
    rawOutput: {
      'template-id': 'open-redirect',
      'matched-at': 'https://techstart.io/admin?redirect=evil.com',
      host: 'techstart.io',
      info: {
        name: 'Open Redirect',
        description: 'URL redirect without validation',
        severity: 'medium',
        tags: ['redirect'],
      },
    },
    createdAt: '2024-12-26T09:30:00Z',
  },
  {
    id: 6,
    target: 4,
    url: 'https://globalfinance.com/.git/config',
    vulnType: 'git-config-exposure',
    severity: 'high',
    source: 'nuclei',
    cvssScore: 7.5,
    description: 'Git configuration file exposed',
    rawOutput: {
      'template-id': 'git-config',
      'matched-at': 'https://globalfinance.com/.git/config',
      host: 'globalfinance.com',
      info: {
        name: 'Git Config Exposure',
        description: 'Git configuration file is publicly accessible',
        severity: 'high',
        tags: ['git', 'exposure'],
      },
    },
    createdAt: '2024-12-25T11:15:00Z',
  },
  {
    id: 7,
    target: 8,
    url: 'https://retailmax.com/product?id=1',
    vulnType: 'sqli',
    severity: 'critical',
    source: 'nuclei',
    cvssScore: 9.8,
    description: 'SQL Injection in product parameter',
    rawOutput: {
      'template-id': 'generic-sqli',
      'matched-at': "https://retailmax.com/product?id=1'",
      host: 'retailmax.com',
      info: {
        name: 'SQL Injection',
        description: 'SQL injection in product ID parameter',
        severity: 'critical',
        tags: ['sqli'],
        classification: {
          'cwe-id': ['CWE-89'],
        },
      },
    },
    createdAt: '2024-12-21T12:00:00Z',
  },
  {
    id: 8,
    target: 1,
    url: 'https://acme.com/robots.txt',
    vulnType: 'robots-txt-exposure',
    severity: 'info',
    source: 'nuclei',
    description: 'Robots.txt file found',
    rawOutput: {
      'template-id': 'robots-txt',
      'matched-at': 'https://acme.com/robots.txt',
      host: 'acme.com',
      info: {
        name: 'Robots.txt',
        description: 'Robots.txt file detected',
        severity: 'info',
        tags: ['misc'],
      },
    },
    createdAt: '2024-12-28T10:00:00Z',
  },
  {
    id: 9,
    target: 2,
    url: 'https://acme.io/sitemap.xml',
    vulnType: 'sitemap-exposure',
    severity: 'info',
    source: 'nuclei',
    description: 'Sitemap.xml file found',
    rawOutput: {
      'template-id': 'sitemap-xml',
      'matched-at': 'https://acme.io/sitemap.xml',
      host: 'acme.io',
      info: {
        name: 'Sitemap.xml',
        description: 'Sitemap.xml file detected',
        severity: 'info',
        tags: ['misc'],
      },
    },
    createdAt: '2024-12-27T14:00:00Z',
  },
  {
    id: 10,
    target: 3,
    url: 'https://techstart.io/api/v2/debug',
    vulnType: 'debug-endpoint',
    severity: 'low',
    source: 'nuclei',
    cvssScore: 3.1,
    description: 'Debug endpoint exposed',
    rawOutput: {
      'template-id': 'debug-endpoint',
      'matched-at': 'https://techstart.io/api/v2/debug',
      host: 'techstart.io',
      info: {
        name: 'Debug Endpoint',
        description: 'Debug endpoint accessible in production',
        severity: 'low',
        tags: ['debug', 'exposure'],
      },
    },
    createdAt: '2024-12-26T10:00:00Z',
  },
]
export function getMockVulnerabilities(params?: {
page?: number
pageSize?: number
targetId?: number
severity?: VulnerabilitySeverity
search?: string
}): GetVulnerabilitiesResponse {
const page = params?.page || 1
const pageSize = params?.pageSize || 10
const targetId = params?.targetId
const severity = params?.severity
const search = params?.search?.toLowerCase() || ''
let filtered = mockVulnerabilities
if (targetId) {
filtered = filtered.filter(v => v.target === targetId)
}
if (severity) {
filtered = filtered.filter(v => v.severity === severity)
}
if (search) {
filtered = filtered.filter(
v =>
v.url.toLowerCase().includes(search) ||
v.vulnType.toLowerCase().includes(search) ||
v.description?.toLowerCase().includes(search)
)
}
const total = filtered.length
const totalPages = Math.ceil(total / pageSize)
const start = (page - 1) * pageSize
const vulnerabilities = filtered.slice(start, start + pageSize)
return {
vulnerabilities,
total,
page,
pageSize,
totalPages,
}
}
/** Look up a vulnerability by numeric id; undefined when absent. */
export function getMockVulnerabilityById(id: number): Vulnerability | undefined {
  for (const vuln of mockVulnerabilities) {
    if (vuln.id === id) {
      return vuln
    }
  }
  return undefined
}

View File

@@ -0,0 +1,252 @@
import type { WebSite, WebSiteListResponse } from '@/types/website.types'
// Mock website-asset fixtures; the `target` field references a mock target id.
// NOTE(review): responseBody values are truncated placeholders — presumably full
// captures are unnecessary for UI mocking; confirm before relying on them.
export const mockWebsites: WebSite[] = [
{
id: 1,
target: 1,
url: 'https://acme.com',
host: 'acme.com',
location: '',
title: 'Acme Corporation - Home',
webserver: 'nginx/1.24.0',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 45678,
responseBody: '<!DOCTYPE html>...',
tech: ['React', 'Next.js', 'Node.js', 'Tailwind CSS'],
vhost: false,
subdomain: 'acme.com',
createdAt: '2024-12-28T10:00:00Z',
},
{
id: 2,
target: 1,
url: 'https://www.acme.com',
host: 'www.acme.com',
location: 'https://acme.com',
title: 'Acme Corporation - Home',
webserver: 'nginx/1.24.0',
contentType: 'text/html; charset=utf-8',
statusCode: 301,
contentLength: 0,
responseBody: '',
tech: [],
vhost: false,
subdomain: 'www.acme.com',
createdAt: '2024-12-28T10:01:00Z',
},
{
id: 3,
target: 1,
url: 'https://api.acme.com',
host: 'api.acme.com',
location: '',
title: 'Acme API',
webserver: 'nginx/1.24.0',
contentType: 'application/json',
statusCode: 200,
contentLength: 234,
responseBody: '{"status":"ok","version":"1.0"}',
tech: ['Django', 'Python', 'PostgreSQL'],
vhost: false,
subdomain: 'api.acme.com',
createdAt: '2024-12-28T10:02:00Z',
},
{
id: 4,
target: 1,
url: 'https://admin.acme.com',
host: 'admin.acme.com',
location: '',
title: 'Admin Panel - Acme',
webserver: 'nginx/1.24.0',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 23456,
responseBody: '<!DOCTYPE html>...',
tech: ['React', 'Ant Design'],
vhost: false,
subdomain: 'admin.acme.com',
createdAt: '2024-12-28T10:03:00Z',
},
{
id: 5,
target: 2,
url: 'https://acme.io',
host: 'acme.io',
location: '',
title: 'Acme Developer Platform',
webserver: 'cloudflare',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 56789,
responseBody: '<!DOCTYPE html>...',
tech: ['Vue.js', 'Vitepress', 'CloudFlare'],
vhost: false,
subdomain: 'acme.io',
createdAt: '2024-12-27T14:30:00Z',
},
{
id: 6,
target: 2,
url: 'https://docs.acme.io',
host: 'docs.acme.io',
location: '',
title: 'Documentation - Acme.io',
webserver: 'cloudflare',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 67890,
responseBody: '<!DOCTYPE html>...',
tech: ['Vue.js', 'Vitepress'],
vhost: false,
subdomain: 'docs.acme.io',
createdAt: '2024-12-27T14:31:00Z',
},
{
id: 7,
target: 3,
url: 'https://techstart.io',
host: 'techstart.io',
location: '',
title: 'TechStart - Innovation Hub',
webserver: 'Apache/2.4.54',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 34567,
responseBody: '<!DOCTYPE html>...',
tech: ['WordPress', 'PHP', 'MySQL'],
vhost: false,
subdomain: 'techstart.io',
createdAt: '2024-12-26T08:45:00Z',
},
{
id: 8,
target: 4,
url: 'https://globalfinance.com',
host: 'globalfinance.com',
location: '',
title: 'Global Finance - Your Financial Partner',
webserver: 'Microsoft-IIS/10.0',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 56789,
responseBody: '<!DOCTYPE html>...',
tech: ['ASP.NET', 'C#', 'jQuery', 'SQL Server'],
vhost: false,
subdomain: 'globalfinance.com',
createdAt: '2024-12-25T16:20:00Z',
},
{
id: 9,
target: 6,
url: 'https://healthcareplus.com',
host: 'healthcareplus.com',
location: '',
title: 'HealthCare Plus - Digital Health',
webserver: 'nginx/1.24.0',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 45678,
responseBody: '<!DOCTYPE html>...',
tech: ['Angular', 'TypeScript', 'Node.js'],
vhost: false,
subdomain: 'healthcareplus.com',
createdAt: '2024-12-23T11:00:00Z',
},
{
id: 10,
target: 7,
url: 'https://edutech.io',
host: 'edutech.io',
location: '',
title: 'EduTech - Learn Anywhere',
webserver: 'cloudflare',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 67890,
responseBody: '<!DOCTYPE html>...',
tech: ['Vue.js', 'Nuxt.js', 'PostgreSQL'],
vhost: false,
subdomain: 'edutech.io',
createdAt: '2024-12-22T13:30:00Z',
},
{
id: 11,
target: 8,
url: 'https://retailmax.com',
host: 'retailmax.com',
location: '',
title: 'RetailMax - Shop Everything',
webserver: 'nginx/1.22.0',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 89012,
responseBody: '<!DOCTYPE html>...',
tech: ['React', 'Redux', 'Node.js', 'MongoDB'],
vhost: false,
subdomain: 'retailmax.com',
createdAt: '2024-12-21T10:45:00Z',
},
{
id: 12,
target: 10,
url: 'https://cloudnine.host',
host: 'cloudnine.host',
location: '',
title: 'CloudNine Hosting',
webserver: 'LiteSpeed',
contentType: 'text/html; charset=utf-8',
statusCode: 200,
contentLength: 34567,
responseBody: '<!DOCTYPE html>...',
tech: ['PHP', 'Laravel', 'MySQL'],
vhost: false,
subdomain: 'cloudnine.host',
createdAt: '2024-12-19T16:00:00Z',
},
]
export function getMockWebsites(params?: {
  page?: number
  pageSize?: number
  search?: string
  targetId?: number
}): WebSiteListResponse {
  /**
   * Filter and paginate the mock website fixtures.
   * Free-text search matches url, title, or host (case-insensitive).
   */
  const page = params?.page || 1
  const pageSize = params?.pageSize || 10
  const needle = params?.search?.toLowerCase() || ''
  const targetId = params?.targetId

  // Single-pass predicate: target filter first, then free-text match.
  const matched = mockWebsites.filter(site => {
    if (targetId && site.target !== targetId) return false
    if (!needle) return true
    return (
      site.url.toLowerCase().includes(needle) ||
      site.title.toLowerCase().includes(needle) ||
      site.host.toLowerCase().includes(needle)
    )
  })

  const total = matched.length
  const offset = (page - 1) * pageSize
  return {
    results: matched.slice(offset, offset + pageSize),
    total,
    page,
    pageSize,
    totalPages: Math.ceil(total / pageSize),
  }
}
export function getMockWebsiteById(id: number): WebSite | undefined {
  // Linear scan over the fixtures; yields undefined when no id matches.
  for (const site of mockWebsites) {
    if (site.id === id) return site
  }
  return undefined
}

View File

@@ -0,0 +1,78 @@
import type { WorkerNode, WorkersResponse } from '@/types/worker.types'
// Mock worker-node fixtures for the distributed scanner UI.
// Worker 4 is offline and therefore carries no live `info` metrics.
export const mockWorkers: WorkerNode[] = [
{
id: 1,
name: 'local-worker',
ipAddress: '127.0.0.1',
sshPort: 22,
username: 'root',
status: 'online',
isLocal: true,
createdAt: '2024-01-01T00:00:00Z',
updatedAt: '2024-12-29T10:00:00Z',
info: {
cpuPercent: 23.5,
memoryPercent: 45.2,
},
},
{
id: 2,
name: 'worker-01',
ipAddress: '192.168.1.101',
sshPort: 22,
username: 'scanner',
status: 'online',
isLocal: false,
createdAt: '2024-06-15T08:00:00Z',
updatedAt: '2024-12-29T09:30:00Z',
info: {
cpuPercent: 56.8,
memoryPercent: 72.1,
},
},
{
id: 3,
name: 'worker-02',
ipAddress: '192.168.1.102',
sshPort: 22,
username: 'scanner',
status: 'online',
isLocal: false,
createdAt: '2024-07-20T10:00:00Z',
updatedAt: '2024-12-29T09:45:00Z',
info: {
cpuPercent: 34.2,
memoryPercent: 58.9,
},
},
{
id: 4,
name: 'worker-03',
ipAddress: '192.168.1.103',
sshPort: 22,
username: 'scanner',
status: 'offline',
isLocal: false,
createdAt: '2024-08-10T14:00:00Z',
updatedAt: '2024-12-28T16:00:00Z',
},
]
/**
 * Paginate the mock worker list.
 *
 * @param page 1-based page number (defaults to 1)
 * @param pageSize rows per page (defaults to 10)
 * @returns a WorkersResponse slice of mockWorkers
 */
export function getMockWorkers(page = 1, pageSize = 10): WorkersResponse {
  const total = mockWorkers.length
  const start = (page - 1) * pageSize
  const results = mockWorkers.slice(start, start + pageSize)
  // The response shape carries no totalPages field, so the previously
  // computed-but-unused `totalPages` local has been removed (dead code).
  return {
    results,
    total,
    page,
    pageSize,
  }
}
export function getMockWorkerById(id: number): WorkerNode | undefined {
  // Linear scan over the fixtures; yields undefined when no id matches.
  for (const worker of mockWorkers) {
    if (worker.id === id) return worker
  }
  return undefined
}

107
frontend/mock/index.ts Normal file
View File

@@ -0,0 +1,107 @@
/**
* Unified export point for all mock data.
*
* Usage:
* import { USE_MOCK, mockData } from '@/mock'
*
* if (USE_MOCK) {
* return mockData.dashboard.assetStatistics
* }
*/
export { USE_MOCK, MOCK_DELAY, mockDelay } from './config'
// Dashboard
export {
mockDashboardStats,
mockAssetStatistics,
mockStatisticsHistory7Days,
mockStatisticsHistory30Days,
getMockStatisticsHistory,
} from './data/dashboard'
// Organizations
export {
mockOrganizations,
getMockOrganizations,
} from './data/organizations'
// Targets
export {
mockTargets,
mockTargetDetails,
getMockTargets,
getMockTargetById,
} from './data/targets'
// Scans
export {
mockScans,
mockScanStatistics,
getMockScans,
getMockScanById,
} from './data/scans'
// Vulnerabilities
export {
mockVulnerabilities,
getMockVulnerabilities,
getMockVulnerabilityById,
} from './data/vulnerabilities'
// Endpoints
export {
mockEndpoints,
getMockEndpoints,
getMockEndpointById,
} from './data/endpoints'
// Websites
export {
mockWebsites,
getMockWebsites,
getMockWebsiteById,
} from './data/websites'
// Subdomains
export {
mockSubdomains,
getMockSubdomains,
getMockSubdomainById,
} from './data/subdomains'
// Auth
export {
mockUser,
mockMeResponse,
mockLoginResponse,
mockLogoutResponse,
} from './data/auth'
// Engines
export {
mockEngines,
getMockEngines,
getMockEngineById,
} from './data/engines'
// Workers
export {
mockWorkers,
getMockWorkers,
getMockWorkerById,
} from './data/workers'
// Notifications
export {
mockNotifications,
getMockNotifications,
getMockUnreadCount,
} from './data/notifications'
// Scheduled Scans
export {
mockScheduledScans,
getMockScheduledScans,
getMockScheduledScanById,
} from './data/scheduled-scans'

View File

@@ -3,9 +3,12 @@ import createNextIntlPlugin from 'next-intl/plugin';
const withNextIntl = createNextIntlPlugin('./i18n/request.ts');
// Check if running on Vercel
const isVercel = process.env.VERCEL === '1';
const nextConfig: NextConfig = {
// Use standalone mode for Docker deployment
output: 'standalone',
// Use standalone mode for Docker deployment (not needed on Vercel)
...(isVercel ? {} : { output: 'standalone' }),
// Disable Next.js automatic add/remove trailing slash behavior
// Let us manually control URL format
skipTrailingSlashRedirect: true,
@@ -17,6 +20,10 @@ const nextConfig: NextConfig = {
allowedDevOrigins: ['192.168.*.*', '10.*.*.*', '172.16.*.*'],
async rewrites() {
// Skip rewrites on Vercel when using mock data
if (isVercel) {
return [];
}
// Use server service name in Docker environment, localhost for local development
const apiHost = process.env.API_HOST || 'localhost';
return [

View File

@@ -4,6 +4,7 @@
"private": true,
"scripts": {
"dev": "next dev --turbopack",
"dev:mock": "NEXT_PUBLIC_USE_MOCK=true next dev --turbopack",
"dev:noauth": "NEXT_PUBLIC_SKIP_AUTH=true next dev --turbopack",
"build": "next build --turbopack",
"start": "next start",
@@ -53,6 +54,7 @@
"cron-parser": "^5.4.0",
"cronstrue": "^3.9.0",
"date-fns": "^4.1.0",
"framer-motion": "^12.23.26",
"geist": "^1.5.1",
"is-ip": "^5.0.1",
"js-yaml": "^4.1.0",

View File

@@ -137,6 +137,9 @@ importers:
date-fns:
specifier: ^4.1.0
version: 4.1.0
framer-motion:
specifier: ^12.23.26
version: 12.23.26(react-dom@19.1.2(react@19.1.2))(react@19.1.2)
geist:
specifier: ^1.5.1
version: 1.5.1(next@15.5.9(react-dom@19.1.2(react@19.1.2))(react@19.1.2))
@@ -2311,6 +2314,20 @@ packages:
resolution: {integrity: sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==}
engines: {node: '>= 6'}
framer-motion@12.23.26:
resolution: {integrity: sha512-cPcIhgR42xBn1Uj+PzOyheMtZ73H927+uWPDVhUMqxy8UHt6Okavb6xIz9J/phFUHUj0OncR6UvMfJTXoc/LKA==}
peerDependencies:
'@emotion/is-prop-valid': '*'
react: ^18.0.0 || ^19.0.0
react-dom: ^18.0.0 || ^19.0.0
peerDependenciesMeta:
'@emotion/is-prop-valid':
optional: true
react:
optional: true
react-dom:
optional: true
function-bind@1.1.2:
resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==}
@@ -2767,6 +2784,12 @@ packages:
monaco-editor@0.55.1:
resolution: {integrity: sha512-jz4x+TJNFHwHtwuV9vA9rMujcZRb0CEilTEwG2rRSpe/A7Jdkuj8xPKttCgOh+v/lkHy7HsZ64oj+q3xoAFl9A==}
motion-dom@12.23.23:
resolution: {integrity: sha512-n5yolOs0TQQBRUFImrRfs/+6X4p3Q4n1dUEqt/H58Vx7OW6RF+foWEgmTVDhIWJIMXOuNNL0apKH2S16en9eiA==}
motion-utils@12.23.6:
resolution: {integrity: sha512-eAWoPgr4eFEOFfg2WjIsMoqJTW6Z8MTUCgn/GZ3VRpClWBdnbjryiA3ZSNLyxCTmCQx4RmYX6jX1iWHbenUPNQ==}
ms@2.1.3:
resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==}
@@ -5577,6 +5600,15 @@ snapshots:
hasown: 2.0.2
mime-types: 2.1.35
framer-motion@12.23.26(react-dom@19.1.2(react@19.1.2))(react@19.1.2):
dependencies:
motion-dom: 12.23.23
motion-utils: 12.23.6
tslib: 2.8.1
optionalDependencies:
react: 19.1.2
react-dom: 19.1.2(react@19.1.2)
function-bind@1.1.2: {}
function-timeout@0.1.1: {}
@@ -5996,6 +6028,12 @@ snapshots:
dompurify: 3.2.7
marked: 14.0.0
motion-dom@12.23.23:
dependencies:
motion-utils: 12.23.6
motion-utils@12.23.6: {}
ms@2.1.3: {}
msw@2.11.6(@types/node@20.19.19)(typescript@5.9.3):

View File

@@ -10,11 +10,16 @@ import type {
ChangePasswordRequest,
ChangePasswordResponse
} from '@/types/auth.types'
import { USE_MOCK, mockDelay, mockLoginResponse, mockLogoutResponse, mockMeResponse } from '@/mock'
/**
* User login
*/
export async function login(data: LoginRequest): Promise<LoginResponse> {
if (USE_MOCK) {
await mockDelay()
return mockLoginResponse
}
const res = await api.post<LoginResponse>('/auth/login/', data)
return res.data
}
@@ -23,6 +28,10 @@ export async function login(data: LoginRequest): Promise<LoginResponse> {
* User logout
*/
export async function logout(): Promise<LogoutResponse> {
if (USE_MOCK) {
await mockDelay()
return mockLogoutResponse
}
const res = await api.post<LogoutResponse>('/auth/logout/')
return res.data
}
@@ -31,6 +40,10 @@ export async function logout(): Promise<LogoutResponse> {
* Get current user information
*/
export async function getMe(): Promise<MeResponse> {
if (USE_MOCK) {
await mockDelay()
return mockMeResponse
}
const res = await api.get<MeResponse>('/auth/me/')
return res.data
}
@@ -39,6 +52,10 @@ export async function getMe(): Promise<MeResponse> {
* Change password
*/
export async function changePassword(data: ChangePasswordRequest): Promise<ChangePasswordResponse> {
if (USE_MOCK) {
await mockDelay()
return { message: 'Password changed successfully' }
}
const res = await api.post<ChangePasswordResponse>('/auth/change-password/', data)
return res.data
}

View File

@@ -1,7 +1,12 @@
import { api } from '@/lib/api-client'
import type { DashboardStats, AssetStatistics, StatisticsHistoryItem } from '@/types/dashboard.types'
import { USE_MOCK, mockDelay, mockDashboardStats, mockAssetStatistics, getMockStatisticsHistory } from '@/mock'
export async function getDashboardStats(): Promise<DashboardStats> {
if (USE_MOCK) {
await mockDelay()
return mockDashboardStats
}
const res = await api.get<DashboardStats>('/dashboard/stats/')
return res.data
}
@@ -10,6 +15,10 @@ export async function getDashboardStats(): Promise<DashboardStats> {
* Get asset statistics data (pre-aggregated)
*/
export async function getAssetStatistics(): Promise<AssetStatistics> {
if (USE_MOCK) {
await mockDelay()
return mockAssetStatistics
}
const res = await api.get<AssetStatistics>('/assets/statistics/')
return res.data
}
@@ -18,6 +27,10 @@ export async function getAssetStatistics(): Promise<AssetStatistics> {
* Get statistics history data (for line charts)
*/
export async function getStatisticsHistory(days: number = 7): Promise<StatisticsHistoryItem[]> {
if (USE_MOCK) {
await mockDelay()
return getMockStatisticsHistory(days)
}
const res = await api.get<StatisticsHistoryItem[]>('/assets/statistics/history/', {
params: { days }
})

View File

@@ -8,6 +8,7 @@ import type {
BatchDeleteEndpointsRequest,
BatchDeleteEndpointsResponse
} from "@/types/endpoint.types"
import { USE_MOCK, mockDelay, getMockEndpoints, getMockEndpointById } from '@/mock'
// Bulk create endpoints response type
export interface BulkCreateEndpointsResponse {
@@ -38,6 +39,12 @@ export class EndpointService {
* @returns Promise<Endpoint>
*/
static async getEndpointById(id: number): Promise<Endpoint> {
if (USE_MOCK) {
await mockDelay()
const endpoint = getMockEndpointById(id)
if (!endpoint) throw new Error('Endpoint not found')
return endpoint
}
const response = await api.get<Endpoint>(`/endpoints/${id}/`)
return response.data
}
@@ -48,6 +55,10 @@ export class EndpointService {
* @returns Promise<GetEndpointsResponse>
*/
static async getEndpoints(params: GetEndpointsRequest): Promise<GetEndpointsResponse> {
if (USE_MOCK) {
await mockDelay()
return getMockEndpoints(params)
}
// api-client.ts automatically converts camelCase params to snake_case
const response = await api.get<GetEndpointsResponse>('/endpoints/', {
params

View File

@@ -1,5 +1,6 @@
import apiClient from '@/lib/api-client'
import type { ScanEngine } from '@/types/engine.types'
import { USE_MOCK, mockDelay, getMockEngines, getMockEngineById } from '@/mock'
/**
* Engine API service
@@ -9,6 +10,10 @@ import type { ScanEngine } from '@/types/engine.types'
* Get engine list
*/
export async function getEngines(): Promise<ScanEngine[]> {
if (USE_MOCK) {
await mockDelay()
return getMockEngines()
}
// Engines are usually not many, get all
const response = await apiClient.get('/engines/', {
params: { pageSize: 1000 }
@@ -21,6 +26,12 @@ export async function getEngines(): Promise<ScanEngine[]> {
* Get engine details
*/
export async function getEngine(id: number): Promise<ScanEngine> {
if (USE_MOCK) {
await mockDelay()
const engine = getMockEngineById(id)
if (!engine) throw new Error('Engine not found')
return engine
}
const response = await apiClient.get(`/engines/${id}/`)
return response.data
}

View File

@@ -9,6 +9,7 @@ import type {
GetNotificationsRequest,
GetNotificationsResponse,
} from '@/types/notification.types'
import { USE_MOCK, mockDelay, getMockNotifications, getMockUnreadCount } from '@/mock'
export class NotificationService {
/**
@@ -18,6 +19,10 @@ export class NotificationService {
static async getNotifications(
params: GetNotificationsRequest = {}
): Promise<GetNotificationsResponse> {
if (USE_MOCK) {
await mockDelay()
return getMockNotifications(params)
}
const response = await api.get<GetNotificationsResponse>('/notifications/', {
params,
})
@@ -29,6 +34,10 @@ export class NotificationService {
* 后端返回: { updated: number }
*/
static async markAllAsRead(): Promise<{ updated: number }> {
if (USE_MOCK) {
await mockDelay()
return { updated: 2 }
}
const response = await api.post<{ updated: number }>('/notifications/mark-all-as-read/')
return response.data
}
@@ -38,6 +47,10 @@ export class NotificationService {
* 后端返回: { count: number }
*/
static async getUnreadCount(): Promise<{ count: number }> {
if (USE_MOCK) {
await mockDelay()
return getMockUnreadCount()
}
const response = await api.get<{ count: number }>('/notifications/unread-count/')
return response.data
}

View File

@@ -1,5 +1,6 @@
import { api } from "@/lib/api-client"
import type { Organization, OrganizationsResponse } from "@/types/organization.types"
import { USE_MOCK, mockDelay, getMockOrganizations, mockOrganizations } from '@/mock'
export class OrganizationService {
@@ -18,6 +19,10 @@ export class OrganizationService {
pageSize?: number
search?: string
}): Promise<OrganizationsResponse<Organization>> {
if (USE_MOCK) {
await mockDelay()
return getMockOrganizations(params)
}
const response = await api.get<OrganizationsResponse<Organization>>(
'/organizations/',
{ params }
@@ -31,6 +36,12 @@ export class OrganizationService {
* @returns Promise<Organization>
*/
static async getOrganizationById(id: string | number): Promise<Organization> {
if (USE_MOCK) {
await mockDelay()
const org = mockOrganizations.find(o => o.id === Number(id))
if (!org) throw new Error('Organization not found')
return org
}
const response = await api.get<Organization>(`/organizations/${id}/`)
return response.data
}

View File

@@ -8,11 +8,16 @@ import type {
QuickScanResponse,
ScanRecord
} from '@/types/scan.types'
import { USE_MOCK, mockDelay, getMockScans, getMockScanById, mockScanStatistics } from '@/mock'
/**
* Get scan list
*/
export async function getScans(params?: GetScansParams): Promise<GetScansResponse> {
if (USE_MOCK) {
await mockDelay()
return getMockScans(params)
}
const res = await api.get<GetScansResponse>('/scans/', { params })
return res.data
}
@@ -23,6 +28,12 @@ export async function getScans(params?: GetScansParams): Promise<GetScansRespons
* @returns Scan details
*/
export async function getScan(id: number): Promise<ScanRecord> {
if (USE_MOCK) {
await mockDelay()
const scan = getMockScanById(id)
if (!scan) throw new Error('Scan not found')
return scan
}
const res = await api.get<ScanRecord>(`/scans/${id}/`)
return res.data
}
@@ -95,6 +106,10 @@ export interface ScanStatistics {
* @returns Statistics data
*/
export async function getScanStatistics(): Promise<ScanStatistics> {
if (USE_MOCK) {
await mockDelay()
return mockScanStatistics
}
const res = await api.get<ScanStatistics>('/scans/statistics/')
return res.data
}

View File

@@ -5,11 +5,16 @@ import type {
CreateScheduledScanRequest,
UpdateScheduledScanRequest
} from '@/types/scheduled-scan.types'
import { USE_MOCK, mockDelay, getMockScheduledScans, getMockScheduledScanById } from '@/mock'
/**
* Get scheduled scan list
*/
export async function getScheduledScans(params?: { page?: number; pageSize?: number; search?: string }): Promise<GetScheduledScansResponse> {
if (USE_MOCK) {
await mockDelay()
return getMockScheduledScans(params)
}
const res = await api.get<GetScheduledScansResponse>('/scheduled-scans/', { params })
return res.data
}
@@ -18,6 +23,12 @@ export async function getScheduledScans(params?: { page?: number; pageSize?: num
* Get scheduled scan details
*/
export async function getScheduledScan(id: number): Promise<ScheduledScan> {
if (USE_MOCK) {
await mockDelay()
const scan = getMockScheduledScanById(id)
if (!scan) throw new Error('Scheduled scan not found')
return scan
}
const res = await api.get<ScheduledScan>(`/scheduled-scans/${id}/`)
return res.data
}

View File

@@ -0,0 +1,75 @@
import { api } from "@/lib/api-client"
import type { SearchParams, SearchResponse, AssetType } from "@/types/search.types"
/**
* 资产搜索 API 服务
*
* 搜索语法:
* - field="value" 模糊匹配ILIKE %value%
* - field=="value" 精确匹配
* - field!="value" 不等于
* - && AND 连接
* - || OR 连接
*
* 支持的资产类型:
* - website: 站点(默认)
* - endpoint: 端点
*
* 示例:
* - host="api" && tech="nginx"
* - tech="vue" || tech="react"
* - status=="200" && host!="test"
*/
export class SearchService {
  /**
   * Search assets.
   * GET /api/assets/search/
   */
  static async search(params: SearchParams): Promise<SearchResponse> {
    const qs = new URLSearchParams()
    const { q, asset_type, page, pageSize } = params
    if (q) qs.append('q', q)
    if (asset_type) qs.append('asset_type', asset_type)
    if (page) qs.append('page', String(page))
    if (pageSize) qs.append('pageSize', String(pageSize))
    const response = await api.get<SearchResponse>(
      `/assets/search/?${qs.toString()}`
    )
    return response.data
  }

  /**
   * Export the current search result set as a CSV download.
   * GET /api/assets/search/export/
   */
  static async exportCSV(query: string, assetType: AssetType): Promise<void> {
    const qs = new URLSearchParams()
    qs.append('q', query)
    qs.append('asset_type', assetType)
    const response = await api.get(
      `/assets/search/export/?${qs.toString()}`,
      { responseType: 'blob' }
    )

    // Prefer the server-provided filename (Content-Disposition header),
    // falling back to a dated default.
    let filename = `search_${assetType}_${new Date().toISOString().slice(0, 10)}.csv`
    const contentDisposition = response.headers?.['content-disposition']
    if (contentDisposition) {
      const match = contentDisposition.match(/filename="?([^"]+)"?/)
      if (match) filename = match[1]
    }

    // Trigger a browser download through a temporary object URL.
    const blob = new Blob([response.data as BlobPart], { type: 'text/csv;charset=utf-8' })
    const url = URL.createObjectURL(blob)
    const anchor = document.createElement('a')
    anchor.href = url
    anchor.download = filename
    document.body.appendChild(anchor)
    anchor.click()
    document.body.removeChild(anchor)
    URL.revokeObjectURL(url)
  }
}

View File

@@ -1,5 +1,6 @@
import { api } from "@/lib/api-client"
import type { Subdomain, GetSubdomainsParams, GetSubdomainsResponse, GetAllSubdomainsParams, GetAllSubdomainsResponse, GetSubdomainByIDResponse, BatchCreateSubdomainsResponse } from "@/types/subdomain.types"
import { USE_MOCK, mockDelay, getMockSubdomains, getMockSubdomainById } from '@/mock'
// Bulk create subdomains response type
export interface BulkCreateSubdomainsResponse {
@@ -48,6 +49,12 @@ export class SubdomainService {
* Get single subdomain details
*/
static async getSubdomainById(id: string | number): Promise<GetSubdomainByIDResponse> {
if (USE_MOCK) {
await mockDelay()
const subdomain = getMockSubdomainById(Number(id))
if (!subdomain) throw new Error('Subdomain not found')
return subdomain
}
const response = await api.get<GetSubdomainByIDResponse>(`/domains/${id}/`)
return response.data
}
@@ -164,6 +171,10 @@ export class SubdomainService {
/** Get all subdomains list (server-side pagination) */
static async getAllSubdomains(params?: GetAllSubdomainsParams): Promise<GetAllSubdomainsResponse> {
if (USE_MOCK) {
await mockDelay()
return getMockSubdomains(params)
}
const response = await api.get<GetAllSubdomainsResponse>('/domains/', {
params: {
page: params?.page || 1,

View File

@@ -12,11 +12,16 @@ import type {
BatchCreateTargetsRequest,
BatchCreateTargetsResponse,
} from '@/types/target.types'
import { USE_MOCK, mockDelay, getMockTargets, getMockTargetById } from '@/mock'
/**
* Get all targets list (paginated)
*/
export async function getTargets(page = 1, pageSize = 10, search?: string): Promise<TargetsResponse> {
if (USE_MOCK) {
await mockDelay()
return getMockTargets({ page, pageSize, search })
}
const response = await api.get<TargetsResponse>('/targets/', {
params: {
page,
@@ -31,6 +36,12 @@ export async function getTargets(page = 1, pageSize = 10, search?: string): Prom
* Get single target details
*/
export async function getTargetById(id: number): Promise<Target> {
if (USE_MOCK) {
await mockDelay()
const target = getMockTargetById(id)
if (!target) throw new Error('Target not found')
return target
}
const response = await api.get<Target>(`/targets/${id}/`)
return response.data
}

View File

@@ -1,5 +1,6 @@
import { api } from "@/lib/api-client"
import type { GetVulnerabilitiesParams } from "@/types/vulnerability.types"
import type { GetVulnerabilitiesParams, Vulnerability } from "@/types/vulnerability.types"
import { USE_MOCK, mockDelay, getMockVulnerabilities } from '@/mock'
export class VulnerabilityService {
/** Get all vulnerabilities list (used by global vulnerabilities page) */
@@ -7,12 +8,22 @@ export class VulnerabilityService {
params: GetVulnerabilitiesParams,
filter?: string,
): Promise<any> {
if (USE_MOCK) {
await mockDelay()
return getMockVulnerabilities(params)
}
const response = await api.get(`/assets/vulnerabilities/`, {
params: { ...params, filter },
})
return response.data
}
/** Get single vulnerability by ID */
static async getVulnerabilityById(id: number): Promise<Vulnerability> {
const response = await api.get<Vulnerability>(`/assets/vulnerabilities/${id}/`)
return response.data
}
/** Get vulnerability snapshot list by scan task (used by scan history page) */
static async getVulnerabilitiesByScanId(
scanId: number,

View File

@@ -9,6 +9,7 @@ import type {
CreateWorkerRequest,
UpdateWorkerRequest,
} from '@/types/worker.types'
import { USE_MOCK, mockDelay, getMockWorkers, getMockWorkerById } from '@/mock'
const BASE_URL = '/workers'
@@ -17,6 +18,10 @@ export const workerService = {
* Get Worker list
*/
async getWorkers(page = 1, pageSize = 10): Promise<WorkersResponse> {
if (USE_MOCK) {
await mockDelay()
return getMockWorkers(page, pageSize)
}
const response = await apiClient.get<WorkersResponse>(
`${BASE_URL}/?page=${page}&page_size=${pageSize}`
)
@@ -27,6 +32,12 @@ export const workerService = {
* Get single Worker details
*/
async getWorker(id: number): Promise<WorkerNode> {
if (USE_MOCK) {
await mockDelay()
const worker = getMockWorkerById(id)
if (!worker) throw new Error('Worker not found')
return worker
}
const response = await apiClient.get<WorkerNode>(`${BASE_URL}/${id}/`)
return response.data
},

View File

@@ -0,0 +1,90 @@
// Asset type discriminator
export type AssetType = 'website' | 'endpoint'
// Search result shape for website assets
export interface WebsiteSearchResult {
id: number
url: string
host: string
title: string
technologies: string[]
statusCode: number | null
contentLength: number | null
contentType: string
webserver: string
location: string
vhost: boolean | null
responseHeaders: Record<string, string>
responseBody: string
createdAt: string | null
targetId: number
vulnerabilities: Vulnerability[]
}
// Search result shape for endpoint assets
export interface EndpointSearchResult {
id: number
url: string
host: string
title: string
technologies: string[]
statusCode: number | null
contentLength: number | null
contentType: string
webserver: string
location: string
vhost: boolean | null
responseHeaders: Record<string, string>
responseBody: string
createdAt: string | null
targetId: number
matchedGfPatterns: string[]
}
// Generic search result type (kept for backward compatibility with older code)
export type SearchResult = WebsiteSearchResult | EndpointSearchResult
// Vulnerability summary attached to website search results
export interface Vulnerability {
id?: number
name: string
severity: 'critical' | 'high' | 'medium' | 'low' | 'info' | 'unknown'
vulnType: string
url?: string
}
// Search UI state
export type SearchState = 'initial' | 'searching' | 'results'
// Search response payload
export interface SearchResponse {
results: SearchResult[]
total: number
page: number
pageSize: number
totalPages: number
assetType: AssetType
}
// Search operator
export type SearchOperator = '=' | '==' | '!='
// A single search condition
export interface SearchCondition {
field: string
operator: SearchOperator
value: string
}
// Search expression (supports AND/OR composition)
export interface SearchExpression {
conditions: SearchCondition[] // conditions within one group are ANDed together
orGroups?: SearchExpression[] // groups are ORed together
}
// Query parameters sent to the backend
export interface SearchParams {
q?: string // full search expression string
asset_type?: AssetType // asset type
page?: number
pageSize?: number
}

10
frontend/vercel.json Normal file
View File

@@ -0,0 +1,10 @@
{
"$schema": "https://openapi.vercel.sh/vercel.json",
"framework": "nextjs",
"buildCommand": "pnpm build",
"installCommand": "pnpm install",
"env": {
"NEXT_PUBLIC_USE_MOCK": "true",
"NEXT_PUBLIC_SKIP_AUTH": "true"
}
}

View File

@@ -42,49 +42,61 @@ CYAN='\033[0;36m'
BOLD='\033[1m'
RESET='\033[0m'
# ==============================================================================
# 额外颜色定义
# ==============================================================================
MAGENTA='\033[0;35m'
DIM='\033[2m'
BG_BLUE='\033[44m'
BG_CYAN='\033[46m'
# ==============================================================================
# 日志函数
# ==============================================================================
info() {
echo -e "${BLUE}[INFO]${RESET} $1"
echo -e " ${CYAN}${RESET} $1"
}
success() {
echo -e "${GREEN}[OK]${RESET} $1"
echo -e " ${GREEN}${RESET} $1"
}
warn() {
echo -e "${YELLOW}[WARN]${RESET} $1"
echo -e " ${YELLOW}${RESET} $1"
}
error() {
echo -e "${RED}[ERROR]${RESET} $1"
echo -e " ${RED}${RESET} $1"
}
step() {
echo -e "\n${BOLD}${CYAN}>>> $1${RESET}"
echo -e "\n${BOLD}${CYAN}┌── $1${RESET}"
}
header() {
echo -e "${BOLD}${BLUE}============================================================${RESET}"
echo -e "${BOLD}${BLUE} $1${RESET}"
echo -e "${BOLD}${BLUE}============================================================${RESET}"
echo -e ""
echo -e "${BOLD}${BLUE}╔══════════════════════════════════════════════════════════╗${RESET}"
echo -e "${BOLD}${BLUE}${RESET} $1"
echo -e "${BOLD}${BLUE}╚══════════════════════════════════════════════════════════╝${RESET}"
}
# ==============================================================================
# 显示横幅
# ==============================================================================
show_banner() {
echo -e "${CYAN}"
cat << 'EOF'
__ __ _ ____ _
\ \/ /(_)_ __ __ _| _ \(_)_ __
\ / | | '_ \ / _` | |_) | | '_ \
/ \ | | | | | (_| | _ <| | | | |
/_/\_\|_|_| |_|\__, |_| \_\_|_| |_|
|___/
EOF
echo -e "${RESET}"
clear
echo -e ""
echo -e "${CYAN}${BOLD} ██╗ ██╗██╗███╗ ██╗ ██████╗ ██████╗ ██╗███╗ ██╗${RESET}"
echo -e "${CYAN} ╚██╗██╔╝██║████╗ ██║██╔════╝ ██╔══██╗██║████╗ ██║${RESET}"
echo -e "${BLUE}${BOLD} ╚███╔╝ ██║██╔██╗ ██║██║ ███╗██████╔╝██║██╔██╗ ██║${RESET}"
echo -e "${BLUE} ██╔██╗ ██║██║╚██╗██║██║ ██║██╔══██╗██║██║╚██╗██║${RESET}"
echo -e "${MAGENTA}${BOLD} ██╔╝ ██╗██║██║ ╚████║╚██████╔╝██║ ██║██║██║ ╚████║${RESET}"
echo -e "${MAGENTA} ╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝${RESET}"
echo -e ""
echo -e "${DIM} ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
echo -e "${BOLD} 🔒 分布式安全扫描平台 │ 一键部署 (Ubuntu)${RESET}"
echo -e "${DIM} ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
echo -e ""
}
# ==============================================================================
@@ -120,7 +132,6 @@ fi
# Show the title banner, then basic install context (user, path, version).
# NOTE(review): REAL_USER / ROOT_DIR / APP_VERSION are set earlier in the
# file, outside this excerpt.
show_banner
header "XingRin 一键安装脚本 (Ubuntu)"
info "当前用户: ${BOLD}$REAL_USER${RESET}"
info "项目路径: ${BOLD}$ROOT_DIR${RESET}"
info "安装版本: ${BOLD}$APP_VERSION${RESET}"
@@ -272,11 +283,62 @@ get_accelerated_image() {
echo "$image"
}
# Verify that the remote PostgreSQL server can provide the pg_ivm extension.
# Arguments: $1 host, $2 port, $3 user, $4 password, $5 database name.
# Returns 0 when CREATE EXTENSION succeeds (or it already exists), 1 after
# printing installation guidance otherwise.
check_pg_ivm() {
  local host="$1"
  local port="$2"
  local user="$3"
  local pass="$4"
  local database="$5"

  info "检测 pg_ivm 扩展..."

  # Probe via a throwaway psql client container; the password travels through
  # PGPASSWORD rather than the URL. stderr is silenced because the failure
  # branch prints its own guidance below.
  local url="postgresql://$user@$host:$port/$database"
  if ! docker run --rm \
    -e PGPASSWORD="$pass" \
    postgres:15 \
    psql "$url" \
    -c "CREATE EXTENSION IF NOT EXISTS pg_ivm;" 2>/dev/null; then
    echo
    error "pg_ivm 扩展未安装或无法启用"
    echo
    echo -e "${YELLOW}=========================================="
    echo -e "pg_ivm 是必需的扩展,用于增量维护物化视图"
    echo -e "要求: PostgreSQL 13+ 版本"
    echo -e "==========================================${RESET}"
    echo
    echo -e "请在远程 PostgreSQL 服务器上执行以下命令一键安装:"
    echo
    echo -e " ${BOLD}curl -sSL https://raw.githubusercontent.com/yyhuni/xingrin/main/docker/scripts/install-pg-ivm.sh | sudo bash${RESET}"
    echo
    echo -e "安装完成后,请重新运行 install.sh"
    echo -e "${YELLOW}==========================================${RESET}"
    return 1
  fi

  success "pg_ivm 扩展已启用"
  return 0
}
# Show the post-install summary; "$1" == "success" selects the success view.
# NOTE(review): this body reads like a rendered diff — "@@ … @@" hunk headers
# sit mid-function and several old/new line pairs are both present (e.g. the
# "$1" test appears twice, leaving the if/fi count unbalanced as literal
# shell). Verify against the real install.sh before treating it as runnable.
show_summary() {
echo
# Old/new diff pair: the same "$1" success test appears twice below.
if [ "$1" == "success" ]; then
header "服务已成功启动!"
if [ "$1" = "success" ]; then
# Success banner (block-character "DONE!" art).
echo -e ""
echo -e "${GREEN}${BOLD} ╔═══════════════════════════════════════════════════╗${RESET}"
echo -e "${GREEN}${BOLD} ║ ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ██████╗ ██████╗ ███╗ ██╗███████╗██║ ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ██╔══██╗██╔═══██╗████╗ ██║██╔════╝██║ ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ██║ ██║██║ ██║██╔██╗ ██║█████╗ ██║ ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ██║ ██║██║ ██║██║╚██╗██║██╔══╝ ╚═╝ ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ██████╔╝╚██████╔╝██║ ╚████║███████╗██║ ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝╚═╝ ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ✨ XingRin 平台部署成功! ║${RESET}"
echo -e "${GREEN}${BOLD} ║ ║${RESET}"
echo -e "${GREEN}${BOLD} ╚═══════════════════════════════════════════════════╝${RESET}"
echo -e ""
else
header "安装完成 Summary"
fi
@@ -287,12 +349,11 @@ show_summary() {
# Pull current DB credentials out of docker/.env for display.
DB_USER=$(grep "^DB_USER=" "$DOCKER_DIR/.env" | cut -d= -f2)
DB_PASSWORD=$(grep "^DB_PASSWORD=" "$DOCKER_DIR/.env" | cut -d= -f2)
# Old/new diff pair: plain "-----" layout vs. the tree-style layout below it.
echo -e "${YELLOW}数据库配置:${RESET}"
echo -e "------------------------------------------------------------"
echo -e " 服务器地址: ${DB_HOST:-未知}"
echo -e " 用户名: ${DB_USER:-未知}"
echo -e " 密码: ${DB_PASSWORD:-未知}"
echo -e "------------------------------------------------------------"
echo -e "${DIM} ──────────────────────────────────────────────────────${RESET}"
echo -e " ${YELLOW}🗄 数据库配置${RESET}"
echo -e " ${DIM}├─${RESET} 服务器地址: ${BOLD}${DB_HOST:-未知}${RESET}"
echo -e " ${DIM}├─${RESET} 用户名: ${BOLD}${DB_USER:-未知}${RESET}"
echo -e " ${DIM}└─${RESET} 密码: ${BOLD}${DB_PASSWORD:-未知}${RESET}"
echo
fi
@@ -304,27 +365,33 @@ show_summary() {
# ACCESS_HOST fallback; the branch opening this fi is in omitted diff context.
ACCESS_HOST="localhost"
fi
# Old/new diff pair: printf table layout vs. tree-style access-address block.
echo -e "${GREEN}访问地址:${RESET}"
printf " %-16s %s\n" "XingRin:" "https://${ACCESS_HOST}:8083/"
echo -e "${DIM} ──────────────────────────────────────────────────────${RESET}"
echo -e " ${GREEN}🌐 访问地址${RESET}"
echo -e " ${DIM}└─${RESET} XingRin: ${BOLD}${CYAN}https://${ACCESS_HOST}:8083/${RESET}"
echo
# Old/new diff pair: default credentials block (admin/admin).
echo -e "${YELLOW}默认登录账号:${RESET}"
printf " %-16s %s\n" "用户名:" "admin"
printf " %-16s %s\n" "密码:" "admin"
echo -e "${YELLOW} [!] 请首次登录后修改密码!${RESET}"
echo -e "${DIM} ──────────────────────────────────────────────────────${RESET}"
echo -e " ${MAGENTA}🔑 默认登录${RESET}"
echo -e " ${DIM}├─${RESET} 用户名: ${BOLD}admin${RESET}"
echo -e " ${DIM}└─${RESET} 密码: ${BOLD}admin${RESET}"
echo -e " ${YELLOW} ⚠ 请首次登录后修改密码!${RESET}"
echo
# Old condition gated the follow-up commands with !=; the new one inverts it.
if [ "$1" != "success" ]; then
echo -e "${GREEN}后续启动命令:${RESET}"
echo -e " ./start.sh # 启动所有服务"
echo -e " ./start.sh --no-frontend # 只启动后端"
echo -e " ./stop.sh # 停止所有服务"
if [ "$1" = "success" ]; then
: # success mode: skip the follow-up command list
else
echo -e "${DIM} ──────────────────────────────────────────────────────${RESET}"
echo -e " ${BLUE}🚀 后续命令${RESET}"
echo -e " ${DIM}├─${RESET} ./start.sh ${DIM}# 启动所有服务${RESET}"
echo -e " ${DIM}├─${RESET} ./start.sh --no-frontend ${DIM}# 只启动后端${RESET}"
echo -e " ${DIM}└─${RESET} ./stop.sh ${DIM}# 停止所有服务${RESET}"
echo
fi
# Old/new diff pair: cloud-firewall reminder (ports 8083, 5432).
echo -e "${YELLOW}[!] 云服务器某些厂商默认开启了安全策略(阿里云/腾讯云/华为云等):${RESET}"
echo -e " 端口未放行可能导致无法访问或无法扫描强烈推荐用国外vps或者在云控制台放行"
echo -e " ${RESET}8083, 5432"
echo -e "${DIM} ──────────────────────────────────────────────────────${RESET}"
echo -e " ${YELLOW}⚠ 云服务器端口提醒${RESET}"
echo -e " ${DIM}└─${RESET} 某些厂商默认开启安全策略(阿里云/腾讯云/华为云)"
echo -e " 端口未放行可能导致无法访问,请在云控制台放行: ${BOLD}8083, 5432${RESET}"
echo
}
@@ -553,6 +620,11 @@ if [ -f "$DOCKER_DIR/.env.example" ]; then
-c "CREATE DATABASE $prefect_db;" 2>/dev/null || true
success "数据库准备完成"
# Check the remote server for the pg_ivm extension (required by the platform);
# abort the whole install if it cannot be enabled.
if ! check_pg_ivm "$db_host" "$db_port" "$db_user" "$db_password" "$db_name"; then
exit 1
fi
# Persist the collected DB settings into docker/.env in place.
# NOTE(review): values are interpolated into the sed program unescaped —
# presumably fine for typical hosts/users, but a password containing '/' or
# '&' would break the substitution; confirm upstream input constraints.
sed_inplace "s/^DB_HOST=.*/DB_HOST=$db_host/" "$DOCKER_DIR/.env"
sed_inplace "s/^DB_PORT=.*/DB_PORT=$db_port/" "$DOCKER_DIR/.env"
sed_inplace "s/^DB_USER=.*/DB_USER=$db_user/" "$DOCKER_DIR/.env"
@@ -672,7 +744,7 @@ fi
# Start services
# ==============================================================================
step "正在启动服务..."
# NOTE(review): old/new diff pair — both start.sh invocations are present;
# the newer one adds --quiet. $START_ARGS is expanded unquoted, presumably on
# purpose so multiple flags word-split into separate arguments (an array
# would be the safer idiom) — confirm against the real script.
"$ROOT_DIR/start.sh" $START_ARGS
"$ROOT_DIR/start.sh" ${START_ARGS} --quiet
# ==============================================================================
# 完成总结