Compare commits

...

10 Commits

Author SHA1 Message Date
yyhuni
4bab76f233 fix: organization deletion issue 2025-12-31 17:50:37 +08:00
yyhuni
09416b4615 fix: Redis port 2025-12-31 17:45:25 +08:00
github-actions[bot]
bc1c5f6b0e chore: bump version to v1.2.7-dev 2025-12-31 06:16:42 +00:00
github-actions[bot]
2f2742e6fe chore: bump version to v1.2.6-dev 2025-12-31 05:29:36 +00:00
yyhuni
be3c346a74 Add search field 2025-12-31 12:40:21 +08:00
yyhuni
0c7a6fff12 Add search support for the tech field 2025-12-31 12:37:02 +08:00
yyhuni
3b4f0e3147 fix: fingerprint detection 2025-12-31 12:30:31 +08:00
yyhuni
51212a2a0c fix: fingerprint detection 2025-12-31 12:17:23 +08:00
yyhuni
58533bbaf6 fix:docker api 2025-12-31 12:03:08 +08:00
github-actions[bot]
6ccca1602d chore: bump version to v1.2.5-dev 2025-12-31 03:48:32 +00:00
20 changed files with 38 additions and 62 deletions

View File

@@ -1 +1 @@
v1.2.2-dev
v1.2.7-dev

View File

@@ -28,6 +28,7 @@ class EndpointService:
'host': 'host',
'title': 'title',
'status': 'status_code',
'tech': 'tech',
}
def __init__(self):
@@ -115,7 +116,7 @@ class EndpointService:
"""获取目标下的所有端点"""
queryset = self.repo.get_by_target(target_id)
if filter_query:
queryset = apply_filters(queryset, filter_query, self.FILTER_FIELD_MAPPING)
queryset = apply_filters(queryset, filter_query, self.FILTER_FIELD_MAPPING, json_array_fields=['tech'])
return queryset
def count_endpoints_by_target(self, target_id: int) -> int:
@@ -134,7 +135,7 @@ class EndpointService:
"""获取所有端点(全局查询)"""
queryset = self.repo.get_all()
if filter_query:
queryset = apply_filters(queryset, filter_query, self.FILTER_FIELD_MAPPING)
queryset = apply_filters(queryset, filter_query, self.FILTER_FIELD_MAPPING, json_array_fields=['tech'])
return queryset
def iter_endpoint_urls_by_target(self, target_id: int, chunk_size: int = 1000) -> Iterator[str]:

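The change above registers `tech` in the filter-field mapping and passes `json_array_fields=['tech']` into `apply_filters`. The `apply_filters` helper itself is not part of this compare; a minimal sketch of how an array-aware branch might look, assuming a Django ORM queryset and a PostgreSQL array column (consistent with the `%s::varchar[]` cast that appears later in this diff):

```python
# Hypothetical sketch only; the real apply_filters is not shown in this compare.
from django.db.models import QuerySet

def apply_field_filter(queryset: QuerySet, field: str, value: str,
                       json_array_fields: list[str]) -> QuerySet:
    if field in json_array_fields:
        # ArrayField lookup: the row matches if its array contains the value
        return queryset.filter(**{f"{field}__contains": [value]})
    # Scalar fields fall back to a case-insensitive substring match
    return queryset.filter(**{f"{field}__icontains": value})
```

With such a branch in place, a filter like `tech="nginx"` matches any row whose `tech` array contains the exact entry `nginx`.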
View File

@@ -20,6 +20,7 @@ class WebSiteService:
'host': 'host',
'title': 'title',
'status': 'status_code',
'tech': 'tech',
}
def __init__(self, repository=None):
@@ -107,14 +108,14 @@ class WebSiteService:
"""获取目标下的所有网站"""
queryset = self.repo.get_by_target(target_id)
if filter_query:
queryset = apply_filters(queryset, filter_query, self.FILTER_FIELD_MAPPING)
queryset = apply_filters(queryset, filter_query, self.FILTER_FIELD_MAPPING, json_array_fields=['tech'])
return queryset
def get_all(self, filter_query: Optional[str] = None):
"""获取所有网站"""
queryset = self.repo.get_all()
if filter_query:
queryset = apply_filters(queryset, filter_query, self.FILTER_FIELD_MAPPING)
queryset = apply_filters(queryset, filter_query, self.FILTER_FIELD_MAPPING, json_array_fields=['tech'])
return queryset
def get_by_url(self, url: str, target_id: int) -> int:

View File

@@ -274,6 +274,7 @@ class WebSiteViewSet(viewsets.ModelViewSet):
- host="example" 主机名模糊匹配
- title="login" 标题模糊匹配
- status="200,301" 状态码多值匹配
- tech="nginx" 技术栈匹配(数组字段)
- 多条件空格分隔 AND 关系
"""
@@ -534,6 +535,7 @@ class EndpointViewSet(viewsets.ModelViewSet):
- host="example" 主机名模糊匹配
- title="login" 标题模糊匹配
- status="200,301" 状态码多值匹配
- tech="nginx" 技术栈匹配(数组字段)
- 多条件空格分隔 AND 关系
"""

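For context, these filter strings are passed to the list endpoints as a query parameter. The exact parameter name is not visible in this compare, so the request below is only an illustration (the `filter` parameter name, host, and path are assumptions):

```python
import requests

# Hypothetical request against the endpoint list; parameter name and URL are assumed.
resp = requests.get(
    "https://your-server:8083/api/endpoints/",
    params={"filter": 'tech="nginx" status="200,301"'},
    verify=False,  # assuming a self-signed certificate on 8083
)
print(resp.json())
```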
View File

@@ -57,9 +57,6 @@ def fetch_config_and_setup_django():
os.environ.setdefault("DB_USER", db_user)
os.environ.setdefault("DB_PASSWORD", config['db']['password'])
# Redis configuration
os.environ.setdefault("REDIS_URL", config['redisUrl'])
# Logging configuration
os.environ.setdefault("LOG_DIR", config['paths']['logs'])
os.environ.setdefault("LOG_LEVEL", config['logging']['level'])
@@ -71,7 +68,6 @@ def fetch_config_and_setup_django():
print(f"[CONFIG] DB_PORT: {db_port}")
print(f"[CONFIG] DB_NAME: {db_name}")
print(f"[CONFIG] DB_USER: {db_user}")
print(f"[CONFIG] REDIS_URL: {config['redisUrl']}")
except Exception as e:
print(f"[ERROR] 获取配置失败: {config_url} - {e}", file=sys.stderr)

View File

@@ -90,6 +90,7 @@ class Command(BaseCommand):
single_config,
sort_keys=False,
allow_unicode=True,
default_flow_style=None,
)
except yaml.YAMLError as e:
self.stdout.write(self.style.ERROR(f'Failed to generate config for sub-engine {scan_type}: {e}'))

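The added `default_flow_style=None` changes how `yaml.dump` renders collections: lists that contain only scalars are emitted inline in flow style (`[a, b, c]`), while mappings stay in block style. A standalone illustration using keys from the engine YAML shown later in this compare:

```python
import yaml

single_config = {
    "fingerprint_detect": {
        "tools": {
            "xingfinger": {
                "enabled": True,
                "fingerprint-libs": ["ehole", "goby", "wappalyzer"],
            }
        }
    }
}

# With default_flow_style=None the scalar-only list is rendered inline,
# matching the fingerprint-libs style in the engine config below.
print(yaml.dump(single_config, sort_keys=False, allow_unicode=True,
                default_flow_style=None))
# fingerprint_detect:
#   tools:
#     xingfinger:
#       enabled: true
#       fingerprint-libs: [ehole, goby, wappalyzer]
```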
View File

@@ -264,10 +264,6 @@ class TaskDistributor:
"""
import shlex
# Docker API version pinning (avoids version incompatibility issues)
# Defaults to 1.40 for maximum compatibility (supports Docker 19.03+)
api_version = getattr(settings, 'DOCKER_API_VERSION', '1.40')
# Determine the network and server address based on the worker type
if worker.is_local:
# Local: join the Docker network and use internal service names
@@ -315,9 +311,7 @@ class TaskDistributor:
# - Local worker: install.sh has already pre-pulled the image, so the local copy is used directly
# - Remote worker: the image was pre-pulled during deploy, so the local copy is used directly
# - Avoids hitting Docker Hub for every task, improving performance and stability
# Wrap the sh -c command in double quotes; the single-quoted arguments produced by shlex.quote inside are then parsed correctly
# The DOCKER_API_VERSION environment variable keeps the client and server API versions compatible
cmd = f'''DOCKER_API_VERSION={api_version} docker run --rm -d --pull=missing {network_arg} \\
cmd = f'''docker run --rm -d --pull=missing {network_arg} \\
{' '.join(env_vars)} \\
{' '.join(volumes)} \\
{self.docker_image} \\

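The removed lines drop the `DOCKER_API_VERSION=...` prefix from the remote `docker run` command. As the surrounding comments note, the command is wrapped in double quotes for a remote `sh -c`, while individual arguments are single-quoted via `shlex.quote`. A minimal sketch of that quoting pattern, with placeholder values where the real ones are not visible in this diff:

```python
import shlex

# Placeholder values; the real env vars, volumes and image come from TaskDistributor.
raw_env = {
    "TASK_ID": "42",
    "SCAN_NAME": "full scan 01",   # a value with spaces must survive two shells
}
env_vars = ["-e " + shlex.quote(f"{k}={v}") for k, v in raw_env.items()]
volumes = ["-v " + shlex.quote("/opt/xingrin/results:/opt/xingrin/results")]
network_arg = "--network xingrin_network"   # assumed network name
docker_image = "xingrin/worker:latest"      # assumed image tag

cmd = f"""docker run --rm -d --pull=missing {network_arg} \\
    {' '.join(env_vars)} \\
    {' '.join(volumes)} \\
    {docker_image}"""

# Wrapping the whole command in double quotes keeps the inner single quotes
# produced by shlex.quote intact, so the remote shell re-parses them correctly.
print(f'sh -c "{cmd}"')
```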
View File

@@ -340,13 +340,12 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
Returns:
{
"db": {"host": "...", "port": "...", ...},
"redisUrl": "...",
"paths": {"results": "...", "logs": "..."}
}
Configuration logic:
- Local worker (is_local=true): db_host=postgres, redis=redis:6379
- Remote worker (is_local=false): db_host=PUBLIC_HOST, redis=PUBLIC_HOST:6379
- Local worker (is_local=true): db_host=postgres
- Remote worker (is_local=false): db_host=PUBLIC_HOST
"""
from django.conf import settings
import logging
@@ -371,20 +370,17 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
if is_local_worker:
# Local worker: use the internal Docker service names directly
worker_db_host = 'postgres'
worker_redis_url = 'redis://redis:6379/0'
else:
# Remote worker: access via the public IP
public_host = settings.PUBLIC_HOST
if public_host in ('server', 'localhost', '127.0.0.1'):
logger.warning("远程 Worker 请求配置,但 PUBLIC_HOST=%s 不是有效的公网地址", public_host)
worker_db_host = public_host
worker_redis_url = f'redis://{public_host}:6379/0'
else:
# Remote database scenario: all workers use DB_HOST
worker_db_host = db_host
worker_redis_url = getattr(settings, 'WORKER_REDIS_URL', 'redis://redis:6379/0')
logger.info("返回 Worker 配置 - db_host: %s, redis_url: %s", worker_db_host, worker_redis_url)
logger.info("返回 Worker 配置 - db_host: %s", worker_db_host)
return success_response(
data={
@@ -395,7 +391,6 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
'user': settings.DATABASES['default']['USER'],
'password': settings.DATABASES['default']['PASSWORD'],
},
'redisUrl': worker_redis_url,
'paths': {
'results': getattr(settings, 'CONTAINER_RESULTS_MOUNT', '/opt/xingrin/results'),
'logs': getattr(settings, 'CONTAINER_LOGS_MOUNT', '/opt/xingrin/logs'),

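After this change, the `/api/workers/config/` response no longer contains `redisUrl`. A sketch of the two response shapes, built only from the fields visible in this diff (all values are placeholders):

```python
# Placeholder values for illustration; the structure follows the docstring above.
local_worker_config = {
    "db": {"host": "postgres", "port": "5432", "name": "xingrin_db",
           "user": "postgres", "password": "***"},
    "paths": {"results": "/opt/xingrin/results", "logs": "/opt/xingrin/logs"},
}

remote_worker_config = {
    "db": {"host": "203.0.113.10",  # PUBLIC_HOST of the server
           "port": "5432", "name": "xingrin_db",
           "user": "postgres", "password": "***"},
    "paths": {"results": "/opt/xingrin/results", "logs": "/opt/xingrin/logs"},
}
```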
View File

@@ -87,7 +87,7 @@ fingerprint_detect:
tools:
xingfinger:
enabled: true
fingerprint-libs: [ehole, goby, wappalyzer] # Enabled fingerprint libraries: ehole, goby, wappalyzer, fingers, fingerprinthub, arl
fingerprint-libs: [ehole, goby, wappalyzer, fingers, fingerprinthub, arl] # All fingerprint libraries
# ==================== Directory scan ====================
directory_scan:

View File

@@ -113,9 +113,10 @@ def bulk_merge_tech_field(
host = parsed.hostname or ''
# Insert new records (with conflict handling)
# Explicitly pass default values for all NOT NULL fields
insert_sql = f"""
INSERT INTO {table_name} (target_id, url, host, tech, created_at)
VALUES (%s, %s, %s, %s::varchar[], NOW())
INSERT INTO {table_name} (target_id, url, host, location, title, webserver, body_preview, content_type, tech, created_at)
VALUES (%s, %s, %s, '', '', '', '', '', %s::varchar[], NOW())
ON CONFLICT (target_id, url) DO UPDATE SET
tech = (
SELECT ARRAY(SELECT DISTINCT unnest(

View File
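The hunk above is cut off mid-expression, so the full `DO UPDATE` clause is not visible here. For reference, a common PostgreSQL pattern for this kind of merge (deduplicating the union of the stored and incoming arrays) is sketched below; the table name is a placeholder and the exact continuation of the truncated statement is an assumption:

```python
# Illustrative only; the real statement in bulk_merge_tech_field is truncated above.
table_name = "website"  # placeholder
upsert_sql = f"""
INSERT INTO {table_name} (target_id, url, host, location, title, webserver,
                          body_preview, content_type, tech, created_at)
VALUES (%s, %s, %s, '', '', '', '', '', %s::varchar[], NOW())
ON CONFLICT (target_id, url) DO UPDATE SET
    tech = (
        SELECT ARRAY(
            SELECT DISTINCT t
            FROM unnest({table_name}.tech || EXCLUDED.tech) AS t
        )
    )
"""
```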

@@ -345,12 +345,6 @@ TASK_SUBMIT_INTERVAL = int(os.getenv('TASK_SUBMIT_INTERVAL', '6'))
# Docker network name for the local worker (must match the one defined in docker-compose.yml)
DOCKER_NETWORK_NAME = os.getenv('DOCKER_NETWORK_NAME', 'xingrin_network')
# Docker API version pinning (prevents client/server API version mismatch)
# API 1.40 supports Docker 19.03+ (2019 onwards) and offers maximum compatibility
# If every worker node runs Docker 20.10+, this can be raised to 1.41
# Check a worker node's API version: ssh user@worker "docker version --format '{{.Server.APIVersion}}'"
DOCKER_API_VERSION = os.getenv('DOCKER_API_VERSION', '1.40')
# Host-side mount source paths (all nodes use the same fixed paths)
# Create before deployment: mkdir -p /opt/xingrin
HOST_RESULTS_DIR = '/opt/xingrin/results'
@@ -361,25 +355,16 @@ HOST_WORDLISTS_DIR = '/opt/xingrin/wordlists'
# ============================================
# Worker config center (task containers fetch config from /api/workers/config/)
# ============================================
# Worker database/Redis addresses are returned dynamically by the config API in worker_views.py
# The worker database address is returned dynamically by the config API in worker_views.py
# Different configs are returned depending on the request origin (local/remote):
# - Local worker (inside the Docker network): use the internal service names postgres, redis
# - Local worker (inside the Docker network): use the internal service name postgres
# - Remote worker (public network access): use PUBLIC_HOST
#
# The variables below are only fallback/compatibility settings; the actual config is generated dynamically by the API
# Note: Redis is only used inside the server containers; workers do not need to connect to Redis directly
_db_host = DATABASES['default']['HOST']
_is_internal_db = _db_host in ('postgres', 'localhost', '127.0.0.1')
WORKER_DB_HOST = os.getenv('WORKER_DB_HOST', _db_host)
# Redis address for remote workers (derived automatically)
# - If PUBLIC_HOST is an external IP → use PUBLIC_HOST
# - If PUBLIC_HOST is a Docker-internal name → use redis (local deployment)
_is_internal_public = PUBLIC_HOST in ('server', 'localhost', '127.0.0.1')
WORKER_REDIS_URL = os.getenv(
'WORKER_REDIS_URL',
'redis://redis:6379/0' if _is_internal_public else f'redis://{PUBLIC_HOST}:6379/0'
)
# Container-side mount target paths (uniformly /opt/xingrin)
CONTAINER_RESULTS_MOUNT = '/opt/xingrin/results'
CONTAINER_LOGS_MOUNT = '/opt/xingrin/logs'

View File

@@ -9,9 +9,8 @@ DB_USER=postgres
DB_PASSWORD=123.com
# ==================== Redis configuration ====================
# Inside the Docker network, the Redis service name is redis
# Redis is only used on the internal Docker network; no public port is exposed
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_DB=0
# ==================== Service port configuration ====================

View File

@@ -24,8 +24,6 @@ services:
redis:
image: redis:7-alpine
restart: always
ports:
- "${REDIS_PORT}:6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s

View File

@@ -30,8 +30,6 @@ services:
redis:
image: redis:7-alpine
restart: always
ports:
- "${REDIS_PORT}:6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s

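With the published port removed from both compose files, Redis is reachable only from containers attached to the compose network. Server-side code keeps connecting through the internal service name; a minimal sketch assuming the redis-py client:

```python
import redis

# Inside the Docker network the hostname "redis" resolves to the redis service,
# so no host port mapping is required.
client = redis.Redis.from_url("redis://redis:6379/0")
client.ping()
```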
View File

@@ -13,9 +13,9 @@
- **Permissions**: sudo administrator privileges
- **Port requirements**: the following ports must be open
- `8083` - HTTPS access (primary access port)
- `5432` - PostgreSQL database (if using the local database)
- `6379` - Redis cache service
- `5432` - PostgreSQL database (if using the local database and remote workers exist)
- The backend API listens on 8888 only inside the container and is reverse-proxied by nginx to 8083; 8888 does not need to be opened to the public network
- Redis is only used on the internal Docker network and does not need to be exposed
## One-click installation
@@ -60,8 +60,7 @@ sudo ./install.sh --no-frontend
#### Ports that must be opened
```
8083 - HTTPS access (primary access port)
5432 - PostgreSQL (if using the local database)
6379 - Redis cache
5432 - PostgreSQL (if using the local database and remote workers exist)
```
#### Recommended approach

View File

@@ -14,6 +14,7 @@ const ENDPOINT_FILTER_FIELDS: FilterField[] = [
{ key: "host", label: "Host", description: "Hostname" },
{ key: "title", label: "Title", description: "Page title" },
{ key: "status", label: "Status", description: "HTTP status code" },
{ key: "tech", label: "Tech", description: "Technologies" },
]
// Endpoint page filter examples
@@ -21,6 +22,7 @@ const ENDPOINT_FILTER_EXAMPLES = [
'url="/api/*" && status="200"',
'host="api.example.com" || host="admin.example.com"',
'title="Dashboard" && status!="404"',
'tech="php" || tech="wordpress"',
]
interface EndpointsDataTableProps<TData extends { id: number | string }, TValue> {

View File

@@ -15,6 +15,7 @@ const WEBSITE_FILTER_FIELDS: FilterField[] = [
{ key: "host", label: "Host", description: "Hostname" },
{ key: "title", label: "Title", description: "Page title" },
{ key: "status", label: "Status", description: "HTTP status code" },
{ key: "tech", label: "Tech", description: "Technologies" },
]
// Website page filter examples
@@ -22,6 +23,7 @@ const WEBSITE_FILTER_EXAMPLES = [
'host="api.example.com" && status="200"',
'title="Login" || title="Admin"',
'url="/api/*" && status!="404"',
'tech="nginx" || tech="apache"',
]
interface WebSitesDataTableProps {

View File

@@ -244,8 +244,8 @@ export function useBatchDeleteOrganizations() {
},
onSuccess: (response) => {
toastMessages.dismiss('batch-delete')
const { deletedOrganizationCount } = response
toastMessages.success('toast.organization.delete.bulkSuccess', { count: deletedOrganizationCount })
const { deletedCount } = response
toastMessages.success('toast.organization.delete.bulkSuccess', { count: deletedCount })
},
onError: (error: any, deletedIds, context) => {
toastMessages.dismiss('batch-delete')

View File

@@ -127,13 +127,15 @@ export class OrganizationService {
*/
static async batchDeleteOrganizations(organizationIds: number[]): Promise<{
message: string
deletedOrganizationCount: number
deletedCount: number
deletedOrganizations: string[]
}> {
const response = await api.post<{
message: string
deletedOrganizationCount: number
}>('/organizations/batch_delete/', {
organizationIds // Use camelCase; the interceptor will automatically convert to organization_ids
deletedCount: number
deletedOrganizations: string[]
}>('/organizations/bulk-delete/', {
ids: organizationIds // Backend expects 'ids' parameter
})
return response.data
}

View File

@@ -322,7 +322,7 @@ show_summary() {
echo -e "${YELLOW}[!] 云服务器某些厂商默认开启了安全策略(阿里云/腾讯云/华为云等):${RESET}"
echo -e " 端口未放行可能导致无法访问或无法扫描强烈推荐用国外vps或者在云控制台放行"
echo -e " ${RESET}8083, 5432, 6379"
echo -e " ${RESET}8083, 5432"
echo
}