Compare commits

...

10 Commits

Author SHA1 Message Date
yyhuni 1bb6e90c3d fix: github action 2025-12-19 12:14:40 +08:00
yyhuni 9004c77031 feat: load-aware notifications 2025-12-19 12:11:20 +08:00
yyhuni 71de0b4b1b fix: frontend display 2025-12-19 11:32:02 +08:00
github-actions[bot] 1ef1f9709e chore: bump version to v1.0.14 2025-12-19 03:12:46 +00:00
yyhuni 3323bd2a4f fix: frontend build issue 2025-12-19 10:54:23 +08:00
yyhuni df602dd1ae improve: error message when repository fetch fails 2025-12-19 10:40:29 +08:00
yyhuni 372bab5267 fix: refactor local worker detection, fix database IP connection issue 2025-12-19 10:36:09 +08:00
yyhuni bed80e4ba7 improve: uninstall script compatibility, avoid incomplete database cleanup 2025-12-19 10:20:04 +08:00
github-actions[bot] 3b014bd04c chore: bump version to v1.0.13 2025-12-19 00:30:12 +00:00
yyhuni 5e60911cb3 fix: build issue 2025-12-19 08:12:40 +08:00
21 changed files with 292 additions and 142 deletions

View File

@@ -16,7 +16,7 @@ env:
IMAGE_PREFIX: yyhuni
permissions:
contents: write # allow modifying repository contents
contents: write
jobs:
build:
@@ -27,18 +27,23 @@ jobs:
- image: xingrin-server
dockerfile: docker/server/Dockerfile
context: .
platforms: linux/amd64,linux/arm64
- image: xingrin-frontend
dockerfile: docker/frontend/Dockerfile
context: .
platforms: linux/amd64 # Next.js crashes under QEMU when building for ARM64
- image: xingrin-worker
dockerfile: docker/worker/Dockerfile
context: .
platforms: linux/amd64,linux/arm64
- image: xingrin-nginx
dockerfile: docker/nginx/Dockerfile
context: .
platforms: linux/amd64,linux/arm64
- image: xingrin-agent
dockerfile: docker/agent/Dockerfile
context: .
platforms: linux/amd64,linux/arm64
steps:
- name: Checkout
@@ -48,7 +53,6 @@ jobs:
run: |
echo "=== Before cleanup ==="
df -h
# Remove large packages we don't need
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/local/lib/android
sudo rm -rf /opt/ghc
@@ -95,22 +99,20 @@ jobs:
with:
context: ${{ matrix.context }}
file: ${{ matrix.dockerfile }}
platforms: linux/amd64,linux/arm64
platforms: ${{ matrix.platforms }}
push: true
tags: |
${{ env.IMAGE_PREFIX }}/${{ matrix.image }}:${{ steps.version.outputs.VERSION }}
${{ steps.version.outputs.IS_RELEASE == 'true' && format('{0}/{1}:latest', env.IMAGE_PREFIX, matrix.image) || '' }}
cache-from: |
type=gha
type=registry,ref=${{ env.IMAGE_PREFIX }}/${{ matrix.image }}:buildcache
cache-to: |
type=gha,mode=max
type=registry,ref=${{ env.IMAGE_PREFIX }}/${{ matrix.image }}:buildcache,mode=max
cache-from: type=gha
cache-to: type=gha,mode=max
provenance: false
sbom: false
# Update the VERSION file after all images have been built successfully
update-version:
runs-on: ubuntu-latest
needs: build # wait for all build jobs to finish
needs: build
if: startsWith(github.ref, 'refs/tags/v')
steps:
- name: Checkout

View File

@@ -1 +1 @@
v1.0.10
v1.0.14

View File

@@ -30,17 +30,26 @@ def fetch_config_and_setup_django():
print("[ERROR] 缺少 SERVER_URL 环境变量", file=sys.stderr)
sys.exit(1)
config_url = f"{server_url}/api/workers/config/"
# Declare the worker's identity (local/remote) via an environment variable
is_local = os.environ.get("IS_LOCAL", "false").lower() == "true"
config_url = f"{server_url}/api/workers/config/?is_local={str(is_local).lower()}"
print(f"[CONFIG] 正在从配置中心获取配置: {config_url}")
print(f"[CONFIG] IS_LOCAL={is_local}")
try:
resp = requests.get(config_url, timeout=10)
resp.raise_for_status()
config = resp.json()
# Database configuration (required)
os.environ.setdefault("DB_HOST", config['db']['host'])
os.environ.setdefault("DB_PORT", config['db']['port'])
os.environ.setdefault("DB_NAME", config['db']['name'])
os.environ.setdefault("DB_USER", config['db']['user'])
db_host = config['db']['host']
db_port = config['db']['port']
db_name = config['db']['name']
db_user = config['db']['user']
os.environ.setdefault("DB_HOST", db_host)
os.environ.setdefault("DB_PORT", db_port)
os.environ.setdefault("DB_NAME", db_name)
os.environ.setdefault("DB_USER", db_user)
os.environ.setdefault("DB_PASSWORD", config['db']['password'])
# Redis configuration
@@ -52,7 +61,12 @@ def fetch_config_and_setup_django():
os.environ.setdefault("ENABLE_COMMAND_LOGGING", str(config['logging']['enableCommandLogging']).lower())
os.environ.setdefault("DEBUG", str(config['debug']))
print(f"[CONFIG] 从配置中心获取配置成功: {config_url}")
print(f"[CONFIG] ✓ 配置获取成功")
print(f"[CONFIG] DB_HOST: {db_host}")
print(f"[CONFIG] DB_PORT: {db_port}")
print(f"[CONFIG] DB_NAME: {db_name}")
print(f"[CONFIG] DB_USER: {db_user}")
print(f"[CONFIG] REDIS_URL: {config['redisUrl']}")
except Exception as e:
print(f"[ERROR] 获取配置失败: {config_url} - {e}", file=sys.stderr)

View File

@@ -27,3 +27,10 @@ vulnerabilities_saved = Signal()
# - worker_name: str worker name
# - message: str failure reason
worker_delete_failed = Signal()
# Signal: all workers under high load
# Args:
# - worker_name: str name of the worker that was selected
# - cpu: float CPU usage
# - mem: float memory usage
all_workers_high_load = Signal()

View File

@@ -198,9 +198,27 @@ class NucleiTemplateRepoService:
# Decide whether to clone or pull
if git_dir.is_dir():
# Existing repository: pull
cmd = ["git", "-C", str(local_path), "pull", "--ff-only"]
action = "pull"
# Check whether the remote URL has changed
current_remote = subprocess.run(
["git", "-C", str(local_path), "remote", "get-url", "origin"],
check=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
current_url = current_remote.stdout.strip() if current_remote.returncode == 0 else ""
if current_url != obj.repo_url:
# Remote URL changed: remove the old directory and re-clone
logger.info("nuclei template repo %s remote URL changed, re-cloning: %s -> %s", obj.id, current_url, obj.repo_url)
shutil.rmtree(local_path)
local_path.mkdir(parents=True, exist_ok=True)
cmd = ["git", "clone", "--depth", "1", obj.repo_url, str(local_path)]
action = "clone"
else:
# Existing repository with an unchanged remote: pull
cmd = ["git", "-C", str(local_path), "pull", "--ff-only"]
action = "pull"
else:
# New repository: clone
if local_path.exists() and not local_path.is_dir():

View File

@@ -157,7 +157,23 @@ class TaskDistributor:
if not scored_workers:
if high_load_workers:
logger.warning("所有 Worker 高负载,降级选择负载最低的")
scored_workers = high_load_workers
high_load_workers.sort(key=lambda x: x[1])
best_worker, score, cpu, mem = high_load_workers[0]
# Send a high-load notification
from apps.common.signals import all_workers_high_load
all_workers_high_load.send(
sender=self.__class__,
worker_name=best_worker.name,
cpu=cpu,
mem=mem
)
logger.info(
"选择 Worker: %s (CPU: %.1f%%, MEM: %.1f%%, Score: %.1f)",
best_worker.name, cpu, mem, score
)
return best_worker
else:
logger.warning("没有可用的 Worker")
return None
@@ -221,10 +237,13 @@ class TaskDistributor:
host_results_dir = settings.HOST_RESULTS_DIR # /opt/xingrin/results
host_logs_dir = settings.HOST_LOGS_DIR # /opt/xingrin/logs
# Environment variables: only SERVER_URL is needed; the container fetches the rest from the config center at startup
# Environment variables: SERVER_URL + IS_LOCAL; the container fetches the rest from the config center at startup
# IS_LOCAL lets the worker declare its identity to the config center, which decides the database address to return
# Prefect local-mode configuration: disable the API server and the event system
is_local_str = "true" if worker.is_local else "false"
env_vars = [
f"-e SERVER_URL={shlex.quote(server_url)}",
f"-e IS_LOCAL={is_local_str}",
"-e PREFECT_API_URL=", # 禁用 API server
"-e PREFECT_LOGGING_EXTRA_LOGGERS=", # 禁用 Prefect 的额外内部日志器
]
@@ -407,8 +426,20 @@ class TaskDistributor:
Note:
engine_config is looked up inside the Flow by querying the database with scan_id
"""
logger.info("="*60)
logger.info("execute_scan_flow 开始")
logger.info(" scan_id: %s", scan_id)
logger.info(" target_name: %s", target_name)
logger.info(" target_id: %s", target_id)
logger.info(" scan_workspace_dir: %s", scan_workspace_dir)
logger.info(" engine_name: %s", engine_name)
logger.info(" docker_image: %s", self.docker_image)
logger.info("="*60)
# 1. Wait for the submit interval (runs in a background thread, does not block the API)
logger.info("Waiting for the submit interval...")
self._wait_for_submit_interval()
logger.info("提交间隔等待完成")
# 2. 选择最佳 Worker
worker = self.select_best_worker()

View File

@@ -116,7 +116,7 @@ class NucleiTemplateRepoViewSet(viewsets.ModelViewSet):
return Response({"message": str(exc)}, status=status.HTTP_400_BAD_REQUEST)
except Exception as exc: # noqa: BLE001
logger.error("刷新 Nuclei 模板仓库失败: %s", exc, exc_info=True)
return Response({"message": "刷新仓库失败"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"message": f"刷新仓库失败: {exc}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"message": "刷新成功", "result": result}, status=status.HTTP_200_OK)

View File

@@ -177,75 +177,16 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
'created': created
})
def _get_client_ip(self, request) -> str:
"""获取客户端真实 IP"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
return x_forwarded_for.split(',')[0].strip()
return request.META.get('REMOTE_ADDR', '')
def _is_local_request(self, client_ip: str) -> bool:
"""
Determine whether the request is local (from inside the Docker network).
Local requests are characterized by:
- coming from the Docker network (172.x.x.x)
- coming from localhost (127.0.0.1)
"""
if not client_ip:
return True # default to local when the IP cannot be determined
# Default Docker network ranges
if client_ip.startswith('172.') or client_ip.startswith('10.'):
return True
# localhost
if client_ip in ('127.0.0.1', '::1', 'localhost'):
return True
return False
@action(detail=False, methods=['get'])
def config(self, request):
"""
Get the task container configuration (config center API).
Workers call this endpoint at startup to fetch the full configuration, enabling centralized config management.
Workers only need to know SERVER_URL; everything else is returned dynamically by this API.
Workers declare their identity via the IS_LOCAL environment variable and pass ?is_local=true/false on the request.
Config distribution flow:
  Worker starts
    -> GET /api/workers/config/
    -> _get_client_ip()       (X-Forwarded-For or REMOTE_ADDR; supports the Nginx proxy case)
    -> _is_local_request()    (172.x.x.x / 10.x.x.x Docker default ranges; 127.0.0.1 / ::1 localhost)
    -> local Worker (inside Docker):   db: postgres      redis: redis
       remote Worker (public access):  db: PUBLIC_HOST   redis: PUBLIC_HOST:6379
Request params:
is_local: true/false - whether the worker is a local node (inside the Docker network)
Returns:
{
@@ -253,19 +194,29 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
"redisUrl": "...",
"paths": {"results": "...", "logs": "..."}
}
Config logic:
- local Worker (is_local=true): db_host=postgres, redis=redis:6379
- remote Worker (is_local=false): db_host=PUBLIC_HOST, redis=PUBLIC_HOST:6379
"""
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
# Determine the request source: local worker or remote worker
# Local workers are inside the Docker network and can reach the postgres service directly
# Remote workers must connect via the public IP
client_ip = self._get_client_ip(request)
is_local_worker = self._is_local_request(client_ip)
# Get the worker identity from the request parameter (declared by the worker itself)
# No longer inferred from the client IP, avoiding compatibility issues across network environments
is_local_param = request.query_params.get('is_local', '').lower()
is_local_worker = is_local_param == 'true'
# Return a different database address depending on the request source
db_host = settings.DATABASES['default']['HOST']
_is_internal_db = db_host in ('postgres', 'localhost', '127.0.0.1')
logger.info(
"Worker 配置请求 - is_local_param: %s, is_local_worker: %s, db_host: %s, is_internal_db: %s",
is_local_param, is_local_worker, db_host, _is_internal_db
)
if _is_internal_db:
# Local database scenario
if is_local_worker:
@@ -274,13 +225,18 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
worker_redis_url = 'redis://redis:6379/0'
else:
# Remote worker: reach the database via the public IP
worker_db_host = settings.PUBLIC_HOST
worker_redis_url = f'redis://{settings.PUBLIC_HOST}:6379/0'
public_host = settings.PUBLIC_HOST
if public_host in ('server', 'localhost', '127.0.0.1'):
logger.warning("远程 Worker 请求配置,但 PUBLIC_HOST=%s 不是有效的公网地址", public_host)
worker_db_host = public_host
worker_redis_url = f'redis://{public_host}:6379/0'
else:
# Remote database scenario: all workers use DB_HOST
worker_db_host = db_host
worker_redis_url = getattr(settings, 'WORKER_REDIS_URL', 'redis://redis:6379/0')
logger.info("返回 Worker 配置 - db_host: %s, redis_url: %s", worker_db_host, worker_redis_url)
return Response({
'db': {
'host': worker_db_host,
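As a reading aid, here is a minimal standalone sketch of the host-selection branching above, with the Django settings lookups replaced by plain parameters (an illustration only, not a helper that exists in the project):

def resolve_worker_endpoints(is_local_worker: bool, db_host: str, public_host: str):
    """Same decision table as the view: internal DB + local worker -> Docker service
    names; internal DB + remote worker -> PUBLIC_HOST; external DB -> DB_HOST as-is."""
    if db_host in ("postgres", "localhost", "127.0.0.1"):
        if is_local_worker:
            return "postgres", "redis://redis:6379/0"
        return public_host, f"redis://{public_host}:6379/0"
    # External DB: the view also honors WORKER_REDIS_URL here; the default is shown for brevity.
    return db_host, "redis://redis:6379/0"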

View File

@@ -6,7 +6,7 @@
import logging
from django.dispatch import receiver
from apps.common.signals import vulnerabilities_saved, worker_delete_failed
from apps.common.signals import vulnerabilities_saved, worker_delete_failed, all_workers_high_load
from apps.scan.notifications import create_notification, NotificationLevel, NotificationCategory
logger = logging.getLogger(__name__)
@@ -80,3 +80,15 @@ def on_worker_delete_failed(sender, worker_name, message, **kwargs):
category=NotificationCategory.SYSTEM
)
logger.warning("Worker 删除失败通知已发送 - worker=%s, message=%s", worker_name, message)
@receiver(all_workers_high_load)
def on_all_workers_high_load(sender, worker_name, cpu, mem, **kwargs):
"""所有 Worker 高负载时的通知处理"""
create_notification(
title="系统负载较高",
message=f"所有节点负载较高,已选择负载最低的节点 {worker_name}CPU: {cpu:.1f}%, 内存: {mem:.1f}%)执行任务,扫描速度可能受影响",
level=NotificationLevel.MEDIUM,
category=NotificationCategory.SYSTEM
)
logger.warning("高负载通知已发送 - worker=%s, cpu=%.1f%%, mem=%.1f%%", worker_name, cpu, mem)

View File

@@ -6,14 +6,32 @@
Configuration must be fetched and environment variables set before Django is imported.
"""
import argparse
from apps.common.container_bootstrap import fetch_config_and_setup_django
import sys
import os
import traceback
def main():
print("="*60)
print("run_initiate_scan.py 启动")
print(f" Python: {sys.version}")
print(f" CWD: {os.getcwd()}")
print(f" SERVER_URL: {os.environ.get('SERVER_URL', 'NOT SET')}")
print("="*60)
# 1. Fetch config from the config center and initialize Django (must happen before any Django imports)
fetch_config_and_setup_django()
print("[1/4] 从配置中心获取配置...")
try:
from apps.common.container_bootstrap import fetch_config_and_setup_django
fetch_config_and_setup_django()
print("[1/4] ✓ 配置获取成功")
except Exception as e:
print(f"[1/4] ✗ 配置获取失败: {e}")
traceback.print_exc()
sys.exit(1)
# 2. Parse command-line arguments
print("[2/4] Parsing command-line arguments...")
parser = argparse.ArgumentParser(description="执行扫描初始化 Flow")
parser.add_argument("--scan_id", type=int, required=True, help="扫描任务 ID")
parser.add_argument("--target_name", type=str, required=True, help="目标名称")
@@ -23,21 +41,41 @@ def main():
parser.add_argument("--scheduled_scan_name", type=str, default=None, help="定时扫描任务名称(可选)")
args = parser.parse_args()
print(f"[2/4] ✓ 参数解析成功:")
print(f" scan_id: {args.scan_id}")
print(f" target_name: {args.target_name}")
print(f" target_id: {args.target_id}")
print(f" scan_workspace_dir: {args.scan_workspace_dir}")
print(f" engine_name: {args.engine_name}")
print(f" scheduled_scan_name: {args.scheduled_scan_name}")
# 3. It is now safe to import Django-related modules
from apps.scan.flows.initiate_scan_flow import initiate_scan_flow
print("[3/4] Importing initiate_scan_flow...")
try:
from apps.scan.flows.initiate_scan_flow import initiate_scan_flow
print("[3/4] ✓ 导入成功")
except Exception as e:
print(f"[3/4] ✗ 导入失败: {e}")
traceback.print_exc()
sys.exit(1)
# 4. Run the Flow
result = initiate_scan_flow(
scan_id=args.scan_id,
target_name=args.target_name,
target_id=args.target_id,
scan_workspace_dir=args.scan_workspace_dir,
engine_name=args.engine_name,
scheduled_scan_name=args.scheduled_scan_name,
)
print(f"Flow 执行完成: {result}")
print("[4/4] 执行 initiate_scan_flow...")
try:
result = initiate_scan_flow(
scan_id=args.scan_id,
target_name=args.target_name,
target_id=args.target_id,
scan_workspace_dir=args.scan_workspace_dir,
engine_name=args.engine_name,
scheduled_scan_name=args.scheduled_scan_name,
)
print("[4/4] ✓ Flow 执行完成")
print(f"结果: {result}")
except Exception as e:
print(f"[4/4] ✗ Flow 执行失败: {e}")
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
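For orientation, a hypothetical invocation of this entrypoint (flag names are inferred from the parser and the args.* attributes printed above; every value here is made up — in the real system the TaskDistributor assembles this command when it launches the task container):

# Hypothetical example only; values are placeholders.
import subprocess

subprocess.run([
    "python", "run_initiate_scan.py",
    "--scan_id", "42",
    "--target_name", "example.com",
    "--target_id", "7",
    "--scan_workspace_dir", "/opt/xingrin/results/scan_42",
    "--engine_name", "default",
])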

View File

@@ -266,15 +266,26 @@ class ScanCreationService:
Args:
scan_data: list of scan task data
"""
logger.info("="*60)
logger.info("开始分发扫描任务到 Workers - 数量: %d", len(scan_data))
logger.info("="*60)
# Background threads need a fresh database connection
connection.close()
logger.info("已关闭旧数据库连接,准备获取新连接")
distributor = get_task_distributor()
logger.info("TaskDistributor 初始化完成")
scan_repo = DjangoScanRepository()
logger.info("ScanRepository 初始化完成")
for data in scan_data:
scan_id = data['scan_id']
logger.info("-"*40)
logger.info("准备分发扫描任务 - Scan ID: %s, Target: %s", scan_id, data['target_name'])
try:
logger.info("调用 distributor.execute_scan_flow...")
success, message, container_id, worker_id = distributor.execute_scan_flow(
scan_id=scan_id,
target_name=data['target_name'],
@@ -284,20 +295,29 @@ class ScanCreationService:
scheduled_scan_name=data.get('scheduled_scan_name'),
)
logger.info(
"execute_scan_flow 返回 - success: %s, message: %s, container_id: %s, worker_id: %s",
success, message, container_id, worker_id
)
if success:
if container_id:
scan_repo.append_container_id(scan_id, container_id)
logger.info("已记录 container_id: %s", container_id)
if worker_id:
scan_repo.update_worker(scan_id, worker_id)
logger.info("已记录 worker_id: %s", worker_id)
logger.info(
"✓ 扫描任务已提交 - Scan ID: %s, Worker: %s",
scan_id, worker_id
)
else:
logger.error("execute_scan_flow 返回失败 - message: %s", message)
raise Exception(message)
except Exception as e:
logger.error("提交扫描任务失败 - Scan ID: %s, 错误: %s", scan_id, e)
logger.exception("详细堆栈:")
try:
scan_repo.update_status(
scan_id,

View File

@@ -157,7 +157,7 @@ class ScanService:
"""取消所有正在运行的阶段(委托给 ScanStateService"""
return self.state_service.cancel_running_stages(scan_id, final_status)
# todo: to be integrated
# TODO: to be integrated
def add_command_to_scan(self, scan_id: int, stage_name: str, tool_name: str, command: str) -> bool:
"""
Incrementally add a command to the specified scan stage

View File

@@ -79,20 +79,20 @@ ENV GOPATH=/root/go
ENV PATH=/usr/local/go/bin:$PATH:$GOPATH/bin
ENV GOPROXY=https://goproxy.cn,direct
# 5. Install uv (Python package manager)
RUN pip install uv --break-system-packages
# Install Python dependencies (uv downloads in parallel)
COPY backend/requirements.txt .
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r requirements.txt --break-system-packages && \
rm -f /usr/local/lib/python3.*/dist-packages/argparse.py && \
rm -rf /usr/local/lib/python3.*/dist-packages/__pycache__/argparse*
COPY --from=go-builder /usr/local/go /usr/local/go
COPY --from=go-builder /go/bin/* /usr/local/bin/
COPY --from=go-builder /usr/local/bin/massdns /usr/local/bin/massdns
# 5. Install uv (Python package manager) and install Python dependencies
COPY backend/requirements.txt .
RUN pip install uv --break-system-packages && \
uv pip install --system -r requirements.txt --break-system-packages && \
rm -f /usr/local/lib/python3.*/dist-packages/argparse.py && \
rm -rf /usr/local/lib/python3.*/dist-packages/__pycache__/argparse* && \
rm -rf /root/.cache/uv && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# 6. Copy the backend code
COPY backend /app/backend
ENV PYTHONPATH=/app/backend

View File

@@ -231,7 +231,7 @@ export const createScheduledScanColumns = ({
// Cron expression column
{
accessorKey: "cronExpression",
header: "调度时间",
header: "Cron Expression",
cell: ({ row }) => {
const cron = row.original.cronExpression
return (
@@ -251,7 +251,7 @@ export const createScheduledScanColumns = ({
// Target column (shows the organization or the target depending on scanMode)
{
accessorKey: "scanMode",
header: "目标",
header: "Target",
cell: ({ row }) => {
const scanMode = row.original.scanMode
const organizationName = row.original.organizationName

View File

@@ -9,7 +9,7 @@ import { useSystemLogs } from "@/hooks/use-system-logs"
export function SystemLogsView() {
const { theme } = useTheme()
const { data } = useSystemLogs({ lines: 200 })
const { data } = useSystemLogs({ lines: 500 })
const content = useMemo(() => data?.content ?? "", [data?.content])

View File

@@ -81,7 +81,7 @@ export function createVulnerabilityColumns({
},
{
accessorKey: "vulnType",
header: "类型",
header: "Vuln Type",
cell: ({ row }) => {
const vulnType = row.getValue("vulnType") as string
const vulnerability = row.original
@@ -143,7 +143,7 @@ export function createVulnerabilityColumns({
},
{
accessorKey: "discoveredAt",
header: "发现时间",
header: "Discovered At",
cell: ({ row }) => {
const discoveredAt = row.getValue("discoveredAt") as string
return (

View File

@@ -62,7 +62,7 @@ export function useUpdateNucleiRepo() {
mutationFn: (data: {
id: number
repoUrl?: string
}) => nucleiRepoApi.updateRepo(data.id, data),
}) => nucleiRepoApi.updateRepo(data.id, { repoUrl: data.repoUrl }),
onSuccess: (_data, variables) => {
toast.success("仓库配置已更新")
queryClient.invalidateQueries({ queryKey: ["nuclei-repos"] })

View File

@@ -75,9 +75,9 @@ export const nucleiRepoApi = {
return response.data
},
/** Update repository */
/** Update repository (partial update) */
updateRepo: async (repoId: number, payload: UpdateRepoPayload): Promise<NucleiRepoResponse> => {
const response = await api.put<NucleiRepoResponse>(`${BASE_URL}${repoId}/`, payload)
const response = await api.patch<NucleiRepoResponse>(`${BASE_URL}${repoId}/`, payload)
return response.data
},

View File

@@ -75,7 +75,12 @@ fi
# Get the real user ($SUDO_USER is the real user when the script runs via sudo)
REAL_USER="${SUDO_USER:-$USER}"
REAL_HOME=$(getent passwd "$REAL_USER" | cut -d: -f6)
# macOS has no getent; use dscl or ~$USER instead
if command -v getent &>/dev/null; then
REAL_HOME=$(getent passwd "$REAL_USER" | cut -d: -f6)
else
REAL_HOME=$(eval echo "~$REAL_USER")
fi
# Project root directory
ROOT_DIR="$(cd "$(dirname "$0")" && pwd)"
@@ -110,13 +115,22 @@ generate_random_string() {
fi
}
# Cross-platform sed -i (compatible with macOS and Linux)
sed_inplace() {
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' "$@"
else
sed -i "$@"
fi
}
# Update a single key in the .env file
update_env_var() {
local file="$1"
local key="$2"
local value="$3"
if grep -q "^$key=" "$file"; then
sed -i -e "s|^$key=.*|$key=$value|" "$file"
sed_inplace "s|^$key=.*|$key=$value|" "$file"
else
echo "$key=$value" >> "$file"
fi
@@ -357,10 +371,10 @@ if [ -f "$DOCKER_DIR/.env.example" ]; then
-c "CREATE DATABASE $prefect_db;" 2>/dev/null || true
success "数据库准备完成"
sed -i "s/^DB_HOST=.*/DB_HOST=$db_host/" "$DOCKER_DIR/.env"
sed -i "s/^DB_PORT=.*/DB_PORT=$db_port/" "$DOCKER_DIR/.env"
sed -i "s/^DB_USER=.*/DB_USER=$db_user/" "$DOCKER_DIR/.env"
sed -i "s/^DB_PASSWORD=.*/DB_PASSWORD=$db_password/" "$DOCKER_DIR/.env"
sed_inplace "s/^DB_HOST=.*/DB_HOST=$db_host/" "$DOCKER_DIR/.env"
sed_inplace "s/^DB_PORT=.*/DB_PORT=$db_port/" "$DOCKER_DIR/.env"
sed_inplace "s/^DB_USER=.*/DB_USER=$db_user/" "$DOCKER_DIR/.env"
sed_inplace "s/^DB_PASSWORD=.*/DB_PASSWORD=$db_password/" "$DOCKER_DIR/.env"
success "已配置远程数据库: $db_user@$db_host:$db_port"
else
info "使用本地 PostgreSQL 容器"

View File

@@ -80,12 +80,12 @@ if [[ $ans_stop =~ ^[Yy]$ ]]; then
# First force-stop and remove containers that may hold the network (xingrin-agent, etc.)
docker rm -f xingrin-agent xingrin-watchdog 2>/dev/null || true
# Stop containers for both modes
# Stop containers for both modes (without -v; volumes are handled separately in step 5)
[ -f "docker-compose.yml" ] && ${COMPOSE_CMD} -f docker-compose.yml down 2>/dev/null || true
[ -f "docker-compose.dev.yml" ] && ${COMPOSE_CMD} -f docker-compose.dev.yml down 2>/dev/null || true
# Manually remove networks (in case compose did not remove them)
docker network rm xingrin_network 2>/dev/null || true
docker network rm xingrin_network docker_default 2>/dev/null || true
success "容器和网络已停止/删除(如存在)。"
else
@@ -156,19 +156,28 @@ ans_db=${ans_db:-Y}
if [[ $ans_db =~ ^[Yy]$ ]]; then
info "尝试删除与 XingRin 相关的 Postgres 容器和数据卷..."
# docker-compose 项目名为 docker常见资源名如下忽略不存在的情况
# - 容器: docker-postgres-1
# - 数据卷: docker_postgres_data对应 compose 中的 postgres_data 卷)
docker rm -f docker-postgres-1 2>/dev/null || true
docker volume rm docker_postgres_data 2>/dev/null || true
success "本地 Postgres 容器及数据卷已尝试删除(不存在会自动忽略)。"
# 删除可能的容器名(不同 compose 版本命名不同)
docker rm -f docker-postgres-1 xingrin-postgres postgres 2>/dev/null || true
# Remove possible volume names (depends on the project name and compose config)
# First list the volumes to delete
for vol in postgres_data docker_postgres_data xingrin_postgres_data; do
if docker volume inspect "$vol" >/dev/null 2>&1; then
if docker volume rm "$vol" 2>/dev/null; then
success "已删除 volume: $vol"
else
warn "无法删除 volume: $vol(可能正在被使用,请先停止所有容器)"
fi
fi
done
success "本地 Postgres 数据卷清理完成。"
else
warn "已保留本地 Postgres 容器和 volume。"
fi
step "[6/6] 是否删除与 XingRin 相关的 Docker 镜像?(y/N)"
step "[6/6] 是否删除与 XingRin 相关的 Docker 镜像?(Y/n)"
read -r ans_images
ans_images=${ans_images:-N}
ans_images=${ans_images:-Y}
if [[ $ans_images =~ ^[Yy]$ ]]; then
info "正在删除 Docker 镜像..."
@@ -199,9 +208,29 @@ if [[ $ans_images =~ ^[Yy]$ ]]; then
fi
docker rmi redis:7-alpine 2>/dev/null || true
# Remove locally built dev images
docker rmi docker-server docker-frontend docker-nginx docker-agent docker-worker 2>/dev/null || true
docker rmi "docker-worker:${IMAGE_TAG}-dev" 2>/dev/null || true
success "Docker 镜像已删除(如存在)。"
else
warn "已保留 Docker 镜像。"
fi
# Clean the build cache (optional; the next build will be slower)
echo ""
echo -n -e "${BOLD}${CYAN}[?] 是否清理 Docker 构建缓存?(y/N) ${RESET}"
echo -e "${YELLOW}(清理后下次构建会很慢,一般不需要)${RESET}"
read -r ans_cache
ans_cache=${ans_cache:-N}
if [[ $ans_cache =~ ^[Yy]$ ]]; then
info "清理 Docker 构建缓存..."
docker builder prune -af 2>/dev/null || true
success "构建缓存已清理。"
else
warn "已保留构建缓存(推荐)。"
fi
success "卸载流程已完成。"

View File

@@ -18,6 +18,15 @@
cd "$(dirname "$0")"
# Cross-platform sed -i (compatible with macOS and Linux)
sed_inplace() {
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' "$@"
else
sed -i "$@"
fi
}
# Parse arguments to determine the mode
DEV_MODE=false
for arg in "$@"; do
@@ -92,7 +101,7 @@ if [ -f "VERSION" ]; then
if [ -n "$NEW_VERSION" ]; then
# Update IMAGE_TAG in .env (all nodes will use images of this version)
if grep -q "^IMAGE_TAG=" "docker/.env"; then
sed -i "s/^IMAGE_TAG=.*/IMAGE_TAG=$NEW_VERSION/" "docker/.env"
sed_inplace "s/^IMAGE_TAG=.*/IMAGE_TAG=$NEW_VERSION/" "docker/.env"
echo -e " ${GREEN}+${NC} 版本同步: IMAGE_TAG=$NEW_VERSION"
else
echo "IMAGE_TAG=$NEW_VERSION" >> "docker/.env"