Mirror of https://github.com/yyhuni/xingrin.git (synced 2026-01-31 19:53:11 +08:00)
Compare commits
83 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 25074f9191 |  |
|  | b06f33db5b |  |
|  | a116755087 |  |
|  | cddc4c244d |  |
|  | 9e7089a8c2 |  |
|  | 6f543072fd |  |
|  | 196058384a |  |
|  | e076ea6849 |  |
|  | abdc580a7a |  |
|  | 17134ceb4b |  |
|  | 98fba31118 |  |
|  | 75eff9929e |  |
|  | 9baa615623 |  |
|  | 69a598e789 |  |
|  | 54017d0334 |  |
|  | 8ac97b4451 |  |
|  | 0a4f1d45be |  |
|  | bce310a4b0 |  |
|  | 8502daf8a0 |  |
|  | d0066dd9fc |  |
|  | 3407a98cac |  |
|  | 3d189431fc |  |
|  | 1cbb6350c4 |  |
|  | 20a22f98d0 |  |
|  | a96ab79891 |  |
|  | 3744a724be |  |
|  | f63e40fbba |  |
|  | 54573e210a |  |
|  | 6179dd2ed3 |  |
|  | 34ac706fbc |  |
|  | 3ba1ba427e |  |
|  | 6019555729 |  |
|  | 750f52c515 |  |
|  | bb5ce66a31 |  |
|  | ac958571a5 |  |
|  | bcb321f883 |  |
|  | fd3cdf8033 |  |
|  | f3f9718df2 |  |
|  | 984c34dbca |  |
|  | e9dcbf510d |  |
|  | 65deb8c5d0 |  |
|  | 5a93ad878c |  |
|  | 51f25d0976 |  |
|  | fe1579e7fb |  |
|  | ef117d2245 |  |
|  | 39cea5a918 |  |
|  | 0d477ce269 |  |
|  | 1bb6e90c3d |  |
|  | 9004c77031 |  |
|  | 71de0b4b1b |  |
|  | 1ef1f9709e |  |
|  | 3323bd2a4f |  |
|  | df602dd1ae |  |
|  | 372bab5267 |  |
|  | bed80e4ba7 |  |
|  | 3b014bd04c |  |
|  | 5e60911cb3 |  |
|  | 5de7ea9dbc |  |
|  | 971641cdeb |  |
|  | e5a74faf9f |  |
|  | e9a58e89aa |  |
|  | 3d9d520dc7 |  |
|  | 8d814b5864 |  |
|  | c16b7afabe |  |
|  | fa55167989 |  |
|  | 55a2762c71 |  |
|  | 5532f1e63a |  |
|  | 948568e950 |  |
|  | 873b6893f1 |  |
|  | dbb30f7c78 |  |
|  | 38eced3814 |  |
|  | 68fc7cee3b |  |
|  | 6e23824a45 |  |
|  | a88cceb4f4 |  |
|  | 81164621d2 |  |
|  | 379abaeca7 |  |
|  | de77057679 |  |
|  | 630747ed2b |  |
|  | 98c418ee8b |  |
|  | cd54089c34 |  |
|  | 8fcda537a3 |  |
|  | 3ca94be7b7 |  |
|  | eb70692843 |  |
16  .github/workflows/docker-build.yml  (vendored)
```diff
@@ -16,7 +16,7 @@ env:
   IMAGE_PREFIX: yyhuni

 permissions:
-  contents: write # allow writing repository contents
+  contents: write

 jobs:
   build:
@@ -27,18 +27,23 @@ jobs:
         - image: xingrin-server
           dockerfile: docker/server/Dockerfile
           context: .
+          platforms: linux/amd64,linux/arm64
         - image: xingrin-frontend
           dockerfile: docker/frontend/Dockerfile
           context: .
+          platforms: linux/amd64 # Next.js crashes under QEMU when built for ARM64
         - image: xingrin-worker
           dockerfile: docker/worker/Dockerfile
           context: .
+          platforms: linux/amd64,linux/arm64
         - image: xingrin-nginx
           dockerfile: docker/nginx/Dockerfile
           context: .
+          platforms: linux/amd64,linux/arm64
         - image: xingrin-agent
           dockerfile: docker/agent/Dockerfile
           context: .
+          platforms: linux/amd64,linux/arm64

     steps:
       - name: Checkout
@@ -48,7 +53,6 @@
         run: |
           echo "=== Before cleanup ==="
           df -h
-          # remove large packages we do not need
           sudo rm -rf /usr/share/dotnet
           sudo rm -rf /usr/local/lib/android
           sudo rm -rf /opt/ghc
@@ -95,18 +99,22 @@
         with:
           context: ${{ matrix.context }}
           file: ${{ matrix.dockerfile }}
-          platforms: linux/amd64,linux/arm64
+          platforms: ${{ matrix.platforms }}
           push: true
           tags: |
             ${{ env.IMAGE_PREFIX }}/${{ matrix.image }}:${{ steps.version.outputs.VERSION }}
             ${{ steps.version.outputs.IS_RELEASE == 'true' && format('{0}/{1}:latest', env.IMAGE_PREFIX, matrix.image) || '' }}
           build-args: |
             IMAGE_TAG=${{ steps.version.outputs.VERSION }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
           provenance: false
           sbom: false

   # After all images build successfully, update the VERSION file
   update-version:
     runs-on: ubuntu-latest
-    needs: build # wait for all build jobs to finish
+    needs: build
     if: startsWith(github.ref, 'refs/tags/v')
     steps:
       - name: Checkout
```
@@ -1,12 +0,0 @@
---
trigger: always_on
---

1. The backend web UI should be served on port 8888.
3. Add a trailing slash to every frontend route so it matches Django DRF's routing rules (see the sketch after this list).
4. Pages can be smoke-tested with curl.
8. All frontend API calls belong in @services, and all type definitions belong in @types.
10. Implement frontend loading (and similar) state with React Query so it is managed automatically.
17. Put the toasts for all business operations in hooks.
23. Avoid window.location.href for navigation on the frontend unless it is truly necessary; use Next.js client-side routing instead.
24. For UI work, consult MCP first to check whether a shared, polished component already covers the need.
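Rule 3 above follows from how Django REST Framework registers routes. A minimal sketch for illustration (standard DRF behaviour; the `targets` app and `TargetViewSet` names are assumptions, not code from this repository):

```python
# urls.py (illustrative): DRF's DefaultRouter appends a trailing slash by default,
# so "/api/targets" matches nothing while "/api/targets/" does. Keeping the slash
# on the frontend avoids redirects and 404s, especially on POST/PUT requests.
from rest_framework.routers import DefaultRouter

from targets.views import TargetViewSet  # hypothetical app and viewset names

router = DefaultRouter(trailing_slash=True)  # True is the default; shown for clarity
router.register(r"targets", TargetViewSet, basename="target")

urlpatterns = router.urls  # yields e.g. ^targets/$ and ^targets/(?P<pk>[^/.]+)/$
```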
@@ -1,85 +0,0 @@
---
trigger: manual
description: This rule must be invoked whenever a code review is performed
---

### **0. Logical correctness & bug hunting** *(top priority; must be traced by hand)*

**Goal**: without relying on tests, actively find logic errors where the code runs but produces the wrong result.

1. **Trace the key paths by hand**:
    - Pick 2-3 typical inputs (including boundaries) and **step through the execution flow in your head or on paper**.
    - Does the output match expectations? Is every variable change along the way correct?
2. **Check for the common logic bugs** (see the short sketch after this section):
    - **Off-by-one**: loops, array indexing, pagination
    - **Broken conditionals**: `and`/`or` precedence, misuse of short-circuit evaluation
    - **State confusion**: uninitialized variables, values overwritten by accident
    - **Algorithmic drift**: sorting, searching, midpoint handling in binary search
    - **Floating-point precision**: is `==` misused to compare floats?
3. **Review the control flow**:
    - Are all `if/else` branches covered? Is there unreachable code?
    - Does every `switch`/`match` have a `default`? Any missing cases?
    - What do error paths return? Is any `finally` cleanup missing?
4. **Business-logic consistency**:
    - Does the code follow the **business rules**? (e.g. "order total = item price × quantity + shipping - discount")
    - Are implicit constraints missed? (e.g. "users may only review completed orders")
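A tiny illustrative sketch of two of the bug classes listed in point 2, with the usual fixes (an editor's example, not code from this repository):

```python
import math

def page_slice_buggy(items, page, page_size):
    # Off-by-one: page 1 silently drops the first item because the start index is shifted
    start = (page - 1) * page_size + 1
    return items[start:start + page_size]

def page_slice_fixed(items, page, page_size):
    start = (page - 1) * page_size
    return items[start:start + page_size]

def totals_match_buggy(a, b):
    # 0.1 + 0.2 != 0.3 in binary floating point, so exact equality reports false mismatches
    return a == b

def totals_match_fixed(a, b):
    return math.isclose(a, b, rel_tol=1e-9, abs_tol=1e-12)

assert page_slice_fixed(list(range(10)), page=1, page_size=3) == [0, 1, 2]
assert page_slice_buggy(list(range(10)), page=1, page_size=3) == [1, 2, 3]  # wrong window
assert totals_match_fixed(0.1 + 0.2, 0.3) and not totals_match_buggy(0.1 + 0.2, 0.3)
```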
### **1. Functionality & correctness** *(blocking issues must be fixed)*

1. **Requirements coverage**: does it cover 100% of the requirements? Any missing or extra features?
2. **Boundary conditions**:
    - Inputs: `null`, empty, extreme values, malformed formats
    - Collections: empty, single element, very large (e.g. 10⁶)
    - Loops: termination conditions, off-by-one
3. **Error handling**:
    - Are exceptions caught comprehensively? Do failure paths degrade gracefully?
    - Are error messages clear, without leaking stack traces?
4. **Concurrency safety** (see the sketch after this section):
    - Any race or deadlock risk? Are shared resources synchronized?
    - Are `volatile`/`synchronized`/`Lock`/atomic primitives used where needed?
5. **Unit tests**:
    - Coverage ≥ 80%? Positive, boundary, and failure cases included?
    - Are the tests independent, with no external dependencies?
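An illustrative sketch of the race condition that point 4 warns about, and the lock that fixes it (editor's example, not from this repository):

```python
import threading

class Counter:
    def __init__(self):
        self.value = 0
        self._lock = threading.Lock()

    def unsafe_increment(self):
        # Read-modify-write without a lock: two threads can read the same old value,
        # so increments can be lost under contention.
        self.value += 1

    def safe_increment(self):
        # The lock serializes the read-modify-write, so no increment is lost.
        with self._lock:
            self.value += 1

def hammer(increment, workers=4, times=100_000):
    threads = [threading.Thread(target=lambda: [increment() for _ in range(times)])
               for _ in range(workers)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

counter = Counter()
hammer(counter.safe_increment)
assert counter.value == 400_000  # always exact thanks to the lock
```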
### **2. Code quality & readability**

1. **Naming**: self-explanatory? Follows the project's conventions?
2. **Function design**:
    - **Single responsibility**? ≤ 4 parameters? Suggested length < 50 lines (adjust per language)
    - Can parts be extracted into utility functions?
3. **Structure & complexity**:
    - No duplicated code? Cyclomatic complexity < 10?
    - Nesting ≤ 3 levels? Use guard clauses to return early
4. **Comments**: explain **why**, not **what**; complex logic must be commented
5. **Consistent style**: auto-format with `Prettier`/`ESLint`/`Spotless`

### **3. Architecture & design**

1. **SOLID**: does it respect single responsibility, open/closed, and dependency inversion?
2. **Dependencies**: depend on interfaces rather than implementations? No circular dependencies?
3. **Testability**: does it support dependency injection? Avoid hard-coded `new`
4. **Extensibility**: does adding a feature require changing only one place?

### **4. Performance**

- **N+1 queries**? I/O, logging, or allocation inside loops? (see the sketch after this section)
- Is the algorithmic complexity reasonable? (e.g. can an O(n²) step be improved?)
- Memory: no leaks? Large objects released promptly? Do caches expire?
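A minimal sketch of the N+1 pattern flagged above and its usual Django ORM fix; the `Author`/`Post` models here are illustrative assumptions, not code from this repository:

```python
from django.db import models

class Author(models.Model):
    name = models.CharField(max_length=100)

class Post(models.Model):
    title = models.CharField(max_length=200)
    author = models.ForeignKey(Author, on_delete=models.CASCADE)

# BAD: one query for the posts, then one extra query per post for its author (1 + N)
names = [post.author.name for post in Post.objects.all()]

# GOOD: a single JOINed query fetches the authors up front
names = [post.author.name for post in Post.objects.select_related("author")]

# For many-to-many or reverse foreign-key relations, use prefetch_related() instead.
```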
### **5. Other**

1. **Maintainability**: do logs carry context? Is the code cleaner after the change than before?
2. **Compatibility**: are API and database changes backward compatible?
3. **Dependency management**: is a new library really needed? Is its license compliant?

---

### **Review best practices**

- **Review in small batches**: ≤ 200 lines per pass
- **Phrase findings as suggestions**: "Consider splitting this function to improve readability" rather than "this function is too long"
- **Automate first**: style, null-pointer, and security scans belong in CI tooling
- **Triage findings**:
    - 🛑 **Blocking**: broken functionality, security vulnerabilities
    - ⚠️ **Must fix**: design flaws, performance bottlenecks
    - 💡 **Suggestion**: style, naming, readability
@@ -1,195 +0,0 @@
---
trigger: always_on
---

## Standard layered call order

Following **DDD (domain-driven design) and clean-architecture** principles, the call order is:

```
HTTP request → Views → Tasks → Services → Repositories → Models
```

---

### 📊 Full call-chain diagram

```
┌───────────────────────────────────────────────────────────────┐
│                    HTTP request (frontend)                    │
└────────────────────────┬──────────────────────────────────────┘
                         ↓
┌───────────────────────────────────────────────────────────────┐
│ Views (HTTP layer)                                             │
│ - parameter validation                                         │
│ - permission checks                                            │
│ - calls Tasks / Services                                       │
│ - returns the HTTP response                                    │
└────────────────────────┬──────────────────────────────────────┘
                         ↓
        ┌────────────────┴────────────────┐
        ↓ (async)                         ↓ (sync)
┌────────────────────┐         ┌──────────────────────────┐
│ Tasks (task layer) │         │ Services (business layer)│
│ - async execution  │         │ - business logic         │
│ - background jobs  │────────>│ - transaction management │
│ - notifications    │         │ - data validation        │
└────────────────────┘         └────────────┬─────────────┘
                                            ↓
                               ┌──────────────────────────┐
                               │ Repositories (storage)   │
                               │ - data access            │
                               │ - query encapsulation    │
                               │ - bulk operations        │
                               └────────────┬─────────────┘
                                            ↓
                               ┌──────────────────────────┐
                               │ Models (model layer)     │
                               │ - ORM definitions        │
                               │ - data structures        │
                               │ - relationship mapping   │
                               └──────────────────────────┘
```

---

### 🔄 Concrete call examples

### **Scenario 1: synchronous delete (Views → Services → Repositories → Models)**

```python
# 1. Views layer (views.py)
def some_sync_delete(self, request):
    # Validate parameters
    target_ids = request.data.get('ids')

    # Call the Service layer
    service = TargetService()
    result = service.bulk_delete_targets(target_ids)

    # Return the response
    return Response({'message': 'deleted'})

# 2. Services layer (services/target_service.py)
class TargetService:
    def bulk_delete_targets(self, target_ids):
        # Business-logic validation
        logger.info("Preparing to delete...")

        # Call the Repository layer
        deleted_count = self.repo.bulk_delete_by_ids(target_ids)

        # Return the result
        return deleted_count

# 3. Repositories layer (repositories/django_target_repository.py)
class DjangoTargetRepository:
    def bulk_delete_by_ids(self, target_ids):
        # Data-access operation
        return Target.objects.filter(id__in=target_ids).delete()

# 4. Models layer (models.py)
class Target(models.Model):
    # ORM definition
    name = models.CharField(...)
```

---

### **Scenario 2: asynchronous delete (Views → Tasks → Services → Repositories → Models)**

```python
# 1. Views layer (views.py)
def destroy(self, request, *args, **kwargs):
    target = self.get_object()

    # Call the Tasks layer (asynchronous)
    async_bulk_delete_targets([target.id], [target.name])

    # Return 202 immediately
    return Response(status=202)

# 2. Tasks layer (tasks/target_tasks.py)
def async_bulk_delete_targets(target_ids, target_names):
    def _delete():
        # Send a notification
        create_notification("Deleting...")

        # Call the Service layer
        service = TargetService()
        result = service.bulk_delete_targets(target_ids)

        # Send a completion notification
        create_notification("Deleted successfully")

    # Run in a background thread
    threading.Thread(target=_delete).start()

# 3. Services layer (services/target_service.py)
class TargetService:
    def bulk_delete_targets(self, target_ids):
        # Business logic
        return self.repo.bulk_delete_by_ids(target_ids)

# 4. Repositories layer (repositories/django_target_repository.py)
class DjangoTargetRepository:
    def bulk_delete_by_ids(self, target_ids):
        # Data access
        return Target.objects.filter(id__in=target_ids).delete()

# 5. Models layer (models.py)
class Target(models.Model):
    # ORM definition
    ...
```

---

### 📋 Responsibilities by layer

| Layer | Responsibilities | Must not do |
| --- | --- | --- |
| **Views** | HTTP request handling, parameter validation, permission checks | ❌ access Models directly<br>❌ business logic |
| **Tasks** | async execution, background jobs, sending notifications | ❌ access Models directly<br>❌ HTTP responses |
| **Services** | business logic, transaction management, data validation | ❌ raw SQL<br>❌ anything HTTP-specific |
| **Repositories** | data access, query encapsulation, bulk operations | ❌ business logic<br>❌ sending notifications |
| **Models** | ORM definitions, data structures, relationship mapping | ❌ business logic<br>❌ complex queries |

---

### ✅ Best-practice principles

1. **One-way dependencies**: call downward only, never upward

    ```
    Views → Tasks → Services → Repositories → Models
    (upper layers)                 (lower layers)
    ```

2. **Layer isolation**: only adjacent layers interact; never skip a layer
    - ✅ Views → Services
    - ✅ Tasks → Services
    - ✅ Services → Repositories
    - ❌ Views → Repositories (skips a layer)
    - ❌ Tasks → Models (skips a layer)
3. **Dependency injection**: inject dependencies through the constructor

    ```python
    class TargetService:
        def __init__(self):
            self.repo = DjangoTargetRepository()  # injected dependency
    ```

4. **Interface abstraction**: define the interface with a Protocol (a fuller sketch combining points 3 and 4 follows below)

    ```python
    class TargetRepository(Protocol):
        def bulk_delete_by_ids(self, ids): ...
    ```
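An editor-added sketch of how principles 3 and 4 can be combined, assuming the `Target` model shown above: the service depends on the `TargetRepository` protocol rather than the concrete class, so tests can inject a fake repository instead of the hard-coded `DjangoTargetRepository()`. The `FakeTargetRepository` and test names are illustrative assumptions:

```python
from typing import Optional, Protocol


class TargetRepository(Protocol):
    def bulk_delete_by_ids(self, target_ids: list) -> int: ...


class DjangoTargetRepository:
    def bulk_delete_by_ids(self, target_ids: list) -> int:
        # Django's delete() returns (total_deleted, per_model_counts)
        deleted, _ = Target.objects.filter(id__in=target_ids).delete()
        return deleted


class TargetService:
    def __init__(self, repo: Optional[TargetRepository] = None):
        # Constructor injection with a sensible default, so production call sites stay unchanged
        self.repo = repo if repo is not None else DjangoTargetRepository()

    def bulk_delete_targets(self, target_ids: list) -> int:
        return self.repo.bulk_delete_by_ids(target_ids)


class FakeTargetRepository:
    """In-memory stand-in for unit tests; the ORM is never touched."""

    def __init__(self) -> None:
        self.deleted = []

    def bulk_delete_by_ids(self, target_ids: list) -> int:
        self.deleted.extend(target_ids)
        return len(target_ids)


def test_bulk_delete_counts_ids():
    service = TargetService(repo=FakeTargetRepository())
    assert service.bulk_delete_targets([1, 2, 3]) == 3
```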
733  LICENSE

@@ -1,131 +1,674 @@

Removed: PolyForm Noncommercial License 1.0.0

# PolyForm Noncommercial License 1.0.0

<https://polyformproject.org/licenses/noncommercial/1.0.0>

## Acceptance

In order to get any license under these terms, you must agree to them as both strict obligations and conditions to all your licenses.

## Copyright License

The licensor grants you a copyright license for the software to do everything you might do with the software that would otherwise infringe the licensor's copyright in it for any permitted purpose. However, you may only distribute the software according to [Distribution License](#distribution-license) and make changes or new works based on the software according to [Changes and New Works License](#changes-and-new-works-license).

## Distribution License

The licensor grants you an additional copyright license to distribute copies of the software. Your license to distribute covers distributing the software with changes and new works permitted by [Changes and New Works License](#changes-and-new-works-license).

## Notices

You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms or the URL for them above, as well as copies of any plain-text lines beginning with `Required Notice:` that the licensor provided with the software. For example:

> Required Notice: Copyright Yuhang Yang (yyhuni)

## Changes and New Works License

The licensor grants you an additional copyright license to make changes and new works based on the software for any permitted purpose.

## Patent License

The licensor grants you a patent license for the software that covers patent claims the licensor can license, or becomes able to license, that you would infringe by using the software.

## Noncommercial Purposes

Any noncommercial purpose is a permitted purpose.

## Personal Uses

Personal use for research, experiment, and testing for the benefit of public knowledge, personal study, private entertainment, hobby projects, amateur pursuits, or religious observance, without any anticipated commercial application, is use for a permitted purpose.

## Noncommercial Organizations

Use by any charitable organization, educational institution, public research organization, public safety or health organization, environmental protection organization, or government institution is use for a permitted purpose regardless of the source of funding or obligations resulting from the funding.

## Fair Use

You may have "fair use" rights for the software under the law. These terms do not limit them.

## No Other Rights

These terms do not allow you to sublicense or transfer any of your licenses to anyone else, or prevent the licensor from granting licenses to anyone else. These terms do not imply any other licenses.

## Patent Defense

If you make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.

## Violations

The first time you are notified in writing that you have violated any of these terms, or done anything with the software not covered by your licenses, your licenses can nonetheless continue if you come into full compliance with these terms, and take practical steps to correct past violations, within 32 days of receiving notice. Otherwise, all your licenses end immediately.

## No Liability

***As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.***

## Definitions

The **licensor** is the individual or entity offering these terms, and the **software** is the software the licensor makes available under these terms.

**You** refers to the individual or entity agreeing to these terms.

**Your company** is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. **Control** means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.

**Your licenses** are all the licenses granted to you for the software under these terms.

**Use** means anything you do with the software requiring one of your licenses.

Added: GNU General Public License, Version 3

GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

[The remainder of the new LICENSE is the standard, verbatim text of the GNU GPL v3: the Preamble, Terms and Conditions sections 0-17, "END OF TERMS AND CONDITIONS", and the "How to Apply These Terms to Your New Programs" appendix, as published at <https://www.gnu.org/licenses/>.]
189
README.md
189
README.md
@@ -1,10 +1,30 @@
|
||||
<h1 align="center">Xingrin - 星环</h1>
|
||||
<h1 align="center">XingRin - 星环</h1>
|
||||
|
||||
<p align="center">
|
||||
<b>一款现代化的企业级漏洞扫描与资产管理平台</b><br>
|
||||
提供自动化安全检测、资产发现、漏洞管理等功能
|
||||
<b>🛡️ 攻击面管理平台 (ASM) | 自动化资产发现与漏洞扫描系统</b>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/yyhuni/xingrin/stargazers"><img src="https://img.shields.io/github/stars/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub stars"></a>
|
||||
<a href="https://github.com/yyhuni/xingrin/network/members"><img src="https://img.shields.io/github/forks/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub forks"></a>
|
||||
<a href="https://github.com/yyhuni/xingrin/issues"><img src="https://img.shields.io/github/issues/yyhuni/xingrin?style=flat-square&logo=github" alt="GitHub issues"></a>
|
||||
<a href="https://github.com/yyhuni/xingrin/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-PolyForm%20NC-blue?style=flat-square" alt="License"></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="#-功能特性">功能特性</a> •
|
||||
<a href="#-快速开始">快速开始</a> •
|
||||
<a href="#-文档">文档</a> •
|
||||
<a href="#-技术栈">技术栈</a> •
|
||||
<a href="#-反馈与贡献">反馈与贡献</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<sub>🔍 关键词: ASM | 攻击面管理 | 漏洞扫描 | 资产发现 | Bug Bounty | 渗透测试 | Nuclei | 子域名枚举 | EASM</sub>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
<p align="center">
|
||||
<b>🌗 明暗模式切换</b>
|
||||
</p>
|
||||
@@ -29,6 +49,9 @@
- [📖 技术文档](./docs/README.md) - 技术文档导航(🚧 持续完善中)
- [🚀 快速开始](./docs/quick-start.md) - 一键安装和部署指南
- [🔄 版本管理](./docs/version-management.md) - Git Tag 驱动的自动化版本管理系统
- [📦 Nuclei 模板架构](./docs/nuclei-template-architecture.md) - 模板仓库的存储与同步
- [📖 字典文件架构](./docs/wordlist-architecture.md) - 字典文件的存储与同步
- [🔍 扫描流程架构](./docs/scan-flow-architecture.md) - 完整扫描流程与工具编排

---
|
||||
@@ -46,6 +69,54 @@
|
||||
- **自定义流程** - YAML 配置扫描流程,灵活编排
|
||||
- **定时扫描** - Cron 表达式配置,自动化周期扫描
|
||||
|
||||
#### 扫描流程架构
|
||||
|
||||
完整的扫描流程包括:子域名发现、端口扫描、站点发现、URL 收集、目录扫描、漏洞扫描等阶段
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
START["开始扫描"]
|
||||
|
||||
subgraph STAGE1["阶段 1: 资产发现"]
|
||||
direction TB
|
||||
SUB["子域名发现<br/>subfinder, amass, puredns"]
|
||||
PORT["端口扫描<br/>naabu"]
|
||||
SITE["站点识别<br/>httpx"]
|
||||
SUB --> PORT --> SITE
|
||||
end
|
||||
|
||||
subgraph STAGE2["阶段 2: 深度分析"]
|
||||
direction TB
|
||||
URL["URL 收集<br/>waymore, katana"]
|
||||
DIR["目录扫描<br/>ffuf"]
|
||||
end
|
||||
|
||||
subgraph STAGE3["阶段 3: 漏洞检测"]
|
||||
VULN["漏洞扫描<br/>nuclei, dalfox"]
|
||||
end
|
||||
|
||||
FINISH["扫描完成"]
|
||||
|
||||
START --> STAGE1
|
||||
SITE --> STAGE2
|
||||
STAGE2 --> STAGE3
|
||||
STAGE3 --> FINISH
|
||||
|
||||
style START fill:#34495e,stroke:#2c3e50,stroke-width:2px,color:#fff
|
||||
style FINISH fill:#27ae60,stroke:#229954,stroke-width:2px,color:#fff
|
||||
style STAGE1 fill:#3498db,stroke:#2980b9,stroke-width:2px,color:#fff
|
||||
style STAGE2 fill:#9b59b6,stroke:#8e44ad,stroke-width:2px,color:#fff
|
||||
style STAGE3 fill:#e67e22,stroke:#d35400,stroke-width:2px,color:#fff
|
||||
style SUB fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
|
||||
style PORT fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
|
||||
style SITE fill:#5dade2,stroke:#3498db,stroke-width:1px,color:#fff
|
||||
style URL fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
|
||||
style DIR fill:#bb8fce,stroke:#9b59b6,stroke-width:1px,color:#fff
|
||||
style VULN fill:#f0b27a,stroke:#e67e22,stroke-width:1px,color:#fff
|
||||
```
|
||||
|
||||
详细说明请查看 [扫描流程架构文档](./docs/scan-flow-architecture.md)
|
||||
|
||||
### 🖥️ 分布式架构
|
||||
- **多节点扫描** - 支持部署多个 Worker 节点,横向扩展扫描能力
|
||||
- **本地节点** - 零配置,安装即自动注册本地 Docker Worker
|
||||
@@ -54,34 +125,41 @@
|
||||
- **节点监控** - 实时心跳检测,CPU/内存/磁盘状态监控
|
||||
- **断线重连** - 节点离线自动检测,恢复后自动重新接入
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ 主服务器 (Master) │
|
||||
│ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │
|
||||
│ │ Next.js │ │ Django │ │ Postgres│ │ Redis │ │
|
||||
│ │ 前端 │ │ 后端 │ │ 数据库 │ │ 缓存 │ │
|
||||
│ └─────────┘ └────┬────┘ └─────────┘ └─────────┘ │
|
||||
│ │ │
|
||||
│ ┌─────┴─────┐ │
|
||||
│ │ 任务调度器 │ │
|
||||
│ │ Scheduler │ │
|
||||
│ └─────┬─────┘ │
|
||||
└────────────────────┼────────────────────────────────────────────┘
|
||||
│
|
||||
┌────────────┼────────────┐
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
┌───────────┐ ┌───────────┐ ┌───────────┐
|
||||
│ Worker 1 │ │ Worker 2 │ │ Worker N │
|
||||
│ (本地) │ │ (远程) │ │ (远程) │
|
||||
├───────────┤ ├───────────┤ ├───────────┤
|
||||
│ • Nuclei │ │ • Nuclei │ │ • Nuclei │
|
||||
│ • httpx │ │ • httpx │ │ • httpx │
|
||||
│ • naabu │ │ • naabu │ │ • naabu │
|
||||
│ • ... │ │ • ... │ │ • ... │
|
||||
├───────────┤ ├───────────┤ ├───────────┤
|
||||
│ 心跳上报 │ │ 心跳上报 │ │ 心跳上报 │
|
||||
└───────────┘ └───────────┘ └───────────┘
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph MASTER["主服务器 (Master Server)"]
|
||||
direction TB
|
||||
|
||||
REDIS["Redis 负载缓存"]
|
||||
|
||||
subgraph SCHEDULER["任务调度器 (Task Distributor)"]
|
||||
direction TB
|
||||
SUBMIT["接收扫描任务"]
|
||||
SELECT["负载感知选择"]
|
||||
DISPATCH["智能分发"]
|
||||
|
||||
SUBMIT --> SELECT
|
||||
SELECT --> DISPATCH
|
||||
end
|
||||
|
||||
REDIS -.负载数据.-> SELECT
|
||||
end
|
||||
|
||||
subgraph WORKERS["Worker 节点集群"]
|
||||
direction TB
|
||||
|
||||
W1["Worker 1 (本地)<br/>CPU: 45% | MEM: 60%"]
|
||||
W2["Worker 2 (远程)<br/>CPU: 30% | MEM: 40%"]
|
||||
W3["Worker N (远程)<br/>CPU: 90% | MEM: 85%"]
|
||||
end
|
||||
|
||||
DISPATCH -->|任务分发| W1
|
||||
DISPATCH -->|任务分发| W2
|
||||
DISPATCH -->|高负载跳过| W3
|
||||
|
||||
W1 -.心跳上报.-> REDIS
|
||||
W2 -.心跳上报.-> REDIS
|
||||
W3 -.心跳上报.-> REDIS
|
||||
```
|
||||
|
||||
### 📊 可视化界面
|
||||
@@ -98,27 +176,12 @@
- **数据库**: PostgreSQL + Redis
- **部署**: Docker + Nginx

### 🔧 内置扫描工具

| 类别 | 工具 |
|------|------|
| 子域名爆破 | puredns, massdns, dnsgen |
| 被动发现 | subfinder, amass, assetfinder, Sublist3r |
| 端口扫描 | naabu |
| 站点发现 | httpx |
| 目录扫描 | ffuf |
| 爬虫 | katana |
| 被动URL收集 | waymore, uro |
| 漏洞扫描 | nuclei, dalfox |

---
|
||||
|
||||
## 📦 快速开始
|
||||
|
||||
### 环境要求
|
||||
|
||||
- **操作系统**: Ubuntu 20.04+ / Debian 11+ (推荐)
|
||||
- **硬件**: 2核 4G 内存起步,10GB+ 磁盘空间
|
||||
- **硬件**: 2核 4G 内存起步,20GB+ 磁盘空间
|
||||
|
||||
### 一键安装
|
||||
|
||||
@@ -129,14 +192,11 @@ cd xingrin
|
||||
|
||||
# 安装并启动(生产模式)
|
||||
sudo ./install.sh
|
||||
|
||||
# 开发模式
|
||||
sudo ./install.sh --dev
|
||||
```
|
||||
|
||||
### 访问服务
|
||||
|
||||
- **Web 界面**: `https://localhost` 或 `http://localhost`
|
||||
- **Web 界面**: `https://localhost`
|
||||
|
||||
### 常用命令
|
||||
|
||||
@@ -156,9 +216,6 @@ sudo ./uninstall.sh
|
||||
# 更新
|
||||
sudo ./update.sh
|
||||
```
|
||||
## 日志
- 项目日志:`/opt/xingrin/logs`,其中 error 文件只记录错误,xingrin.log 记录包括错误在内的全部运行日志
- 工具输出日志:`/opt/xingrin/results`,保存 naabu、httpx 等扫描工具的运行结果
|
||||
|
||||
## 🤝 反馈与贡献
|
||||
|
||||
@@ -190,22 +247,30 @@ sudo ./update.sh
|
||||
- 遵守所在地区的法律法规
|
||||
- 承担因滥用产生的一切后果
|
||||
|
||||
## 🌟 Star History
|
||||
|
||||
如果这个项目对你有帮助,请给一个 ⭐ Star 支持一下!
|
||||
|
||||
[](https://star-history.com/#yyhuni/xingrin&Date)
|
||||
|
||||
## 📄 许可证
|
||||
|
||||
本项目采用 [PolyForm Noncommercial License 1.0.0](LICENSE) 许可证。
|
||||
本项目采用 [GNU General Public License v3.0](LICENSE) 许可证。
|
||||
|
||||
### 允许的用途
|
||||
|
||||
- ✅ 个人学习和研究
|
||||
- ✅ 非商业安全测试
|
||||
- ✅ 教育机构使用
|
||||
- ✅ 非营利组织使用
|
||||
- ✅ 商业和非商业使用
|
||||
- ✅ 修改和分发
|
||||
- ✅ 专利使用
|
||||
- ✅ 私人使用
|
||||
|
||||
### 禁止的用途
|
||||
### 义务和限制
|
||||
|
||||
- ❌ **商业用途**(包括但不限于:出售、商业服务、SaaS 等)
|
||||
- 📋 **开源义务**:分发时必须提供源代码
|
||||
- 📋 **相同许可**:衍生作品必须使用相同许可证
|
||||
- 📋 **版权声明**:必须保留原始版权和许可证声明
|
||||
- ❌ **责任免除**:不提供任何担保
|
||||
- ❌ 未经授权的渗透测试
|
||||
- ❌ 任何违法行为
|
||||
|
||||
如需商业授权,请联系作者。
|
||||
|
||||
|
||||
@@ -14,6 +14,10 @@ import os
|
||||
import sys
|
||||
import requests
|
||||
import logging
|
||||
import urllib3
|
||||
|
||||
# 禁用自签名证书的 SSL 警告(远程 Worker 场景)
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -30,17 +34,27 @@ def fetch_config_and_setup_django():
|
||||
print("[ERROR] 缺少 SERVER_URL 环境变量", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
config_url = f"{server_url}/api/workers/config/"
|
||||
# 通过环境变量声明 Worker 身份(本地/远程)
|
||||
is_local = os.environ.get("IS_LOCAL", "false").lower() == "true"
|
||||
config_url = f"{server_url}/api/workers/config/?is_local={str(is_local).lower()}"
|
||||
print(f"[CONFIG] 正在从配置中心获取配置: {config_url}")
|
||||
print(f"[CONFIG] IS_LOCAL={is_local}")
|
||||
try:
|
||||
resp = requests.get(config_url, timeout=10)
|
||||
# verify=False: 远程 Worker 通过 HTTPS 访问时可能使用自签名证书
|
||||
resp = requests.get(config_url, timeout=10, verify=False)
|
||||
resp.raise_for_status()
|
||||
config = resp.json()
|
||||
|
||||
# 数据库配置(必需)
|
||||
os.environ.setdefault("DB_HOST", config['db']['host'])
|
||||
os.environ.setdefault("DB_PORT", config['db']['port'])
|
||||
os.environ.setdefault("DB_NAME", config['db']['name'])
|
||||
os.environ.setdefault("DB_USER", config['db']['user'])
|
||||
db_host = config['db']['host']
|
||||
db_port = config['db']['port']
|
||||
db_name = config['db']['name']
|
||||
db_user = config['db']['user']
|
||||
|
||||
os.environ.setdefault("DB_HOST", db_host)
|
||||
os.environ.setdefault("DB_PORT", db_port)
|
||||
os.environ.setdefault("DB_NAME", db_name)
|
||||
os.environ.setdefault("DB_USER", db_user)
|
||||
os.environ.setdefault("DB_PASSWORD", config['db']['password'])
|
||||
|
||||
# Redis 配置
|
||||
@@ -52,7 +66,12 @@ def fetch_config_and_setup_django():
|
||||
os.environ.setdefault("ENABLE_COMMAND_LOGGING", str(config['logging']['enableCommandLogging']).lower())
|
||||
os.environ.setdefault("DEBUG", str(config['debug']))
|
||||
|
||||
print(f"[CONFIG] 从配置中心获取配置成功: {config_url}")
|
||||
print(f"[CONFIG] ✓ 配置获取成功")
|
||||
print(f"[CONFIG] DB_HOST: {db_host}")
|
||||
print(f"[CONFIG] DB_PORT: {db_port}")
|
||||
print(f"[CONFIG] DB_NAME: {db_name}")
|
||||
print(f"[CONFIG] DB_USER: {db_user}")
|
||||
print(f"[CONFIG] REDIS_URL: {config['redisUrl']}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"[ERROR] 获取配置失败: {config_url} - {e}", file=sys.stderr)
|
||||
|
||||
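For reference, the payload returned by `/api/workers/config/` has roughly the shape below. This is a sketch assembled from the fields read above and from the config endpoint later in this diff; all concrete values are placeholders.

```python
# Illustrative example of GET /api/workers/config/?is_local=false (values are placeholders).
example_config = {
    "db": {
        "host": "203.0.113.10",   # "postgres" would be returned when is_local=true
        "port": "5432",
        "name": "xingrin",        # placeholder database name
        "user": "xingrin",        # placeholder database user
        "password": "********",
    },
    "redisUrl": "redis://203.0.113.10:6379/0",
    "logging": {"enableCommandLogging": True},
    "debug": False,
    "paths": {"results": "/opt/xingrin/results", "logs": "/opt/xingrin/logs"},
}
```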
@@ -0,0 +1,10 @@
|
||||
"""
|
||||
通用服务模块
|
||||
|
||||
提供系统级别的公共服务,包括:
|
||||
- SystemLogService: 系统日志读取服务
|
||||
"""
|
||||
|
||||
from .system_log_service import SystemLogService
|
||||
|
||||
__all__ = ['SystemLogService']
|
||||
|
||||
@@ -1,24 +1,43 @@
|
||||
import glob
|
||||
import json
|
||||
"""
|
||||
系统日志服务模块
|
||||
|
||||
提供系统日志的读取功能,支持:
|
||||
- 从日志目录读取日志文件
|
||||
- 限制返回行数,防止内存溢出
|
||||
"""
|
||||
|
||||
import logging
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SystemLogService:
|
||||
"""
|
||||
系统日志服务类
|
||||
|
||||
负责读取系统日志文件,支持从容器内路径或宿主机挂载路径读取日志。
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.log_globs = [
|
||||
"/app/backend/logs/*",
|
||||
"/opt/xingrin/logs/*",
|
||||
]
|
||||
self.default_lines = 200
|
||||
self.max_lines = 10000
|
||||
self.timeout_seconds = 3
|
||||
# 日志文件路径(容器内路径,通过 volume 挂载到宿主机 /opt/xingrin/logs)
|
||||
self.log_file = "/app/backend/logs/xingrin.log"
|
||||
self.default_lines = 200 # 默认返回行数
|
||||
self.max_lines = 10000 # 最大返回行数限制
|
||||
self.timeout_seconds = 3 # tail 命令超时时间
|
||||
|
||||
def get_logs_content(self, lines: int | None = None) -> str:
|
||||
"""
|
||||
获取系统日志内容
|
||||
|
||||
Args:
|
||||
lines: 返回的日志行数,默认 200 行,最大 10000 行
|
||||
|
||||
Returns:
|
||||
str: 日志内容,每行以换行符分隔,保持原始顺序
|
||||
"""
|
||||
# 参数校验和默认值处理
|
||||
if lines is None:
|
||||
lines = self.default_lines
|
||||
|
||||
@@ -28,16 +47,8 @@ class SystemLogService:
|
||||
if lines > self.max_lines:
|
||||
lines = self.max_lines
|
||||
|
||||
files: list[str] = []
|
||||
for pattern in self.log_globs:
|
||||
matched = sorted(glob.glob(pattern))
|
||||
if matched:
|
||||
files = matched
|
||||
break
|
||||
if not files:
|
||||
return ""
|
||||
|
||||
cmd = ["tail", "-q", "-n", str(lines), *files]
|
||||
# 使用 tail 命令读取日志文件末尾内容
|
||||
cmd = ["tail", "-n", str(lines), self.log_file]
|
||||
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
@@ -54,25 +65,5 @@ class SystemLogService:
|
||||
(result.stderr or "").strip(),
|
||||
)
|
||||
|
||||
raw = result.stdout or ""
|
||||
raw_lines = [ln for ln in raw.splitlines() if ln.strip()]
|
||||
|
||||
parsed: list[tuple[datetime | None, int, str]] = []
|
||||
for idx, line in enumerate(raw_lines):
|
||||
ts: datetime | None = None
|
||||
if line.startswith("{") and line.endswith("}"):
|
||||
try:
|
||||
obj = json.loads(line)
|
||||
asctime = obj.get("asctime")
|
||||
if isinstance(asctime, str):
|
||||
ts = datetime.strptime(asctime, "%Y-%m-%d %H:%M:%S")
|
||||
except Exception:
|
||||
ts = None
|
||||
parsed.append((ts, idx, line))
|
||||
|
||||
parsed.sort(key=lambda x: (x[0] is None, x[0] or datetime.min, x[1]))
|
||||
sorted_lines = [x[2] for x in parsed]
|
||||
if len(sorted_lines) > lines:
|
||||
sorted_lines = sorted_lines[-lines:]
|
||||
|
||||
return "\n".join(sorted_lines) + ("\n" if sorted_lines else "")
|
||||
# 直接返回原始内容,保持文件中的顺序
|
||||
return result.stdout or ""
|
||||
|
||||
@@ -27,3 +27,10 @@ vulnerabilities_saved = Signal()
|
||||
# - worker_name: str Worker 名称
|
||||
# - message: str 失败原因
|
||||
worker_delete_failed = Signal()
|
||||
|
||||
# 所有 Worker 高负载信号
|
||||
# 参数:
|
||||
# - worker_name: str 被选中的 Worker 名称
|
||||
# - cpu: float CPU 使用率
|
||||
# - mem: float 内存使用率
|
||||
all_workers_high_load = Signal()
|
||||
|
||||
@@ -1,13 +1,21 @@
|
||||
"""
|
||||
通用模块 URL 配置
|
||||
|
||||
路由说明:
|
||||
- /api/auth/* 认证相关接口(登录、登出、用户信息)
|
||||
- /api/system/* 系统管理接口(日志查看等)
|
||||
"""
|
||||
|
||||
from django.urls import path
|
||||
from .views import LoginView, LogoutView, MeView, ChangePasswordView, SystemLogsView
|
||||
|
||||
urlpatterns = [
|
||||
# 认证相关
|
||||
path('auth/login/', LoginView.as_view(), name='auth-login'),
|
||||
path('auth/logout/', LogoutView.as_view(), name='auth-logout'),
|
||||
path('auth/me/', MeView.as_view(), name='auth-me'),
|
||||
path('auth/change-password/', ChangePasswordView.as_view(), name='auth-change-password'),
|
||||
|
||||
# 系统管理
|
||||
path('system/logs/', SystemLogsView.as_view(), name='system-logs'),
|
||||
]
|
||||
|
||||
@@ -1,3 +1,11 @@
|
||||
"""
|
||||
通用模块视图导出
|
||||
|
||||
包含:
|
||||
- 认证相关视图:登录、登出、用户信息、修改密码
|
||||
- 系统日志视图:实时日志查看
|
||||
"""
|
||||
|
||||
from .auth_views import LoginView, LogoutView, MeView, ChangePasswordView
|
||||
from .system_log_views import SystemLogsView
|
||||
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
"""
|
||||
系统日志视图模块
|
||||
|
||||
提供系统日志的 REST API 接口,供前端实时查看系统运行日志。
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from django.utils.decorators import method_decorator
|
||||
@@ -15,6 +21,26 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
@method_decorator(csrf_exempt, name="dispatch")
|
||||
class SystemLogsView(APIView):
|
||||
"""
|
||||
系统日志 API 视图
|
||||
|
||||
GET /api/system/logs/
|
||||
获取系统日志内容
|
||||
|
||||
Query Parameters:
|
||||
lines (int, optional): 返回的日志行数,默认 200,最大 10000
|
||||
|
||||
Response:
|
||||
{
|
||||
"content": "日志内容字符串..."
|
||||
}
|
||||
|
||||
Note:
|
||||
- 当前为开发阶段,暂时允许匿名访问
|
||||
- 生产环境应添加管理员权限验证
|
||||
"""
|
||||
|
||||
# TODO: 生产环境应改为 IsAdminUser 权限
|
||||
authentication_classes = []
|
||||
permission_classes = [AllowAny]
|
||||
|
||||
@@ -23,10 +49,17 @@ class SystemLogsView(APIView):
|
||||
self.service = SystemLogService()
|
||||
|
||||
def get(self, request):
|
||||
"""
|
||||
获取系统日志
|
||||
|
||||
支持通过 lines 参数控制返回行数,用于前端分页或实时刷新场景。
|
||||
"""
|
||||
try:
|
||||
# 解析 lines 参数
|
||||
lines_raw = request.query_params.get("lines")
|
||||
lines = int(lines_raw) if lines_raw is not None else None
|
||||
|
||||
# 调用服务获取日志内容
|
||||
content = self.service.get_logs_content(lines=lines)
|
||||
return Response({"content": content})
|
||||
except ValueError:
|
||||
|
||||
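For quick testing, the endpoint above can be exercised with a request like the following sketch; the anonymous access and the `https://localhost` entry point mirror the docstring and README, and should be revisited once admin authentication is enforced.

```python
import requests

# Sketch: fetch the last 500 lines of the system log via GET /api/system/logs/.
# verify=False mirrors the self-signed-certificate setup used elsewhere in this project.
resp = requests.get(
    "https://localhost/api/system/logs/",
    params={"lines": 500},
    timeout=10,
    verify=False,
)
resp.raise_for_status()
print(resp.json()["content"])
```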
@@ -241,8 +241,9 @@ class WorkerDeployConsumer(AsyncWebsocketConsumer):
|
||||
}))
|
||||
return
|
||||
|
||||
django_host = f"{public_host}:{server_port}" # Django / 心跳上报使用
|
||||
heartbeat_api_url = f"http://{django_host}" # 基础 URL,agent 会加 /api/...
|
||||
# 远程 Worker 通过 nginx HTTPS 访问(nginx 反代到后端 8888)
|
||||
# 使用 https://{PUBLIC_HOST} 而不是直连 8888 端口
|
||||
heartbeat_api_url = f"https://{public_host}" # 基础 URL,agent 会加 /api/...
|
||||
|
||||
session_name = f'xingrin_deploy_{self.worker_id}'
|
||||
remote_script_path = '/tmp/xingrin_deploy.sh'
|
||||
|
||||
@@ -10,6 +10,8 @@ class WorkerNode(models.Model):
|
||||
('deploying', '部署中'),
|
||||
('online', '在线'),
|
||||
('offline', '离线'),
|
||||
('updating', '更新中'),
|
||||
('outdated', '版本过低'),
|
||||
]
|
||||
|
||||
name = models.CharField(max_length=100, help_text='节点名称')
|
||||
|
||||
@@ -198,9 +198,27 @@ class NucleiTemplateRepoService:
|
||||
|
||||
# 判断是 clone 还是 pull
|
||||
if git_dir.is_dir():
|
||||
# 已有仓库,执行 pull
|
||||
cmd = ["git", "-C", str(local_path), "pull", "--ff-only"]
|
||||
action = "pull"
|
||||
# 检查远程地址是否变化
|
||||
current_remote = subprocess.run(
|
||||
["git", "-C", str(local_path), "remote", "get-url", "origin"],
|
||||
check=False,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
current_url = current_remote.stdout.strip() if current_remote.returncode == 0 else ""
|
||||
|
||||
if current_url != obj.repo_url:
|
||||
# 远程地址变化,删除旧目录重新 clone
|
||||
logger.info("nuclei 模板仓库 %s 远程地址变化,重新 clone: %s -> %s", obj.id, current_url, obj.repo_url)
|
||||
shutil.rmtree(local_path)
|
||||
local_path.mkdir(parents=True, exist_ok=True)
|
||||
cmd = ["git", "clone", "--depth", "1", obj.repo_url, str(local_path)]
|
||||
action = "clone"
|
||||
else:
|
||||
# 已有仓库且地址未变,执行 pull
|
||||
cmd = ["git", "-C", str(local_path), "pull", "--ff-only"]
|
||||
action = "pull"
|
||||
else:
|
||||
# 新仓库,执行 clone
|
||||
if local_path.exists() and not local_path.is_dir():
|
||||
|
||||
@@ -8,13 +8,32 @@
|
||||
2. 选择负载最低的 Worker(可能是本地或远程)
|
||||
3. 本地 Worker:直接执行 docker run
|
||||
4. 远程 Worker:通过 SSH 执行 docker run
|
||||
5. 任务执行完自动销毁容器
|
||||
5. 任务执行完自动销毁容器(--rm)
|
||||
|
||||
镜像版本管理:
|
||||
- 版本锁定:使用 settings.IMAGE_TAG 确保 server 和 worker 版本一致
|
||||
- 预拉取策略:安装时预拉取镜像,执行时使用 --pull=missing
|
||||
- 本地开发:可通过 TASK_EXECUTOR_IMAGE 环境变量指向本地镜像
|
||||
|
||||
环境变量注入:
- Worker 容器不使用 env_file,通过 docker run -e 动态注入
- 只注入 SERVER_URL,容器启动后从配置中心获取完整配置
- 本地 Worker:SERVER_URL = http://server:{port}(Docker 内部网络)
- 远程 Worker:SERVER_URL = http://{public_host}:{port}(公网地址)

任务启动流程:
1. Server 调用 execute_scan_flow() 等方法提交任务
2. select_best_worker() 从 Redis 读取心跳数据,选择负载最低的节点
3. _build_docker_command() 构建完整的 docker run 命令:
- 设置网络(本地加入 Docker 网络,远程不指定)
- 注入环境变量(-e SERVER_URL=...)
- 挂载结果和日志目录(-v)
- 指定执行脚本(python -m apps.scan.scripts.xxx)
4. _execute_docker_command() 执行命令:
- 本地:subprocess.run() 直接执行
- 远程:paramiko SSH 执行
5. docker run -d 立即返回容器 ID,任务在后台执行

特点:
- 负载感知:任务优先分发到最空闲的机器
- 统一调度:本地和远程 Worker 使用相同的选择逻辑
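As a rough illustration of the flow described above, the pieces assemble into a `docker run` invocation along these lines. This is a simplified sketch, not the actual `_build_docker_command()` implementation: the image name, container-side mount points and the script arguments are placeholders; only SERVER_URL and IS_LOCAL reflect the env injection documented in this file.

```python
import shlex

# Simplified sketch of the command built for a remote worker (illustrative values only).
server_url = "https://203.0.113.10"          # remote workers reach the server via the nginx HTTPS proxy
env_vars = [
    f"-e SERVER_URL={shlex.quote(server_url)}",
    "-e IS_LOCAL=false",
]
volumes = [
    "-v /opt/xingrin/results:/opt/xingrin/results",  # HOST_RESULTS_DIR (container path assumed)
    "-v /opt/xingrin/logs:/opt/xingrin/logs",        # HOST_LOGS_DIR (container path assumed)
]
docker_image = "example/xingrin-worker:v1.0.0"       # placeholder for self.docker_image
script = "python -m apps.scan.scripts.run_initiate_scan --scan_id 1"  # module path pattern from the notes above
cmd = " ".join(["docker", "run", "-d", "--rm", *env_vars, *volumes, docker_image, script])
print(cmd)
```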
@@ -134,11 +153,30 @@ class TaskDistributor:
|
||||
else:
|
||||
scored_workers.append((worker, score, cpu, mem))
|
||||
|
||||
# 降级策略:如果没有正常负载的,使用高负载中最低的
|
||||
# 降级策略:如果没有正常负载的,等待后重新选择
|
||||
if not scored_workers:
|
||||
if high_load_workers:
|
||||
logger.warning("所有 Worker 高负载,降级选择负载最低的")
|
||||
scored_workers = high_load_workers
|
||||
# 高负载时先等待,给系统喘息时间(默认 60 秒)
|
||||
high_load_wait = getattr(settings, 'HIGH_LOAD_WAIT_SECONDS', 60)
|
||||
logger.warning("所有 Worker 高负载,等待 %d 秒后重试...", high_load_wait)
|
||||
time.sleep(high_load_wait)
|
||||
|
||||
# 等待后不再递归重新选择;为避免无限递归,直接使用高负载 Worker 中负载最低的一个
|
||||
high_load_workers.sort(key=lambda x: x[1])
|
||||
best_worker, _, cpu, mem = high_load_workers[0]
|
||||
|
||||
# 发送高负载通知
|
||||
from apps.common.signals import all_workers_high_load
|
||||
all_workers_high_load.send(
|
||||
sender=self.__class__,
|
||||
worker_name=best_worker.name,
|
||||
cpu=cpu,
|
||||
mem=mem
|
||||
)
|
||||
|
||||
logger.info("选择 Worker: %s (CPU: %.1f%%, MEM: %.1f%%)", best_worker.name, cpu, mem)
|
||||
return best_worker
|
||||
else:
|
||||
logger.warning("没有可用的 Worker")
|
||||
return None
|
||||
@@ -194,16 +232,29 @@ class TaskDistributor:
|
||||
network_arg = f"--network {settings.DOCKER_NETWORK_NAME}"
|
||||
server_url = f"http://server:{settings.SERVER_PORT}"
|
||||
else:
|
||||
# 远程:无需指定网络,使用公网地址
|
||||
# 远程:通过 Nginx 反向代理访问(HTTPS,不直连 8888 端口)
|
||||
network_arg = ""
|
||||
server_url = f"http://{settings.PUBLIC_HOST}:{settings.SERVER_PORT}"
|
||||
server_url = f"https://{settings.PUBLIC_HOST}"
|
||||
|
||||
# 挂载路径(所有节点统一使用固定路径)
|
||||
host_results_dir = settings.HOST_RESULTS_DIR # /opt/xingrin/results
|
||||
host_logs_dir = settings.HOST_LOGS_DIR # /opt/xingrin/logs
|
||||
|
||||
# 环境变量:只需 SERVER_URL,其他配置容器启动时从配置中心获取
|
||||
env_vars = [f"-e SERVER_URL={shlex.quote(server_url)}"]
|
||||
# 环境变量:SERVER_URL + IS_LOCAL,其他配置容器启动时从配置中心获取
|
||||
# IS_LOCAL 用于 Worker 向配置中心声明身份,决定返回的数据库地址
|
||||
# Prefect 本地模式配置:启用 ephemeral server(本地临时服务器)
|
||||
is_local_str = "true" if worker.is_local else "false"
|
||||
env_vars = [
|
||||
f"-e SERVER_URL={shlex.quote(server_url)}",
|
||||
f"-e IS_LOCAL={is_local_str}",
|
||||
"-e PREFECT_HOME=/tmp/.prefect", # 设置 Prefect 数据目录到可写位置
|
||||
"-e PREFECT_SERVER_EPHEMERAL_ENABLED=true", # 启用 ephemeral server(本地临时服务器)
|
||||
"-e PREFECT_SERVER_EPHEMERAL_STARTUP_TIMEOUT_SECONDS=120", # 增加启动超时时间
|
||||
"-e PREFECT_SERVER_DATABASE_CONNECTION_URL=sqlite+aiosqlite:////tmp/.prefect/prefect.db", # 使用 /tmp 下的 SQLite
|
||||
"-e PREFECT_LOGGING_LEVEL=DEBUG", # 启用 DEBUG 级别日志
|
||||
"-e PREFECT_LOGGING_SERVER_LEVEL=DEBUG", # Server 日志级别
|
||||
"-e PREFECT_DEBUG_MODE=true", # 启用调试模式
|
||||
]
|
||||
|
||||
# 挂载卷
|
||||
volumes = [
|
||||
@@ -383,8 +434,20 @@ class TaskDistributor:
|
||||
Note:
|
||||
engine_config 由 Flow 内部通过 scan_id 查询数据库获取
|
||||
"""
|
||||
logger.info("="*60)
|
||||
logger.info("execute_scan_flow 开始")
|
||||
logger.info(" scan_id: %s", scan_id)
|
||||
logger.info(" target_name: %s", target_name)
|
||||
logger.info(" target_id: %s", target_id)
|
||||
logger.info(" scan_workspace_dir: %s", scan_workspace_dir)
|
||||
logger.info(" engine_name: %s", engine_name)
|
||||
logger.info(" docker_image: %s", self.docker_image)
|
||||
logger.info("="*60)
|
||||
|
||||
# 1. 等待提交间隔(后台线程执行,不阻塞 API)
|
||||
logger.info("等待提交间隔...")
|
||||
self._wait_for_submit_interval()
|
||||
logger.info("提交间隔等待完成")
|
||||
|
||||
# 2. 选择最佳 Worker
|
||||
worker = self.select_best_worker()
|
||||
|
||||
@@ -134,5 +134,57 @@ class WorkerService:
|
||||
logger.warning(f"[卸载] Worker {worker_id} 远程卸载异常: {e}")
|
||||
return False, f"远程卸载异常: {str(e)}"
|
||||
|
||||
def execute_remote_command(
|
||||
self,
|
||||
ip_address: str,
|
||||
ssh_port: int,
|
||||
username: str,
|
||||
password: str | None,
|
||||
command: str
|
||||
) -> tuple[bool, str]:
|
||||
"""
|
||||
在远程主机上执行命令
|
||||
|
||||
Args:
|
||||
ip_address: SSH 主机地址
|
||||
ssh_port: SSH 端口
|
||||
username: SSH 用户名
|
||||
password: SSH 密码
|
||||
command: 要执行的命令
|
||||
|
||||
Returns:
|
||||
(success, message) 元组
|
||||
"""
|
||||
if not password:
|
||||
return False, "未配置 SSH 密码"
|
||||
|
||||
try:
|
||||
import paramiko
|
||||
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
|
||||
ssh.connect(
|
||||
ip_address,
|
||||
port=ssh_port,
|
||||
username=username,
|
||||
password=password,
|
||||
timeout=30
|
||||
)
|
||||
|
||||
stdin, stdout, stderr = ssh.exec_command(command, timeout=120)
|
||||
exit_status = stdout.channel.recv_exit_status()
|
||||
|
||||
ssh.close()
|
||||
|
||||
if exit_status == 0:
|
||||
return True, stdout.read().decode().strip()
|
||||
else:
|
||||
error = stderr.read().decode().strip()
|
||||
return False, error
|
||||
|
||||
except Exception as e:
|
||||
return False, str(e)
|
||||
|
||||
|
||||
__all__ = ["WorkerService"]
|
||||
|
||||
@@ -116,7 +116,7 @@ class NucleiTemplateRepoViewSet(viewsets.ModelViewSet):
|
||||
return Response({"message": str(exc)}, status=status.HTTP_400_BAD_REQUEST)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
logger.error("刷新 Nuclei 模板仓库失败: %s", exc, exc_info=True)
|
||||
return Response({"message": "刷新仓库失败"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
return Response({"message": f"刷新仓库失败: {exc}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
return Response({"message": "刷新成功", "result": result}, status=status.HTTP_200_OK)
|
||||
|
||||
|
||||
@@ -118,8 +118,36 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
|
||||
|
||||
@action(detail=True, methods=['post'])
|
||||
def heartbeat(self, request, pk=None):
|
||||
"""接收心跳上报(写 Redis,首次心跳更新部署状态)"""
|
||||
"""
|
||||
接收心跳上报(写 Redis,首次心跳更新部署状态,检查版本)
|
||||
|
||||
请求体:
|
||||
{
|
||||
"cpu_percent": 50.0,
|
||||
"memory_percent": 60.0,
|
||||
"version": "v1.0.9"
|
||||
}
|
||||
|
||||
返回:
|
||||
{
|
||||
"status": "ok",
|
||||
"need_update": true/false,
|
||||
"server_version": "v1.0.19"
|
||||
}
|
||||
|
||||
状态流转:
|
||||
┌─────────────────────────────────────────────────────────────────────┐
|
||||
│ 场景 │ 状态变化 │
|
||||
├─────────────────────────────┼───────────────────────────────────────┤
|
||||
│ 首次心跳 │ pending/deploying → online │
|
||||
│ 远程 Worker 版本不匹配 │ online → updating → (更新成功) online │
|
||||
│ 远程 Worker 更新失败 │ updating → outdated │
|
||||
│ 本地 Worker 版本不匹配 │ online → outdated (需手动 update.sh) │
|
||||
│ 版本匹配 │ updating/outdated → online │
|
||||
└─────────────────────────────┴───────────────────────────────────────┘
|
||||
"""
|
||||
from apps.engine.services.worker_load_service import worker_load_service
|
||||
from django.conf import settings
|
||||
|
||||
worker = self.get_object()
|
||||
info = request.data if request.data else {}
|
||||
@@ -134,7 +162,122 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
|
||||
worker.status = 'online'
|
||||
worker.save(update_fields=['status'])
|
||||
|
||||
return Response({'status': 'ok'})
|
||||
# 3. 版本检查:比较 agent 版本与 server 版本
|
||||
agent_version = info.get('version', '')
|
||||
server_version = settings.IMAGE_TAG # Server 当前版本
|
||||
need_update = False
|
||||
|
||||
if agent_version and agent_version != 'unknown':
|
||||
# 版本不匹配时通知 agent 更新
|
||||
need_update = agent_version != server_version
|
||||
if need_update:
|
||||
logger.info(
|
||||
f"Worker {worker.name} 版本不匹配: agent={agent_version}, server={server_version}"
|
||||
)
|
||||
|
||||
# 远程 Worker:服务端主动通过 SSH 触发更新
|
||||
if not worker.is_local and worker.ip_address:
|
||||
self._trigger_remote_agent_update(worker, server_version)
|
||||
else:
|
||||
# 本地 Worker 版本不匹配:标记为 outdated
|
||||
# 需要用户手动执行 update.sh 更新
|
||||
if worker.status != 'outdated':
|
||||
worker.status = 'outdated'
|
||||
worker.save(update_fields=['status'])
|
||||
else:
|
||||
# 版本匹配,确保状态为 online
|
||||
if worker.status in ('updating', 'outdated'):
|
||||
worker.status = 'online'
|
||||
worker.save(update_fields=['status'])
|
||||
|
||||
return Response({
|
||||
'status': 'ok',
|
||||
'need_update': need_update,
|
||||
'server_version': server_version
|
||||
})
|
||||
|
||||
def _trigger_remote_agent_update(self, worker, target_version: str):
|
||||
"""
|
||||
通过 SSH 触发远程 agent 更新(后台执行,不阻塞心跳响应)
|
||||
|
||||
使用 Redis 锁防止重复触发(同一 worker 60秒内只触发一次)
|
||||
"""
|
||||
import redis
|
||||
from django.conf import settings as django_settings
|
||||
|
||||
redis_url = f"redis://{django_settings.REDIS_HOST}:{django_settings.REDIS_PORT}/{django_settings.REDIS_DB}"
|
||||
redis_client = redis.from_url(redis_url)
|
||||
lock_key = f"agent_update_lock:{worker.id}"
|
||||
|
||||
# 尝试获取锁(60秒过期,防止重复触发)
|
||||
if not redis_client.set(lock_key, "1", nx=True, ex=60):
|
||||
logger.debug(f"Worker {worker.name} 更新已在进行中,跳过")
|
||||
return
|
||||
|
||||
# 获取锁成功,设置状态为 updating
|
||||
self._set_worker_status(worker.id, 'updating')
|
||||
|
||||
# 提取数据避免后台线程访问 ORM
|
||||
worker_id = worker.id
|
||||
worker_name = worker.name
|
||||
ip_address = worker.ip_address
|
||||
ssh_port = worker.ssh_port
|
||||
username = worker.username
|
||||
password = worker.password
|
||||
|
||||
def _async_update():
|
||||
try:
|
||||
logger.info(f"开始远程更新 Worker {worker_name} 到 {target_version}")
|
||||
|
||||
# 构建更新命令:拉取新镜像并重启 agent
|
||||
docker_user = getattr(django_settings, 'DOCKER_USER', 'yyhuni')
|
||||
update_cmd = f'''
|
||||
docker pull {docker_user}/xingrin-agent:{target_version} && \
|
||||
docker stop xingrin-agent 2>/dev/null || true && \
|
||||
docker rm xingrin-agent 2>/dev/null || true && \
|
||||
docker run -d --pull=always \
|
||||
--name xingrin-agent \
|
||||
--restart always \
|
||||
-e HEARTBEAT_API_URL="https://{django_settings.PUBLIC_HOST}" \
|
||||
-e WORKER_ID="{worker_id}" \
|
||||
-e IMAGE_TAG="{target_version}" \
|
||||
-v /proc:/host/proc:ro \
|
||||
{docker_user}/xingrin-agent:{target_version}
|
||||
'''
|
||||
|
||||
success, message = self.worker_service.execute_remote_command(
|
||||
ip_address=ip_address,
|
||||
ssh_port=ssh_port,
|
||||
username=username,
|
||||
password=password,
|
||||
command=update_cmd
|
||||
)
|
||||
|
||||
if success:
|
||||
logger.info(f"Worker {worker_name} 远程更新成功")
|
||||
# 更新成功后,新 agent 心跳会自动把状态改回 online
|
||||
else:
|
||||
logger.warning(f"Worker {worker_name} 远程更新失败: {message}")
|
||||
# 更新失败,标记为 outdated
|
||||
self._set_worker_status(worker_id, 'outdated')
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Worker {worker_name} 远程更新异常: {e}")
|
||||
self._set_worker_status(worker_id, 'outdated')
|
||||
finally:
|
||||
# 释放锁
|
||||
redis_client.delete(lock_key)
|
||||
|
||||
# 后台执行,不阻塞心跳响应
|
||||
threading.Thread(target=_async_update, daemon=True).start()
|
||||
|
||||
def _set_worker_status(self, worker_id: int, status: str):
|
||||
"""更新 Worker 状态(用于后台线程)"""
|
||||
try:
|
||||
from apps.engine.models import WorkerNode
|
||||
WorkerNode.objects.filter(id=worker_id).update(status=status)
|
||||
except Exception as e:
|
||||
logger.error(f"更新 Worker {worker_id} 状态失败: {e}")
|
||||
|
||||
@action(detail=False, methods=['post'])
|
||||
def register(self, request):
|
||||
@@ -177,75 +320,16 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
|
||||
'created': created
|
||||
})
|
||||
|
||||
def _get_client_ip(self, request) -> str:
|
||||
"""获取客户端真实 IP"""
|
||||
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
|
||||
if x_forwarded_for:
|
||||
return x_forwarded_for.split(',')[0].strip()
|
||||
return request.META.get('REMOTE_ADDR', '')
|
||||
|
||||
def _is_local_request(self, client_ip: str) -> bool:
|
||||
"""
|
||||
判断是否为本地请求(Docker 网络内部)
|
||||
|
||||
本地请求特征:
|
||||
- 来自 Docker 网络内部(172.x.x.x)
|
||||
- 来自 localhost(127.0.0.1)
|
||||
"""
|
||||
if not client_ip:
|
||||
return True # 无法获取 IP 时默认为本地
|
||||
|
||||
# Docker 默认网络段
|
||||
if client_ip.startswith('172.') or client_ip.startswith('10.'):
|
||||
return True
|
||||
|
||||
# localhost
|
||||
if client_ip in ('127.0.0.1', '::1', 'localhost'):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
@action(detail=False, methods=['get'])
|
||||
def config(self, request):
|
||||
"""
|
||||
获取任务容器配置(配置中心 API)
|
||||
|
||||
Worker 启动时调用此接口获取完整配置,实现配置中心化管理。
|
||||
Worker 只需知道 SERVER_URL,其他配置由此 API 动态返回。
|
||||
Worker 通过 IS_LOCAL 环境变量声明身份,请求时带上 ?is_local=true/false 参数。
|
||||
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 配置分发流程 │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ Worker 启动 │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ GET /api/workers/config/ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────┐ │
|
||||
│ │ _get_client_ip() │ ← 获取请求来源 IP │
|
||||
│ │ (X-Forwarded-For │ (支持 Nginx 代理场景) │
|
||||
│ │ 或 REMOTE_ADDR) │ │
|
||||
│ └─────────┬───────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────┐ │
|
||||
│ │ _is_local_request() │ ← 判断是否为 Docker 网络内部请求 │
|
||||
│ │ 172.x.x.x / 10.x.x.x│ (Docker 默认网段) │
|
||||
│ │ 127.0.0.1 / ::1 │ (localhost) │
|
||||
│ └─────────┬───────────┘ │
|
||||
│ │ │
|
||||
│ ┌───────┴───────┐ │
|
||||
│ ▼ ▼ │
|
||||
│ 本地 Worker 远程 Worker │
|
||||
│ (Docker内) (公网访问) │
|
||||
│ │ │ │
|
||||
│ ▼ ▼ │
|
||||
│ db: postgres db: PUBLIC_HOST │
|
||||
│ redis: redis redis: PUBLIC_HOST:6379 │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
请求参数:
|
||||
is_local: true/false - Worker 是否为本地节点(Docker 网络内)
|
||||
|
||||
返回:
|
||||
{
|
||||
@@ -253,19 +337,29 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
|
||||
"redisUrl": "...",
|
||||
"paths": {"results": "...", "logs": "..."}
|
||||
}
|
||||
|
||||
配置逻辑:
|
||||
- 本地 Worker (is_local=true): db_host=postgres, redis=redis:6379
|
||||
- 远程 Worker (is_local=false): db_host=PUBLIC_HOST, redis=PUBLIC_HOST:6379
|
||||
"""
|
||||
from django.conf import settings
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# 判断请求来源:本地 Worker 还是远程 Worker
|
||||
# 本地 Worker 在 Docker 网络内,可以直接访问 postgres 服务
|
||||
# 远程 Worker 需要通过公网 IP 访问
|
||||
client_ip = self._get_client_ip(request)
|
||||
is_local_worker = self._is_local_request(client_ip)
|
||||
# 从请求参数获取 Worker 身份(由 Worker 自己声明)
|
||||
# 不再依赖 IP 判断,避免不同网络环境下的兼容性问题
|
||||
is_local_param = request.query_params.get('is_local', '').lower()
|
||||
is_local_worker = is_local_param == 'true'
|
||||
|
||||
# 根据请求来源返回不同的数据库地址
|
||||
db_host = settings.DATABASES['default']['HOST']
|
||||
_is_internal_db = db_host in ('postgres', 'localhost', '127.0.0.1')
|
||||
|
||||
logger.info(
|
||||
"Worker 配置请求 - is_local_param: %s, is_local_worker: %s, db_host: %s, is_internal_db: %s",
|
||||
is_local_param, is_local_worker, db_host, _is_internal_db
|
||||
)
|
||||
|
||||
if _is_internal_db:
|
||||
# 本地数据库场景
|
||||
if is_local_worker:
|
||||
@@ -274,13 +368,18 @@ class WorkerNodeViewSet(viewsets.ModelViewSet):
|
||||
worker_redis_url = 'redis://redis:6379/0'
|
||||
else:
|
||||
# 远程 Worker:通过公网 IP 访问
|
||||
worker_db_host = settings.PUBLIC_HOST
|
||||
worker_redis_url = f'redis://{settings.PUBLIC_HOST}:6379/0'
|
||||
public_host = settings.PUBLIC_HOST
|
||||
if public_host in ('server', 'localhost', '127.0.0.1'):
|
||||
logger.warning("远程 Worker 请求配置,但 PUBLIC_HOST=%s 不是有效的公网地址", public_host)
|
||||
worker_db_host = public_host
|
||||
worker_redis_url = f'redis://{public_host}:6379/0'
|
||||
else:
|
||||
# 远程数据库场景:所有 Worker 都用 DB_HOST
|
||||
worker_db_host = db_host
|
||||
worker_redis_url = getattr(settings, 'WORKER_REDIS_URL', 'redis://redis:6379/0')
|
||||
|
||||
logger.info("返回 Worker 配置 - db_host: %s, redis_url: %s", worker_db_host, worker_redis_url)
|
||||
|
||||
return Response({
|
||||
'db': {
|
||||
'host': worker_db_host,
|
||||
|
||||
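The agent side of this heartbeat/config contract is not part of this diff. Below is a minimal sketch of what the agent loop could look like, assuming the DRF route `/api/workers/<id>/heartbeat/`, the `HEARTBEAT_API_URL` / `WORKER_ID` / `IMAGE_TAG` environment variables injected when the agent container is started, and `psutil` for load sampling; treat every detail here as an assumption, not the actual agent implementation.

```python
import os
import time

import psutil
import requests

# Hypothetical agent-side heartbeat loop matching the contract documented above.
base_url = os.environ["HEARTBEAT_API_URL"]        # e.g. https://<PUBLIC_HOST>, agent appends /api/...
worker_id = os.environ["WORKER_ID"]
version = os.environ.get("IMAGE_TAG", "unknown")

while True:
    payload = {
        "cpu_percent": psutil.cpu_percent(interval=1),
        "memory_percent": psutil.virtual_memory().percent,
        "version": version,
    }
    resp = requests.post(
        f"{base_url}/api/workers/{worker_id}/heartbeat/",   # route is an assumption
        json=payload,
        timeout=10,
        verify=False,                                       # self-signed certs, as elsewhere in this project
    )
    data = resp.json()
    if data.get("need_update"):
        print(f"server expects {data.get('server_version')}, running {version}")
    time.sleep(30)                                          # reporting interval is an assumption
```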
@@ -1,28 +1,14 @@
|
||||
# 引擎配置
|
||||
#
|
||||
# ==================== 参数命名规范 ====================
|
||||
# 所有参数统一用中划线,如 rate-limit, request-timeout, wordlist-name
|
||||
# - 贴近 CLI 参数风格,用户更直观
|
||||
# - 系统会自动转换为下划线供代码使用
|
||||
#
|
||||
# ==================== 必需参数 ====================
|
||||
# - enabled: 是否启用工具(true/false)
|
||||
# - timeout: 超时时间(秒),工具执行超过此时间会被强制终止
|
||||
#
|
||||
# 使用方式:
|
||||
# - 在前端创建扫描引擎时,将此配置保存到数据库
|
||||
# - 执行扫描时,从数据库读取配置并传递给 Flow
|
||||
# - 取消注释可选参数即可启用
|
||||
# 参数命名:统一用中划线(如 rate-limit),系统自动转换为下划线
|
||||
# 必需参数:enabled(是否启用)、timeout(超时秒数,auto 表示自动计算)
|
||||
|
||||
# ==================== 子域名发现 ====================
|
||||
#
|
||||
# 流程说明:
|
||||
# Stage 1: 被动收集(并行) - 必选,至少启用一个工具
|
||||
# Stage 2: 字典爆破(可选) - 使用字典暴力枚举子域名
|
||||
# Stage 3: 变异生成 + 验证(可选) - 基于已发现域名生成变异,流式验证存活
|
||||
# Stage 4: DNS 存活验证(可选) - 验证所有候选域名是否能解析
|
||||
#
|
||||
# 灵活组合:可以关闭 2/3/4 中的任意阶段,最终结果会根据实际执行的阶段动态决定
|
||||
# Stage 1: 被动收集(并行) - 必选,至少启用一个工具
|
||||
# Stage 2: 字典爆破(可选) - 使用字典暴力枚举子域名
|
||||
# Stage 3: 变异生成 + 验证(可选) - 基于已发现域名生成变异,流式验证存活
|
||||
# Stage 4: DNS 存活验证(可选) - 验证所有候选域名是否能解析
|
||||
#
|
||||
subdomain_discovery:
|
||||
# === Stage 1: 被动收集工具(并行执行)===
|
||||
@@ -30,11 +16,11 @@ subdomain_discovery:
|
||||
subfinder:
|
||||
enabled: true
|
||||
timeout: 7200 # 2小时
|
||||
# threads: 10 # 可选,并发 goroutine 数
|
||||
# threads: 10 # 并发 goroutine 数
|
||||
|
||||
amass_passive:
|
||||
enabled: true
|
||||
timeout: 7200 # 2小时
|
||||
timeout: 7200
|
||||
|
||||
amass_active:
|
||||
enabled: true # 主动枚举 + 爆破
|
||||
@@ -43,7 +29,7 @@ subdomain_discovery:
|
||||
sublist3r:
|
||||
enabled: true
|
||||
timeout: 7200
|
||||
# threads: 50 # 可选,线程数
|
||||
# threads: 50 # 线程数
|
||||
|
||||
assetfinder:
|
||||
enabled: true
|
||||
@@ -51,174 +37,123 @@ subdomain_discovery:
|
||||
|
||||
# === Stage 2: 主动字典爆破(可选)===
|
||||
bruteforce:
|
||||
enabled: false # 是否启用字典爆破
|
||||
enabled: false
|
||||
subdomain_bruteforce:
|
||||
timeout: auto # 自动根据字典行数计算(后续代码中按行数 * 3 秒实现)
|
||||
wordlist-name: subdomains-top1million-110000.txt # 字典名称,对应「字典管理」中的 Wordlist.name
|
||||
timeout: auto # 自动根据字典行数计算
|
||||
wordlist-name: subdomains-top1million-110000.txt # 对应「字典管理」中的 Wordlist.name
|
||||
|
||||
# === Stage 3: 变异生成 + 存活验证(可选,流式管道避免 OOM)===
|
||||
# === Stage 3: 变异生成 + 存活验证(可选)===
|
||||
permutation:
|
||||
enabled: true # 是否启用变异生成
|
||||
enabled: true
|
||||
subdomain_permutation_resolve:
|
||||
timeout: 7200 # 2小时(变异量大时需要更长时间)
|
||||
timeout: 7200
|
||||
|
||||
# === Stage 4: DNS 存活验证(可选)===
|
||||
resolve:
|
||||
enabled: true # 是否启用存活验证
|
||||
enabled: true
|
||||
subdomain_resolve:
|
||||
timeout: auto # 自动根据候选子域数量计算(在 Flow 中按行数 * 3 秒实现)
|
||||
|
||||
timeout: auto # 自动根据候选子域数量计算
|
||||
|
||||
# ==================== 端口扫描 ====================
|
||||
port_scan:
|
||||
tools:
|
||||
naabu_active:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(根据:目标数 × 端口数 × 0.5秒)
|
||||
# 例如:100个域名 × 100个端口 × 0.5 = 5000秒
|
||||
# 10个域名 × 1000个端口 × 0.5 = 5000秒
|
||||
# 超时范围:60秒 ~ 2天(172800秒)
|
||||
# 或者手动指定:timeout: 3600
|
||||
threads: 200 # 可选,并发连接数(默认 5)
|
||||
# ports: 1-65535 # 可选,扫描端口范围(默认 1-65535)
|
||||
top-ports: 100 # 可选,Scan for nmap top 100 ports(影响 timeout 计算)
|
||||
rate: 10 # 可选,扫描速率(默认 10)
|
||||
timeout: auto # 自动计算(目标数 × 端口数 × 0.5秒),范围 60秒 ~ 2天
|
||||
threads: 200 # 并发连接数(默认 5)
|
||||
# ports: 1-65535 # 扫描端口范围(默认 1-65535)
|
||||
top-ports: 100 # 扫描 nmap top 100 端口
|
||||
rate: 10 # 扫描速率(默认 10)
|
||||
|
||||
naabu_passive:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(被动扫描通常较快,端口数默认为 100)
|
||||
# 被动扫描,使用被动数据源,无需额外配置
|
||||
timeout: auto # 被动扫描通常较快
|
||||
|
||||
# ==================== 站点扫描 ====================
|
||||
site_scan:
|
||||
tools:
|
||||
httpx:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(根据URL数量,每个URL 1秒)
|
||||
# 或者手动指定:timeout: 3600
|
||||
# threads: 50 # 可选,并发线程数(httpx 默认 50)
|
||||
# rate-limit: 150 # 可选,每秒发送的请求数量(httpx 默认 150)
|
||||
# request-timeout: 10 # 可选,单个请求的超时时间(httpx 默认 10)秒
|
||||
# retries: 2 # 可选,请求失败重试次数
|
||||
timeout: auto # 自动计算(每个 URL 约 1 秒)
|
||||
# threads: 50 # 并发线程数(默认 50)
|
||||
# rate-limit: 150 # 每秒请求数(默认 150)
|
||||
# request-timeout: 10 # 单个请求超时秒数(默认 10)
|
||||
# retries: 2 # 请求失败重试次数
|
||||
|
||||
# ==================== 目录扫描 ====================
|
||||
directory_scan:
|
||||
tools:
|
||||
ffuf:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算超时时间(根据字典行数)
|
||||
# 计算公式:字典行数 × 0.02秒/词
|
||||
# 超时范围:60秒 ~ 7200秒(2小时)
|
||||
# 也可以手动指定固定超时(如 300)
|
||||
wordlist-name: dir_default.txt # 字典名称(必需),对应「字典管理」中唯一的 Wordlist.name
|
||||
# 安装时会自动初始化名为 dir_default.txt 的默认目录字典
|
||||
# ffuf 会逐行读取字典文件,将每行作为 FUZZ 关键字的替换值
|
||||
delay: 0.1-2.0 # Seconds of delay between requests, or a range of random delay
|
||||
# For example "0.1" or "0.1-2.0"
|
||||
threads: 10 # Number of concurrent threads (default: 40)
|
||||
request-timeout: 10 # HTTP request timeout in seconds (default: 10)
|
||||
match-codes: 200,201,301,302,401,403 # Match HTTP status codes, comma separated
|
||||
# rate: 0 # Rate of requests per second (default: 0)
|
||||
timeout: auto # 自动计算(字典行数 × 0.02秒),范围 60秒 ~ 2小时
|
||||
wordlist-name: dir_default.txt # 对应「字典管理」中的 Wordlist.name
|
||||
delay: 0.1-2.0 # 请求间隔,支持范围随机(如 "0.1-2.0")
|
||||
threads: 10 # 并发线程数(默认 40)
|
||||
request-timeout: 10 # HTTP 请求超时秒数(默认 10)
|
||||
match-codes: 200,201,301,302,401,403 # 匹配的 HTTP 状态码
|
||||
# rate: 0 # 每秒请求数(默认 0 不限制)
|
||||
|
||||
# ==================== URL 获取 ====================
|
||||
url_fetch:
|
||||
tools:
|
||||
waymore:
|
||||
enabled: true
|
||||
timeout: 3600 # 工具级别总超时:固定 3600 秒(按域名 target_name 输入)
|
||||
# 如果目标较大或希望更快/更慢,可根据需要手动调整秒数
|
||||
# 输入类型:domain_name(域名级别,自动去重同域名站点)
|
||||
timeout: 3600 # 固定 1 小时(按域名输入)
|
||||
|
||||
katana:
|
||||
enabled: true
|
||||
timeout: auto # 工具级别总超时:自动计算(根据站点数量)
|
||||
# 或手动指定:timeout: 300
|
||||
|
||||
# ========== 核心功能参数(已在命令中固定开启) ==========
|
||||
# -jc: JavaScript 爬取 + 自动解析 .js 文件里的所有端点(最重要)
|
||||
# -xhr: 从 JS 中提取 XHR/Fetch 请求的 API 路径(再多挖 10-20% 隐藏接口)
|
||||
# -kf all: 自动 fuzz 所有已知敏感文件(.env、.git、backup、config 等 5000+ 条)
|
||||
# -fs rdn: 智能过滤重复+噪声路径(分页、?id=1/2/3 全干掉,输出极干净)
|
||||
|
||||
# ========== 可选参数(推荐配置) ==========
|
||||
depth: 5 # 爬取最大深度(平衡深度与时间,默认 3,推荐 5)
|
||||
threads: 10 # 全局并发数(极低并发最像真人,推荐 10)
|
||||
rate-limit: 30 # 全局硬限速:每秒最多 30 个请求(WAF 几乎不报警)
|
||||
random-delay: 1 # 每次请求之间随机延迟 0.5~1.5 秒(再加一层人性化)
|
||||
retry: 2 # 失败请求自动重试 2 次(网络抖动不丢包)
|
||||
request-timeout: 12 # 单请求超时 12 秒(防卡死,katana 参数名是 -timeout)
|
||||
|
||||
# 输入类型:url(站点级别,每个站点单独爬取)
|
||||
timeout: auto # 自动计算(根据站点数量)
|
||||
depth: 5 # 爬取最大深度(默认 3)
|
||||
threads: 10 # 全局并发数
|
||||
rate-limit: 30 # 每秒最多请求数
|
||||
random-delay: 1 # 请求间随机延迟秒数
|
||||
retry: 2 # 失败重试次数
|
||||
request-timeout: 12 # 单请求超时秒数
|
||||
|
||||
uro:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(根据 URL 数量,每 100 个约 1 秒)
|
||||
# 范围:30 秒 ~ 300 秒
|
||||
# 或手动指定:timeout: 60
|
||||
|
||||
# ========== 可选参数 ==========
|
||||
# whitelist: # 只保留指定扩展名的 URL(如:php,asp,jsp)
|
||||
timeout: auto # 自动计算(每 100 个 URL 约 1 秒),范围 30 ~ 300 秒
|
||||
# whitelist: # 只保留指定扩展名
|
||||
# - php
|
||||
# - asp
|
||||
# blacklist: # 排除指定扩展名的 URL(静态资源)
|
||||
# blacklist: # 排除指定扩展名(静态资源)
|
||||
# - jpg
|
||||
# - jpeg
|
||||
# - png
|
||||
# - gif
|
||||
# - svg
|
||||
# - ico
|
||||
# - css
|
||||
# - woff
|
||||
# - woff2
|
||||
# - ttf
|
||||
# - eot
|
||||
# - mp4
|
||||
# - mp3
|
||||
# - pdf
|
||||
# filters: # 额外的过滤规则,参考 uro 文档
|
||||
# - hasparams # 只保留有参数的 URL
|
||||
# - hasext # 只保留有扩展名的 URL
|
||||
# - vuln # 只保留可能有漏洞的 URL
|
||||
|
||||
# 用途:清理合并后的 URL 列表,去除冗余和无效 URL
|
||||
# 输入类型:merged_file(合并后的 URL 文件)
|
||||
# 输出:清理后的 URL 列表
|
||||
# filters: # 额外过滤规则
|
||||
# - hasparams # 只保留有参数的 URL
|
||||
# - vuln # 只保留可能有漏洞的 URL
|
||||
|
||||
httpx:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(根据 URL 数量,每个 URL 1 秒)
|
||||
# 或手动指定:timeout: 600
|
||||
# threads: 50 # 可选,并发线程数(httpx 默认 50)
|
||||
# rate-limit: 150 # 可选,每秒发送的请求数量(httpx 默认 150)
|
||||
# request-timeout: 10 # 可选,单个请求的超时时间(httpx 默认 10)秒
|
||||
# retries: 2 # 可选,请求失败重试次数
|
||||
|
||||
# 用途:判断 URL 存活,过滤无效 URL
|
||||
# 输入类型:url_file(URL 列表文件)
|
||||
# 输出:存活的 URL 及其响应信息(status, title, server, tech 等)
|
||||
timeout: auto # 自动计算(每个 URL 约 1 秒)
|
||||
# threads: 50 # 并发线程数(默认 50)
|
||||
# rate-limit: 150 # 每秒请求数(默认 150)
|
||||
# request-timeout: 10 # 单个请求超时秒数(默认 10)
|
||||
# retries: 2 # 请求失败重试次数
|
||||
|
||||
# ==================== 漏洞扫描 ====================
|
||||
vuln_scan:
|
||||
tools:
|
||||
dalfox_xss:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(根据 endpoints 行数 × 100 秒),或手动指定秒数如 timeout: 600
|
||||
request-timeout: 10 # Dalfox 单个请求的超时时间,对应命令行 --timeout
|
||||
timeout: auto # 自动计算(endpoints 行数 × 100 秒)
|
||||
request-timeout: 10 # 单个请求超时秒数
|
||||
only-poc: r # 只输出 POC 结果(r: 反射型)
|
||||
ignore-return: "302,404,403" # 忽略这些返回码
|
||||
# blind-xss-server: xxx # 可选:盲打 XSS 回连服务地址,需要时再开启
|
||||
delay: 100 # Dalfox 扫描内部延迟参数
|
||||
worker: 10 # Dalfox worker 数量
|
||||
user-agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36" # 默认 UA,可根据需要修改
|
||||
ignore-return: "302,404,403" # 忽略的返回码
|
||||
delay: 100 # 扫描内部延迟
|
||||
worker: 10 # worker 数量
|
||||
user-agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
|
||||
# blind-xss-server: xxx # 盲打 XSS 回连服务地址
|
||||
|
||||
nuclei:
|
||||
enabled: true
|
||||
timeout: auto # 自动计算(根据 endpoints 行数),或手动指定秒数
|
||||
template-repo-names: # 模板仓库列表(必填,数组写法),对应「Nuclei 模板」中的仓库名
|
||||
- nuclei-templates # Worker 会自动同步到与 Server 一致的 commit 版本
|
||||
# - nuclei-custom # 可追加自定义仓库,按顺序依次 -t 传入
|
||||
concurrency: 25 # 并发数(默认 25)
|
||||
rate-limit: 150 # 每秒请求数限制(默认 150)
|
||||
request-timeout: 5 # 单个请求超时秒数(默认 5)
|
||||
severity: medium,high,critical # 只扫描中高危,降低噪音(逗号分隔)
|
||||
# tags: cve,rce # 可选:只使用指定标签的模板
|
||||
timeout: auto # 自动计算(根据 endpoints 行数)
|
||||
template-repo-names: # 模板仓库列表,对应「Nuclei 模板」中的仓库名
|
||||
- nuclei-templates
|
||||
# - nuclei-custom # 可追加自定义仓库
|
||||
concurrency: 25 # 并发数(默认 25)
|
||||
rate-limit: 150 # 每秒请求数限制(默认 150)
|
||||
request-timeout: 5 # 单个请求超时秒数(默认 5)
|
||||
severity: medium,high,critical # 只扫描中高危
|
||||
# tags: cve,rce # 只使用指定标签的模板
|
||||
|
||||
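The `timeout: auto` comments above all reduce to the same per-item arithmetic (naabu: targets × ports × 0.5 s clamped to 60 s–2 days; ffuf: wordlist lines × 0.02 s clamped to 60 s–2 h; dalfox: endpoints × 100 s). A minimal sketch of such a helper follows; the function name and clamping defaults are assumptions, and the real logic lives in the Flow code.

```python
# Illustrative helper only; the actual auto-timeout implementation may differ.
def auto_timeout(per_item_seconds: float, item_count: int,
                 minimum: int = 60, maximum: int = 172800) -> int:
    """Clamp a per-item estimate into a sane range (defaults: 60 s to 2 days)."""
    return int(min(max(per_item_seconds * item_count, minimum), maximum))

naabu_timeout = auto_timeout(0.5, 100 * 100)             # 100 domains x 100 top ports -> 5000 s
ffuf_timeout = auto_timeout(0.02, 20000, maximum=7200)   # 20000-line wordlist, capped at 2 h -> 400 s
dalfox_timeout = auto_timeout(100, 30)                    # 30 endpoints x 100 s -> 3000 s
print(naabu_timeout, ffuf_timeout, dalfox_timeout)        # 5000 400 3000
```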
@@ -372,19 +372,17 @@ def port_scan_flow(
|
||||
端口扫描 Flow
|
||||
|
||||
主要功能:
|
||||
1. 扫描目标域名的开放端口(核心目标)
|
||||
2. 发现域名对应的 IP 地址(附带产物)
|
||||
3. 保存 IP 和端口的关联关系
|
||||
1. 扫描目标域名/IP 的开放端口
|
||||
2. 保存 host + ip + port 三元映射到 HostPortMapping 表
|
||||
|
||||
输出资产:
|
||||
- Port:开放的端口列表(主要资产)
|
||||
- IPAddress:域名对应的 IP 地址(附带资产)
|
||||
- HostPortMapping:主机端口映射(host + ip + port 三元组)
|
||||
|
||||
工作流程:
|
||||
Step 0: 创建工作目录
|
||||
Step 1: 导出域名列表到文件(供扫描工具使用)
|
||||
Step 2: 解析配置,获取启用的工具
|
||||
Step 3: 串行执行扫描工具,运行端口扫描工具并实时解析输出到数据库(Subdomain → IPAddress → Port)
|
||||
Step 3: 串行执行扫描工具,运行端口扫描工具并实时解析输出到数据库(→ HostPortMapping)
|
||||
|
||||
Args:
|
||||
scan_id: 扫描任务 ID
|
||||
@@ -418,10 +416,8 @@ def port_scan_flow(
|
||||
RuntimeError: 执行失败
|
||||
|
||||
Note:
端口扫描的输出必然包含 IP 信息,因为:
- 扫描工具需要解析域名 → IP
- 端口属于 IP,而不是直接属于域名
- 同一域名可能对应多个 IP(CDN、负载均衡)
端口扫描工具(如 naabu)会解析域名获取 IP,输出 host + ip + port 三元组。
同一 host 可能对应多个 IP(CDN、负载均衡),因此使用三元映射表存储。
"""
|
||||
try:
|
||||
# 参数验证
|
||||
|
||||
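Concretely, one JSON line of naabu output maps to one host + ip + port record. A small worked example: the field names match the parsing code further down in this diff, while the dataclass is only an illustration, not the real HostPortMapping model.

```python
import json
from dataclasses import dataclass

@dataclass
class HostPortTriple:
    # Illustration of the host + ip + port triple, not the real HostPortMapping model.
    host: str
    ip: str
    port: int

# Example naabu JSON output line (values are made up).
line = '{"host": "www.example.com", "ip": "93.184.216.34", "port": 443}'
data = json.loads(line)
# Fall back to the IP as host when the host field is empty, as the parser below does.
record = HostPortTriple(host=data.get("host") or data["ip"], ip=data["ip"], port=data["port"])
print(record)   # HostPortTriple(host='www.example.com', ip='93.184.216.34', port=443)
```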
@@ -1,5 +1,5 @@
|
||||
"""
|
||||
子域名发现扫描 Flow(增强版)
|
||||
子域名发现扫描 Flow
|
||||
|
||||
负责编排子域名发现扫描的完整流程
|
||||
|
||||
@@ -343,7 +343,7 @@ def subdomain_discovery_flow(
|
||||
scan_workspace_dir: str,
|
||||
enabled_tools: dict
|
||||
) -> dict:
|
||||
"""子域名发现扫描流程(增强版)
|
||||
"""子域名发现扫描流程
|
||||
|
||||
工作流程(4 阶段):
|
||||
Stage 1: 被动收集(并行) - 必选
|
||||
@@ -410,7 +410,7 @@ def subdomain_discovery_flow(
|
||||
# 验证成功后打印日志
|
||||
logger.info(
|
||||
"="*60 + "\n" +
|
||||
"开始子域名发现扫描(增强版)\n" +
|
||||
"开始子域名发现扫描\n" +
|
||||
f" Scan ID: {scan_id}\n" +
|
||||
f" Domain: {domain_name}\n" +
|
||||
f" Workspace: {scan_workspace_dir}\n" +
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
import logging
|
||||
from django.dispatch import receiver
|
||||
|
||||
from apps.common.signals import vulnerabilities_saved, worker_delete_failed
|
||||
from apps.common.signals import vulnerabilities_saved, worker_delete_failed, all_workers_high_load
|
||||
from apps.scan.notifications import create_notification, NotificationLevel, NotificationCategory
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -80,3 +80,15 @@ def on_worker_delete_failed(sender, worker_name, message, **kwargs):
|
||||
category=NotificationCategory.SYSTEM
|
||||
)
|
||||
logger.warning("Worker 删除失败通知已发送 - worker=%s, message=%s", worker_name, message)
|
||||
|
||||
|
||||
@receiver(all_workers_high_load)
|
||||
def on_all_workers_high_load(sender, worker_name, cpu, mem, **kwargs):
|
||||
"""所有 Worker 高负载时的通知处理"""
|
||||
create_notification(
|
||||
title="系统负载较高",
|
||||
message=f"所有节点负载较高,已选择负载最低的节点 {worker_name}(CPU: {cpu:.1f}%, 内存: {mem:.1f}%)执行任务,扫描速度可能受影响",
|
||||
level=NotificationLevel.MEDIUM,
|
||||
category=NotificationCategory.SYSTEM
|
||||
)
|
||||
logger.warning("高负载通知已发送 - worker=%s, cpu=%.1f%%, mem=%.1f%%", worker_name, cpu, mem)
|
||||
|
||||
@@ -3,10 +3,14 @@
|
||||
import logging
|
||||
import time
|
||||
import requests
|
||||
import urllib3
|
||||
from .models import Notification, NotificationSettings
|
||||
from .types import NotificationLevel, NotificationCategory
|
||||
from .repositories import DjangoNotificationRepository, NotificationSettingsRepository
|
||||
|
||||
# 禁用自签名证书的 SSL 警告(远程 Worker 回调场景)
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -314,7 +318,8 @@ def _push_via_api_callback(notification: Notification, server_url: str) -> None:
|
||||
'created_at': notification.created_at.isoformat()
|
||||
}
|
||||
|
||||
resp = requests.post(callback_url, json=data, timeout=5)
|
||||
# verify=False: 远程 Worker 回调 Server 时可能使用自签名证书
|
||||
resp = requests.post(callback_url, json=data, timeout=5, verify=False)
|
||||
resp.raise_for_status()
|
||||
|
||||
logger.debug(f"通知回调推送成功 - ID: {notification.id}")
|
||||
|
||||
@@ -6,14 +6,135 @@
|
||||
必须在 Django 导入之前获取配置并设置环境变量。
|
||||
"""
|
||||
import argparse
|
||||
from apps.common.container_bootstrap import fetch_config_and_setup_django
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
|
||||
|
||||
def diagnose_prefect_environment():
|
||||
"""诊断 Prefect 运行环境,输出详细信息用于排查问题"""
|
||||
print("\n" + "="*60)
|
||||
print("Prefect 环境诊断")
|
||||
print("="*60)
|
||||
|
||||
# 1. 检查 Prefect 相关环境变量
|
||||
print("\n[诊断] Prefect 环境变量:")
|
||||
prefect_vars = [
|
||||
'PREFECT_HOME',
|
||||
'PREFECT_API_URL',
|
||||
'PREFECT_SERVER_EPHEMERAL_ENABLED',
|
||||
'PREFECT_SERVER_EPHEMERAL_STARTUP_TIMEOUT_SECONDS',
|
||||
'PREFECT_SERVER_DATABASE_CONNECTION_URL',
|
||||
'PREFECT_LOGGING_LEVEL',
|
||||
'PREFECT_DEBUG_MODE',
|
||||
]
|
||||
for var in prefect_vars:
|
||||
value = os.environ.get(var, 'NOT SET')
|
||||
print(f" {var}={value}")
|
||||
|
||||
# 2. 检查 PREFECT_HOME 目录
|
||||
prefect_home = os.environ.get('PREFECT_HOME', os.path.expanduser('~/.prefect'))
|
||||
print(f"\n[诊断] PREFECT_HOME 目录: {prefect_home}")
|
||||
if os.path.exists(prefect_home):
|
||||
print(f" ✓ 目录存在")
|
||||
print(f" 可写: {os.access(prefect_home, os.W_OK)}")
|
||||
try:
|
||||
files = os.listdir(prefect_home)
|
||||
print(f" 文件列表: {files[:10]}{'...' if len(files) > 10 else ''}")
|
||||
except Exception as e:
|
||||
print(f" ✗ 无法列出文件: {e}")
|
||||
else:
|
||||
print(f" 目录不存在,尝试创建...")
|
||||
try:
|
||||
os.makedirs(prefect_home, exist_ok=True)
|
||||
print(f" ✓ 创建成功")
|
||||
except Exception as e:
|
||||
print(f" ✗ 创建失败: {e}")
|
||||
|
||||
# 3. 检查 uvicorn 是否可用
|
||||
print(f"\n[诊断] uvicorn 可用性:")
|
||||
import shutil
|
||||
uvicorn_path = shutil.which('uvicorn')
|
||||
if uvicorn_path:
|
||||
print(f" ✓ uvicorn 路径: {uvicorn_path}")
|
||||
else:
|
||||
print(f" ✗ uvicorn 不在 PATH 中")
|
||||
print(f" PATH: {os.environ.get('PATH', 'NOT SET')}")
|
||||
|
||||
# 4. 检查 Prefect 版本
|
||||
print(f"\n[诊断] Prefect 版本:")
|
||||
try:
|
||||
import prefect
|
||||
print(f" ✓ prefect=={prefect.__version__}")
|
||||
except Exception as e:
|
||||
print(f" ✗ 无法导入 prefect: {e}")
|
||||
|
||||
# 5. 检查 SQLite 支持
|
||||
print(f"\n[诊断] SQLite 支持:")
|
||||
try:
|
||||
import sqlite3
|
||||
print(f" ✓ sqlite3 版本: {sqlite3.sqlite_version}")
|
||||
# 测试创建数据库
|
||||
test_db = os.path.join(prefect_home, 'test.db')
|
||||
conn = sqlite3.connect(test_db)
|
||||
conn.execute('CREATE TABLE IF NOT EXISTS test (id INTEGER)')
|
||||
conn.close()
|
||||
os.remove(test_db)
|
||||
print(f" ✓ SQLite 读写测试通过")
|
||||
except Exception as e:
|
||||
print(f" ✗ SQLite 测试失败: {e}")
|
||||
|
||||
# 6. 检查端口绑定能力
|
||||
print(f"\n[诊断] 端口绑定测试:")
|
||||
try:
|
||||
import socket
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock.bind(('127.0.0.1', 0))
|
||||
port = sock.getsockname()[1]
|
||||
sock.close()
|
||||
print(f" ✓ 可以绑定 127.0.0.1 端口 (测试端口: {port})")
|
||||
except Exception as e:
|
||||
print(f" ✗ 端口绑定失败: {e}")
|
||||
|
||||
# 7. 检查内存情况
|
||||
print(f"\n[诊断] 系统资源:")
|
||||
try:
|
||||
import psutil
|
||||
mem = psutil.virtual_memory()
|
||||
print(f" 内存总量: {mem.total / 1024 / 1024:.0f} MB")
|
||||
print(f" 可用内存: {mem.available / 1024 / 1024:.0f} MB")
|
||||
print(f" 内存使用率: {mem.percent}%")
|
||||
except ImportError:
|
||||
print(f" psutil 未安装,跳过内存检查")
|
||||
except Exception as e:
|
||||
print(f" ✗ 资源检查失败: {e}")
|
||||
|
||||
print("\n" + "="*60)
|
||||
print("诊断完成")
|
||||
print("="*60 + "\n")
|
||||
|
||||
|
||||
def main():
|
||||
print("="*60)
|
||||
print("run_initiate_scan.py 启动")
|
||||
print(f" Python: {sys.version}")
|
||||
print(f" CWD: {os.getcwd()}")
|
||||
print(f" SERVER_URL: {os.environ.get('SERVER_URL', 'NOT SET')}")
|
||||
print("="*60)
|
||||
|
||||
# 1. 从配置中心获取配置并初始化 Django(必须在 Django 导入之前)
|
||||
fetch_config_and_setup_django()
|
||||
print("[1/4] 从配置中心获取配置...")
|
||||
try:
|
||||
from apps.common.container_bootstrap import fetch_config_and_setup_django
|
||||
fetch_config_and_setup_django()
|
||||
print("[1/4] ✓ 配置获取成功")
|
||||
except Exception as e:
|
||||
print(f"[1/4] ✗ 配置获取失败: {e}")
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
# 2. 解析命令行参数
|
||||
print("[2/4] 解析命令行参数...")
|
||||
parser = argparse.ArgumentParser(description="执行扫描初始化 Flow")
|
||||
parser.add_argument("--scan_id", type=int, required=True, help="扫描任务 ID")
|
||||
parser.add_argument("--target_name", type=str, required=True, help="目标名称")
|
||||
@@ -23,21 +144,45 @@ def main():
|
||||
parser.add_argument("--scheduled_scan_name", type=str, default=None, help="定时扫描任务名称(可选)")
|
||||
|
||||
args = parser.parse_args()
|
||||
print(f"[2/4] ✓ 参数解析成功:")
|
||||
print(f" scan_id: {args.scan_id}")
|
||||
print(f" target_name: {args.target_name}")
|
||||
print(f" target_id: {args.target_id}")
|
||||
print(f" scan_workspace_dir: {args.scan_workspace_dir}")
|
||||
print(f" engine_name: {args.engine_name}")
|
||||
print(f" scheduled_scan_name: {args.scheduled_scan_name}")
|
||||
|
||||
# 2.5. 运行 Prefect 环境诊断(仅在 DEBUG 模式下)
|
||||
if os.environ.get('PREFECT_DEBUG_MODE') == 'true':
|
||||
diagnose_prefect_environment()
|
||||
|
||||
# 3. 现在可以安全导入 Django 相关模块
|
||||
from apps.scan.flows.initiate_scan_flow import initiate_scan_flow
|
||||
print("[3/4] 导入 initiate_scan_flow...")
|
||||
try:
|
||||
from apps.scan.flows.initiate_scan_flow import initiate_scan_flow
|
||||
print("[3/4] ✓ 导入成功")
|
||||
except Exception as e:
|
||||
print(f"[3/4] ✗ 导入失败: {e}")
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
# 4. 执行 Flow
|
||||
result = initiate_scan_flow(
|
||||
scan_id=args.scan_id,
|
||||
target_name=args.target_name,
|
||||
target_id=args.target_id,
|
||||
scan_workspace_dir=args.scan_workspace_dir,
|
||||
engine_name=args.engine_name,
|
||||
scheduled_scan_name=args.scheduled_scan_name,
|
||||
)
|
||||
|
||||
print(f"Flow 执行完成: {result}")
|
||||
print("[4/4] 执行 initiate_scan_flow...")
|
||||
try:
|
||||
result = initiate_scan_flow(
|
||||
scan_id=args.scan_id,
|
||||
target_name=args.target_name,
|
||||
target_id=args.target_id,
|
||||
scan_workspace_dir=args.scan_workspace_dir,
|
||||
engine_name=args.engine_name,
|
||||
scheduled_scan_name=args.scheduled_scan_name,
|
||||
)
|
||||
print("[4/4] ✓ Flow 执行完成")
|
||||
print(f"结果: {result}")
|
||||
except Exception as e:
|
||||
print(f"[4/4] ✗ Flow 执行失败: {e}")
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -266,15 +266,26 @@ class ScanCreationService:
|
||||
Args:
|
||||
scan_data: 扫描任务数据列表
|
||||
"""
|
||||
logger.info("="*60)
|
||||
logger.info("开始分发扫描任务到 Workers - 数量: %d", len(scan_data))
|
||||
logger.info("="*60)
|
||||
|
||||
# 后台线程需要新的数据库连接
|
||||
connection.close()
|
||||
logger.info("已关闭旧数据库连接,准备获取新连接")
|
||||
|
||||
distributor = get_task_distributor()
|
||||
logger.info("TaskDistributor 初始化完成")
|
||||
|
||||
scan_repo = DjangoScanRepository()
|
||||
logger.info("ScanRepository 初始化完成")
|
||||
|
||||
for data in scan_data:
|
||||
scan_id = data['scan_id']
|
||||
logger.info("-"*40)
|
||||
logger.info("准备分发扫描任务 - Scan ID: %s, Target: %s", scan_id, data['target_name'])
|
||||
try:
|
||||
logger.info("调用 distributor.execute_scan_flow...")
|
||||
success, message, container_id, worker_id = distributor.execute_scan_flow(
|
||||
scan_id=scan_id,
|
||||
target_name=data['target_name'],
|
||||
@@ -284,20 +295,29 @@ class ScanCreationService:
|
||||
scheduled_scan_name=data.get('scheduled_scan_name'),
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"execute_scan_flow 返回 - success: %s, message: %s, container_id: %s, worker_id: %s",
|
||||
success, message, container_id, worker_id
|
||||
)
|
||||
|
||||
if success:
|
||||
if container_id:
|
||||
scan_repo.append_container_id(scan_id, container_id)
|
||||
logger.info("已记录 container_id: %s", container_id)
|
||||
if worker_id:
|
||||
scan_repo.update_worker(scan_id, worker_id)
|
||||
logger.info("已记录 worker_id: %s", worker_id)
|
||||
logger.info(
|
||||
"✓ 扫描任务已提交 - Scan ID: %s, Worker: %s",
|
||||
scan_id, worker_id
|
||||
)
|
||||
else:
|
||||
logger.error("execute_scan_flow 返回失败 - message: %s", message)
|
||||
raise Exception(message)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("提交扫描任务失败 - Scan ID: %s, 错误: %s", scan_id, e)
|
||||
logger.exception("详细堆栈:")
|
||||
try:
|
||||
scan_repo.update_status(
|
||||
scan_id,
|
||||
|
||||
@@ -157,6 +157,51 @@ class ScanService:
|
||||
"""取消所有正在运行的阶段(委托给 ScanStateService)"""
|
||||
return self.state_service.cancel_running_stages(scan_id, final_status)
|
||||
|
||||
# TODO:待接入
|
||||
def add_command_to_scan(self, scan_id: int, stage_name: str, tool_name: str, command: str) -> bool:
|
||||
"""
|
||||
增量添加命令到指定扫描阶段
|
||||
|
||||
Args:
|
||||
scan_id: 扫描任务ID
|
||||
stage_name: 阶段名称(如 'subdomain_discovery', 'port_scan')
|
||||
tool_name: 工具名称
|
||||
command: 执行命令
|
||||
|
||||
Returns:
|
||||
bool: 是否成功添加
|
||||
"""
|
||||
try:
|
||||
scan = self.get_scan(scan_id, prefetch_relations=False)
|
||||
if not scan:
|
||||
logger.error(f"扫描任务不存在: {scan_id}")
|
||||
return False
|
||||
|
||||
stage_progress = scan.stage_progress or {}
|
||||
|
||||
# 确保指定阶段存在
|
||||
if stage_name not in stage_progress:
|
||||
stage_progress[stage_name] = {'status': 'running', 'commands': []}
|
||||
|
||||
# 确保 commands 列表存在
|
||||
if 'commands' not in stage_progress[stage_name]:
|
||||
stage_progress[stage_name]['commands'] = []
|
||||
|
||||
# 增量添加命令
|
||||
command_entry = f"{tool_name}: {command}"
|
||||
stage_progress[stage_name]['commands'].append(command_entry)
|
||||
|
||||
scan.stage_progress = stage_progress
|
||||
scan.save(update_fields=['stage_progress'])
|
||||
|
||||
command_count = len(stage_progress[stage_name]['commands'])
|
||||
logger.info(f"✓ 记录命令: {stage_name}.{tool_name} (总计: {command_count})")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"记录命令失败: {e}")
|
||||
return False
|
||||
|
||||
# ==================== 删除和控制方法(委托给 ScanControlService) ====================
|
||||
|
||||
def delete_scans_two_phase(self, scan_ids: List[int]) -> dict:
|
||||
@@ -167,6 +212,20 @@ class ScanService:
|
||||
"""停止扫描任务(委托给 ScanControlService)"""
|
||||
return self.control_service.stop_scan(scan_id)
|
||||
|
||||
def hard_delete_scans(self, scan_ids: List[int]) -> tuple[int, Dict[str, int]]:
|
||||
"""
|
||||
硬删除扫描任务(真正删除数据)
|
||||
|
||||
用于 Worker 容器中执行,删除已软删除的扫描及其关联数据。
|
||||
|
||||
Args:
|
||||
scan_ids: 扫描任务 ID 列表
|
||||
|
||||
Returns:
|
||||
(删除数量, 详情字典)
|
||||
"""
|
||||
return self.scan_repo.hard_delete_by_ids(scan_ids)
|
||||
|
||||
# ==================== 统计方法(委托给 ScanStatsService) ====================
|
||||
|
||||
def get_statistics(self) -> dict:
|
||||
|
||||
@@ -225,6 +225,13 @@ def _parse_and_validate_line(line: str) -> Optional[PortScanRecord]:
|
||||
ip = line_data.get('ip', '').strip()
|
||||
port = line_data.get('port')
|
||||
|
||||
logger.debug("解析到的主机名: %s, IP: %s, 端口: %s", host, ip, port)
|
||||
|
||||
if not host and ip:
|
||||
host = ip
|
||||
logger.debug("主机名为空,使用 IP 作为 host")
|
||||
|
||||
|
||||
# 步骤 4: 验证字段不为空
|
||||
if not host or not ip or port is None:
|
||||
logger.warning(
|
||||
|
||||
@@ -51,6 +51,18 @@ class ServiceSet:
|
||||
)
|
||||
|
||||
|
||||
def _sanitize_string(value: str) -> str:
|
||||
"""
|
||||
清理字符串中的 NUL 字符和其他不可打印字符
|
||||
|
||||
PostgreSQL 不允许字符串字段包含 NUL (0x00) 字符
|
||||
"""
|
||||
if not value:
|
||||
return value
|
||||
# 移除 NUL 字符
|
||||
return value.replace('\x00', '')
|
||||
|
||||
|
||||
def _parse_and_validate_line(line: str) -> Optional[dict]:
|
||||
"""
|
||||
解析并验证单行 httpx JSON 输出
|
||||
@@ -64,6 +76,9 @@ def _parse_and_validate_line(line: str) -> Optional[dict]:
|
||||
只返回存活的 URL(2xx/3xx 状态码)
|
||||
"""
|
||||
try:
|
||||
# 清理 NUL 字符后再解析 JSON
|
||||
line = _sanitize_string(line)
|
||||
|
||||
# 解析 JSON
|
||||
try:
|
||||
line_data = json.loads(line)
|
||||
@@ -87,16 +102,16 @@ def _parse_and_validate_line(line: str) -> Optional[dict]:
|
||||
# 只保存存活的 URL(2xx 或 3xx)
|
||||
if status_code and (200 <= status_code < 400):
|
||||
return {
|
||||
'url': url,
|
||||
'host': line_data.get('host', ''), # 从 httpx 输出中提取 host
|
||||
'url': _sanitize_string(url),
|
||||
'host': _sanitize_string(line_data.get('host', '')),
|
||||
'status_code': status_code,
|
||||
'title': line_data.get('title', ''),
|
||||
'title': _sanitize_string(line_data.get('title', '')),
|
||||
'content_length': line_data.get('content_length', 0),
|
||||
'content_type': line_data.get('content_type', ''),
|
||||
'webserver': line_data.get('webserver', ''),
|
||||
'location': line_data.get('location', ''),
|
||||
'content_type': _sanitize_string(line_data.get('content_type', '')),
|
||||
'webserver': _sanitize_string(line_data.get('webserver', '')),
|
||||
'location': _sanitize_string(line_data.get('location', '')),
|
||||
'tech': line_data.get('tech', []),
|
||||
'body_preview': line_data.get('body_preview', ''),
|
||||
'body_preview': _sanitize_string(line_data.get('body_preview', '')),
|
||||
'vhost': line_data.get('vhost', False),
|
||||
}
|
||||
else:
|
||||
@@ -104,7 +119,7 @@ def _parse_and_validate_line(line: str) -> Optional[dict]:
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.error("解析行数据异常: %s - 数据: %s", e, line[:100])
|
||||
logger.error("解析行数据异常: %s - 数据: %s", e, line[:100] if line else 'empty')
|
||||
return None
|
||||
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
|
||||
import logging
|
||||
import os
|
||||
import ssl
|
||||
from pathlib import Path
|
||||
from urllib import request as urllib_request
|
||||
from urllib import parse as urllib_parse
|
||||
@@ -81,15 +82,20 @@ def ensure_wordlist_local(wordlist_name: str) -> str:
|
||||
raise RuntimeError(
|
||||
"无法确定 Django API 地址:请配置 SERVER_URL 或 PUBLIC_HOST 环境变量"
|
||||
)
|
||||
server_port = getattr(settings, 'SERVER_PORT', '8888')
|
||||
api_base = f"http://{public_host}:{server_port}/api"
|
||||
# 远程 Worker 通过 nginx HTTPS 访问,不再直连 8888
|
||||
api_base = f"https://{public_host}/api"
|
||||
query = urllib_parse.urlencode({'wordlist': wordlist_name})
|
||||
download_url = f"{api_base.rstrip('/')}/wordlists/download/?{query}"
|
||||
|
||||
logger.info("从后端下载字典: %s -> %s", download_url, local_path)
|
||||
|
||||
try:
|
||||
with urllib_request.urlopen(download_url) as resp:
|
||||
# 创建不验证 SSL 的上下文(远程 Worker 可能使用自签名证书)
|
||||
ssl_context = ssl.create_default_context()
|
||||
ssl_context.check_hostname = False
|
||||
ssl_context.verify_mode = ssl.CERT_NONE
|
||||
|
||||
with urllib_request.urlopen(download_url, context=ssl_context) as resp:
|
||||
if resp.status != 200:
|
||||
raise RuntimeError(f"下载字典失败,HTTP {resp.status}")
|
||||
data = resp.read()
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
# ============================================
|
||||
# XingRin Agent
|
||||
# 用途:心跳上报 + 负载监控
|
||||
# 用途:心跳上报 + 负载监控 + 版本检查
|
||||
# 适用:远程 VPS 或 Docker 容器内
|
||||
# ============================================
|
||||
|
||||
@@ -17,6 +17,9 @@ SRC_DIR="${MARKER_DIR}/src"
|
||||
ENV_FILE="${SRC_DIR}/backend/.env"
|
||||
INTERVAL=${AGENT_INTERVAL:-3}
|
||||
|
||||
# Agent 版本(从环境变量获取,由 Docker 镜像构建时注入)
|
||||
AGENT_VERSION="${IMAGE_TAG:-unknown}"
|
||||
|
||||
# 颜色定义
|
||||
GREEN='\033[0;32m'
|
||||
RED='\033[0;31m'
|
||||
@@ -52,7 +55,7 @@ if [ "$RUN_MODE" = "remote" ] && [ -f "$ENV_FILE" ]; then
|
||||
fi
|
||||
|
||||
# 获取配置
|
||||
# SERVER_URL: 后端 API 地址(容器内用 http://server:8888,远程用公网地址)
|
||||
# SERVER_URL: 后端 API 地址(容器内用 http://server:8888,远程用 https://{PUBLIC_HOST})
|
||||
API_URL="${HEARTBEAT_API_URL:-${SERVER_URL:-}}"
|
||||
WORKER_NAME="${WORKER_NAME:-}"
|
||||
IS_LOCAL="${IS_LOCAL:-false}"
|
||||
@@ -90,7 +93,7 @@ register_worker() {
|
||||
EOF
|
||||
)
|
||||
|
||||
RESPONSE=$(curl -s -X POST \
|
||||
RESPONSE=$(curl -k -s -X POST \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$REGISTER_DATA" \
|
||||
"${API_URL}/api/workers/register/" 2>/dev/null)
|
||||
@@ -113,7 +116,7 @@ if [ -z "$WORKER_ID" ]; then
|
||||
# 等待 Server 就绪
|
||||
log "等待 Server 就绪..."
|
||||
for i in $(seq 1 30); do
|
||||
if curl -s "${API_URL}/api/" > /dev/null 2>&1; then
|
||||
if curl -k -s "${API_URL}/api/" > /dev/null 2>&1; then
|
||||
log "${GREEN}Server 已就绪${NC}"
|
||||
break
|
||||
fi
|
||||
@@ -172,22 +175,72 @@ while true; do
|
||||
fi
|
||||
|
||||
# 构建 JSON 数据(使用数值而非字符串,便于比较和排序)
|
||||
# 包含版本号,供 Server 端检查版本一致性
|
||||
JSON_DATA=$(cat <<EOF
|
||||
{
|
||||
"cpu_percent": $CPU_PERCENT,
|
||||
"memory_percent": $MEM_PERCENT
|
||||
"memory_percent": $MEM_PERCENT,
|
||||
"version": "$AGENT_VERSION"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
# 发送心跳
|
||||
RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" -X POST \
|
||||
# 发送心跳,获取响应内容
|
||||
RESPONSE_FILE=$(mktemp)
|
||||
HTTP_CODE=$(curl -k -s -o "$RESPONSE_FILE" -w "%{http_code}" -X POST \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$JSON_DATA" \
|
||||
"${API_URL}/api/workers/${WORKER_ID}/heartbeat/" 2>/dev/null || echo "000")
|
||||
RESPONSE_BODY=$(cat "$RESPONSE_FILE" 2>/dev/null)
|
||||
rm -f "$RESPONSE_FILE"
|
||||
|
||||
if [ "$RESPONSE" != "200" ] && [ "$RESPONSE" != "201" ]; then
|
||||
log "${YELLOW}心跳发送失败 (HTTP $RESPONSE)${NC}"
|
||||
if [ "$HTTP_CODE" != "200" ] && [ "$HTTP_CODE" != "201" ]; then
|
||||
log "${YELLOW}心跳发送失败 (HTTP $HTTP_CODE)${NC}"
|
||||
else
|
||||
# 检查是否需要更新
|
||||
NEED_UPDATE=$(echo "$RESPONSE_BODY" | grep -oE '"need_update":\s*(true|false)' | grep -oE '(true|false)')
|
||||
if [ "$NEED_UPDATE" = "true" ]; then
|
||||
SERVER_VERSION=$(echo "$RESPONSE_BODY" | grep -oE '"server_version":\s*"[^"]+"' | sed 's/.*"\([^"]*\)"$/\1/')
|
||||
log "${YELLOW}检测到版本不匹配: Agent=$AGENT_VERSION, Server=$SERVER_VERSION${NC}"
|
||||
log "${GREEN}正在自动更新...${NC}"
|
||||
|
||||
# 执行自动更新
|
||||
if [ "$RUN_MODE" = "container" ]; then
|
||||
# 容器模式:通知外部重启(退出后由 docker-compose restart policy 重启)
|
||||
log "容器模式:退出以触发重启更新"
|
||||
exit 0
|
||||
else
|
||||
# 远程模式:拉取新镜像并重启 agent 容器
|
||||
log "远程模式:更新 agent 镜像..."
|
||||
DOCKER_USER="${DOCKER_USER:-yyhuni}"
|
||||
NEW_IMAGE="${DOCKER_USER}/xingrin-agent:${SERVER_VERSION}"
|
||||
|
||||
# 拉取新镜像
|
||||
if $DOCKER_CMD pull "$NEW_IMAGE" 2>/dev/null; then
|
||||
log "${GREEN}镜像拉取成功: $NEW_IMAGE${NC}"
|
||||
|
||||
# 停止当前容器并用新镜像重启
|
||||
CONTAINER_NAME="xingrin-agent"
|
||||
$DOCKER_CMD stop "$CONTAINER_NAME" 2>/dev/null || true
|
||||
$DOCKER_CMD rm "$CONTAINER_NAME" 2>/dev/null || true
|
||||
|
||||
# 重新启动(使用相同的环境变量)
|
||||
$DOCKER_CMD run -d \
|
||||
--name "$CONTAINER_NAME" \
|
||||
--restart unless-stopped \
|
||||
-e HEARTBEAT_API_URL="$API_URL" \
|
||||
-e WORKER_ID="$WORKER_ID" \
|
||||
-e IMAGE_TAG="$SERVER_VERSION" \
|
||||
-v /proc:/host/proc:ro \
|
||||
"$NEW_IMAGE"
|
||||
|
||||
log "${GREEN}Agent 已更新到 $SERVER_VERSION${NC}"
|
||||
exit 0
|
||||
else
|
||||
log "${RED}镜像拉取失败: $NEW_IMAGE${NC}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# 休眠
|
||||
|
||||
@@ -60,12 +60,14 @@ start_agent() {
|
||||
log_info "=========================================="
|
||||
|
||||
log_info "启动 agent 容器..."
|
||||
# --pull=missing 只在本地没有镜像时才拉取,避免意外更新
|
||||
# --pull=missing: 本地没有镜像时才拉取
|
||||
# 版本更新由服务端通过 SSH 显式 docker pull 触发
|
||||
docker run -d --pull=missing \
|
||||
--name ${CONTAINER_NAME} \
|
||||
--restart always \
|
||||
-e SERVER_URL="${PRESET_SERVER_URL}" \
|
||||
-e WORKER_ID="${PRESET_WORKER_ID}" \
|
||||
-e IMAGE_TAG="${IMAGE_TAG}" \
|
||||
-v /proc:/host/proc:ro \
|
||||
${IMAGE}
|
||||
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
# 目前采用 GitHub Actions 自动构建版本,发布新版本时执行:
|
||||
# git tag v1.0.9
|
||||
# git push origin v1.0.9
|
||||
# ============================================
|
||||
# Docker Hub 镜像推送脚本
|
||||
# 用途:构建并推送所有服务镜像到 Docker Hub
|
||||
|
||||
@@ -15,14 +15,14 @@ REDIS_PORT=6379
|
||||
REDIS_DB=0
|
||||
|
||||
# ==================== 服务端口配置 ====================
|
||||
# SERVER_PORT 为 Django / uvicorn 对外端口
|
||||
# SERVER_PORT 为 Django / uvicorn 容器内部端口(由 nginx 反代,对公网不直接暴露)
|
||||
SERVER_PORT=8888
|
||||
|
||||
# ==================== 远程 Worker 配置 ====================
|
||||
# 供远程 Worker 访问主服务器的地址:
|
||||
# - 仅本地部署:server(Docker 内部服务名)
|
||||
# - 有远程 Worker:改为主服务器外网 IP(如 192.168.1.100)
|
||||
# 注意:远程 Worker 访问数据库/Redis 也会使用此地址(除非配置了远程 PostgreSQL)
|
||||
# - 有远程 Worker:改为主服务器外网 IP 或域名(如 192.168.1.100 或 xingrin.example.com)
|
||||
# 注意:远程 Worker 会通过 https://{PUBLIC_HOST} 访问(nginx 反代到后端 8888)
|
||||
PUBLIC_HOST=server
|
||||
|
||||
# ==================== Django 核心配置 ====================
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
# ============================================
|
||||
# XingRin Agent - 轻量心跳上报镜像
|
||||
# 用途:心跳上报 + 负载监控
|
||||
# 用途:心跳上报 + 负载监控 + 版本检查
|
||||
# 基础镜像:Alpine Linux (~5MB)
|
||||
# 最终大小:~10MB
|
||||
# ============================================
|
||||
|
||||
FROM alpine:3.19
|
||||
|
||||
# 构建参数:版本号
|
||||
ARG IMAGE_TAG=unknown
|
||||
|
||||
# 安装必要工具
|
||||
RUN apk add --no-cache \
|
||||
bash \
|
||||
@@ -17,6 +20,9 @@ RUN apk add --no-cache \
|
||||
COPY backend/scripts/worker-deploy/agent.sh /app/agent.sh
|
||||
RUN chmod +x /app/agent.sh
|
||||
|
||||
# 将版本号写入环境变量(运行时可用)
|
||||
ENV IMAGE_TAG=${IMAGE_TAG}
|
||||
|
||||
# 工作目录
|
||||
WORKDIR /app
|
||||
|
||||
|
||||
@@ -37,8 +37,6 @@ services:
|
||||
context: ..
|
||||
dockerfile: docker/server/Dockerfile
|
||||
restart: always
|
||||
ports:
|
||||
- "${SERVER_PORT}:8888"
|
||||
env_file:
|
||||
- .env
|
||||
depends_on:
|
||||
@@ -56,19 +54,19 @@ services:
|
||||
retries: 3
|
||||
start_period: 60s
|
||||
|
||||
# Agent:心跳上报 + 负载监控
|
||||
# Agent:心跳上报 + 负载监控 + 版本检查
|
||||
agent:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: docker/worker/Dockerfile
|
||||
dockerfile: docker/agent/Dockerfile
|
||||
args:
|
||||
IMAGE_TAG: ${IMAGE_TAG:-dev}
|
||||
restart: always
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
- SERVER_URL=http://server:8888
|
||||
- WORKER_NAME=本地节点
|
||||
- IS_LOCAL=true
|
||||
command: bash /app/backend/scripts/worker-deploy/agent.sh
|
||||
- IMAGE_TAG=${IMAGE_TAG:-dev}
|
||||
depends_on:
|
||||
server:
|
||||
condition: service_healthy
|
||||
@@ -79,6 +77,8 @@ services:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: docker/frontend/Dockerfile
|
||||
args:
|
||||
IMAGE_TAG: ${IMAGE_TAG:-dev}
|
||||
restart: always
|
||||
depends_on:
|
||||
server:
|
||||
@@ -101,6 +101,18 @@ services:
|
||||
# SSL 证书挂载(方便更新)
|
||||
- ./nginx/ssl:/etc/nginx/ssl:ro
|
||||
|
||||
# Worker:扫描任务执行容器(开发模式下构建)
|
||||
worker:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: docker/worker/Dockerfile
|
||||
image: docker-worker:${IMAGE_TAG:-latest}-dev
|
||||
restart: "no"
|
||||
volumes:
|
||||
- /opt/xingrin/results:/app/backend/results
|
||||
- /opt/xingrin/logs:/app/backend/logs
|
||||
command: echo "Worker image built for development"
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
|
||||
|
||||
@@ -41,8 +41,6 @@ services:
|
||||
server:
|
||||
image: ${DOCKER_USER:-yyhuni}/xingrin-server:${IMAGE_TAG:?IMAGE_TAG is required}
|
||||
restart: always
|
||||
ports:
|
||||
- "${SERVER_PORT}:8888"
|
||||
env_file:
|
||||
- .env
|
||||
depends_on:
|
||||
@@ -74,6 +72,7 @@ services:
|
||||
- SERVER_URL=http://server:8888
|
||||
- WORKER_NAME=本地节点
|
||||
- IS_LOCAL=true
|
||||
- IMAGE_TAG=${IMAGE_TAG}
|
||||
depends_on:
|
||||
server:
|
||||
condition: service_healthy
|
||||
|
||||
@@ -27,7 +27,9 @@ COPY frontend/ ./
|
||||
|
||||
# 设置环境变量(构建时使用)
|
||||
ARG NEXT_PUBLIC_API_URL
|
||||
ARG IMAGE_TAG=unknown
|
||||
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
|
||||
ENV NEXT_PUBLIC_VERSION=${IMAGE_TAG}
|
||||
# Docker 内部网络使用服务名 server 作为后端地址
|
||||
ENV API_HOST=server
|
||||
|
||||
|
||||
@@ -36,6 +36,9 @@ http {
|
||||
|
||||
client_max_body_size 50m;
|
||||
|
||||
# 指纹特征 - 用于 FOFA/Shodan 等搜索引擎识别
|
||||
add_header X-Powered-By "Xingrin ASM" always;
|
||||
|
||||
location /api/ {
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
|
||||
@@ -27,10 +27,10 @@ check_docker() {
|
||||
|
||||
# ==================== Docker Compose 命令检测 ====================
|
||||
detect_compose_cmd() {
|
||||
if command -v docker-compose >/dev/null 2>&1; then
|
||||
COMPOSE_CMD="docker-compose"
|
||||
elif docker compose version >/dev/null 2>&1; then
|
||||
if docker compose version >/dev/null 2>&1; then
|
||||
COMPOSE_CMD="docker compose"
|
||||
elif command -v docker-compose >/dev/null 2>&1; then
|
||||
COMPOSE_CMD="docker-compose"
|
||||
else
|
||||
log_error "未检测到 docker-compose 或 docker compose。"
|
||||
exit 1
|
||||
|
||||
97
docker/scripts/setup-swap.sh
Executable file
@@ -0,0 +1,97 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Ubuntu/Debian 一键开启交换分区脚本
|
||||
# 用法: sudo ./setup-swap.sh [大小GB]
|
||||
# 示例: sudo ./setup-swap.sh 4 # 创建 4GB 交换分区
|
||||
# sudo ./setup-swap.sh # 默认创建与内存相同大小的交换分区
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
# 颜色定义
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
log_info() { echo -e "${GREEN}[INFO]${NC} $1"; }
|
||||
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
|
||||
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
|
||||
|
||||
# 检查 root 权限
|
||||
if [ "$EUID" -ne 0 ]; then
|
||||
log_error "请使用 sudo 运行此脚本"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 检查是否已有交换分区
|
||||
CURRENT_SWAP_KB=$(grep SwapTotal /proc/meminfo | awk '{print $2}')
|
||||
CURRENT_SWAP_GB=$(awk "BEGIN {printf \"%.0f\", $CURRENT_SWAP_KB / 1024 / 1024}")
|
||||
if [ "$CURRENT_SWAP_GB" -gt 0 ]; then
|
||||
log_warn "系统已有 ${CURRENT_SWAP_GB}GB 交换分区"
|
||||
swapon --show
|
||||
read -p "是否继续添加新的交换分区?(y/N) " -r
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
log_info "已取消"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# 获取系统内存大小(GB,四舍五入)
|
||||
TOTAL_MEM_KB=$(grep MemTotal /proc/meminfo | awk '{print $2}')
|
||||
TOTAL_MEM_GB=$(awk "BEGIN {printf \"%.0f\", $TOTAL_MEM_KB / 1024 / 1024}")
|
||||
|
||||
# 确定交换分区大小
|
||||
if [ -n "$1" ]; then
|
||||
SWAP_SIZE_GB=$1
|
||||
else
|
||||
# 默认与内存相同,最小 1GB,最大 8GB
|
||||
SWAP_SIZE_GB=$TOTAL_MEM_GB
|
||||
[ "$SWAP_SIZE_GB" -lt 1 ] && SWAP_SIZE_GB=1
|
||||
[ "$SWAP_SIZE_GB" -gt 8 ] && SWAP_SIZE_GB=8
|
||||
fi
|
||||
|
||||
SWAP_FILE="/swapfile_xingrin"
|
||||
|
||||
log_info "系统内存: ${TOTAL_MEM_GB}GB"
|
||||
log_info "将创建 ${SWAP_SIZE_GB}GB 交换分区: $SWAP_FILE"
|
||||
|
||||
# 检查磁盘空间(向下取整,保守估计)
|
||||
AVAILABLE_GB=$(df / | tail -1 | awk '{printf "%.0f", $4/1024/1024}')
|
||||
if [ "$AVAILABLE_GB" -lt "$SWAP_SIZE_GB" ]; then
|
||||
log_error "磁盘空间不足!可用: ${AVAILABLE_GB}GB,需要: ${SWAP_SIZE_GB}GB"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 创建交换文件
|
||||
log_info "正在创建交换文件(可能需要几分钟)..."
|
||||
dd if=/dev/zero of=$SWAP_FILE bs=1G count=$SWAP_SIZE_GB status=progress
|
||||
|
||||
# 设置权限
|
||||
chmod 600 $SWAP_FILE
|
||||
|
||||
# 格式化为交换分区
|
||||
mkswap $SWAP_FILE
|
||||
|
||||
# 启用交换分区
|
||||
swapon $SWAP_FILE
|
||||
|
||||
# 添加到 fstab(开机自动挂载)
|
||||
if ! grep -q "$SWAP_FILE" /etc/fstab; then
|
||||
echo "$SWAP_FILE none swap sw 0 0" >> /etc/fstab
|
||||
log_info "已添加到 /etc/fstab,开机自动启用"
|
||||
fi
|
||||
|
||||
# 优化 swappiness(降低交换倾向,优先使用内存)
|
||||
SWAPPINESS=10
|
||||
if ! grep -q "vm.swappiness" /etc/sysctl.conf; then
|
||||
echo "vm.swappiness=$SWAPPINESS" >> /etc/sysctl.conf
|
||||
fi
|
||||
sysctl vm.swappiness=$SWAPPINESS >/dev/null
|
||||
|
||||
log_info "交换分区创建成功!"
|
||||
echo ""
|
||||
echo "当前交换分区状态:"
|
||||
swapon --show
|
||||
echo ""
|
||||
free -h
|
||||
@@ -42,10 +42,10 @@ if ! docker info >/dev/null 2>&1; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if command -v docker-compose >/dev/null 2>&1; then
|
||||
COMPOSE_CMD="docker-compose"
|
||||
elif docker compose version >/dev/null 2>&1; then
|
||||
if docker compose version >/dev/null 2>&1; then
|
||||
COMPOSE_CMD="docker compose"
|
||||
elif command -v docker-compose >/dev/null 2>&1; then
|
||||
COMPOSE_CMD="docker-compose"
|
||||
else
|
||||
echo -e "${RED}[ERROR]${NC} 未检测到 docker compose,请先安装"
|
||||
exit 1
|
||||
@@ -135,6 +135,7 @@ if [ "$DEV_MODE" = true ]; then
|
||||
fi
|
||||
else
|
||||
# 生产模式:拉取 Docker Hub 镜像
|
||||
# pull 后 up -d 会自动检测镜像变化并重建容器
|
||||
if [ "$WITH_FRONTEND" = true ]; then
|
||||
echo -e "${CYAN}[PULL]${NC} 拉取最新镜像..."
|
||||
${COMPOSE_CMD} ${COMPOSE_ARGS} pull
|
||||
@@ -173,7 +174,7 @@ if [ "$WITH_FRONTEND" = true ]; then
|
||||
echo -e " XingRin: ${CYAN}https://${ACCESS_HOST}/${NC}"
|
||||
echo -e " ${YELLOW}(HTTP 会自动跳转到 HTTPS)${NC}"
|
||||
else
|
||||
echo -e " API: ${CYAN}http://${ACCESS_HOST}:8888${NC}"
|
||||
echo -e " API: ${CYAN}通过前端或 nginx 访问(后端未暴露 8888)${NC}"
|
||||
echo ""
|
||||
echo -e "${YELLOW}[TIP]${NC} 前端未启动,请手动运行:"
|
||||
echo " cd frontend && pnpm dev"
|
||||
|
||||
@@ -79,20 +79,20 @@ ENV GOPATH=/root/go
|
||||
ENV PATH=/usr/local/go/bin:$PATH:$GOPATH/bin
|
||||
ENV GOPROXY=https://goproxy.cn,direct
|
||||
|
||||
# 5. 安装 uv(超快的 Python 包管理器)
|
||||
RUN pip install uv --break-system-packages
|
||||
|
||||
# 安装 Python 依赖(使用 uv 并行下载,速度快 10-100 倍)
|
||||
COPY backend/requirements.txt .
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv pip install --system -r requirements.txt --break-system-packages && \
|
||||
rm -f /usr/local/lib/python3.*/dist-packages/argparse.py && \
|
||||
rm -rf /usr/local/lib/python3.*/dist-packages/__pycache__/argparse*
|
||||
|
||||
COPY --from=go-builder /usr/local/go /usr/local/go
|
||||
COPY --from=go-builder /go/bin/* /usr/local/bin/
|
||||
COPY --from=go-builder /usr/local/bin/massdns /usr/local/bin/massdns
|
||||
|
||||
# 5. 安装 uv( Python 包管理器)并安装 Python 依赖
|
||||
COPY backend/requirements.txt .
|
||||
RUN pip install uv --break-system-packages && \
|
||||
uv pip install --system -r requirements.txt --break-system-packages && \
|
||||
rm -f /usr/local/lib/python3.*/dist-packages/argparse.py && \
|
||||
rm -rf /usr/local/lib/python3.*/dist-packages/__pycache__/argparse* && \
|
||||
rm -rf /root/.cache/uv && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# 6. 复制后端代码
|
||||
COPY backend /app/backend
|
||||
ENV PYTHONPATH=/app/backend
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
|
||||
### 架构设计
|
||||
- [版本管理架构](./version-management.md) - Git Tag 驱动的自动化版本管理系统
|
||||
- [Nuclei 模板架构](./nuclei-template-architecture.md) - 模板仓库的存储、同步、分发机制
|
||||
- [字典文件架构](./wordlist-architecture.md) - 字典文件的存储、同步、分发机制
|
||||
|
||||
### 开发指南
|
||||
- [快速开始](./quick-start.md) - 一键安装和部署指南
|
||||
|
||||
229
docs/nuclei-template-architecture.md
Normal file
@@ -0,0 +1,229 @@
|
||||
# Nuclei 模板管理架构
|
||||
|
||||
本文档介绍 XingRin 中 Nuclei 模板的存储、同步和使用机制。
|
||||
|
||||
## 目录结构
|
||||
|
||||
```
|
||||
/opt/xingrin/nuclei-repos/
|
||||
├── nuclei-templates/ # 官方模板仓库(按仓库名命名)
|
||||
│ ├── .git/
|
||||
│ ├── http/
|
||||
│ ├── network/
|
||||
│ └── ...
|
||||
└── custom-repo/ # 自定义模板仓库
|
||||
```
|
||||
|
||||
## 一、存储位置
|
||||
|
||||
| 配置项 | 默认值 | 说明 |
|
||||
|--------|--------|------|
|
||||
| `NUCLEI_TEMPLATES_REPOS_BASE_DIR` | `/opt/xingrin/nuclei-repos` | 模板仓库根目录 |
|
||||
|
||||
每个模板仓库会在根目录下创建独立子目录,目录名由仓库名称 slugify 生成。
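
本地目录名的生成可以用下面的最小示意理解(假设基于 `django.utils.text.slugify`,`build_local_path` 为示意函数名,并非实际实现):

```python
from pathlib import Path
from django.utils.text import slugify

NUCLEI_TEMPLATES_REPOS_BASE_DIR = "/opt/xingrin/nuclei-repos"  # 对应上表配置项

def build_local_path(repo_name: str) -> str:
    """根据仓库名称生成本地克隆目录(示意实现)"""
    # 例如 slugify("My Templates") -> "my-templates"
    return str(Path(NUCLEI_TEMPLATES_REPOS_BASE_DIR) / slugify(repo_name, allow_unicode=True))
```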
|
||||
|
||||
## 二、数据模型
|
||||
|
||||
```
|
||||
NucleiTemplateRepo
|
||||
├── id # 仓库 ID
|
||||
├── name # 仓库名称(用于前端展示和 Worker 查询)
|
||||
├── repo_url # Git 仓库地址
|
||||
├── local_path # 本地克隆路径(自动生成)
|
||||
├── commit_hash # 当前同步的 commit hash
|
||||
└── last_synced_at # 最后同步时间
|
||||
```
|
||||
|
||||
## 三、Server 端同步流程
|
||||
|
||||
1. 用户在前端添加模板仓库(填写名称和 Git URL)
|
||||
2. 点击「同步」触发 `NucleiTemplateRepoService.refresh_repo()`
|
||||
3. 首次同步:`git clone --depth 1`(浅克隆,节省空间)
|
||||
4. 后续同步:`git pull --ff-only`(快进合并)
|
||||
5. 同步成功后更新数据库:`commit_hash`、`last_synced_at`
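
上述同步逻辑可以用下面的最小示意理解(基于 `subprocess` 调用 git,函数名与细节为示意,实际以 `NucleiTemplateRepoService.refresh_repo()` 为准);随后的示意图展示了同一流程:

```python
import subprocess
from pathlib import Path

def sync_repo(repo_url: str, local_path: str) -> str:
    """克隆或拉取模板仓库,返回当前 commit hash(示意实现)"""
    path = Path(local_path)
    if not (path / ".git").exists():
        # 首次同步:浅克隆,节省空间
        path.parent.mkdir(parents=True, exist_ok=True)
        subprocess.run(["git", "clone", "--depth", "1", repo_url, str(path)], check=True)
    else:
        # 后续同步:快进合并
        subprocess.run(["git", "-C", str(path), "pull", "--ff-only"], check=True)
    # 获取当前 commit hash,写回数据库供 Worker 校验版本一致性
    result = subprocess.run(
        ["git", "-C", str(path), "rev-parse", "HEAD"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout.strip()
```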
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────────────────────┐
|
||||
│ Server 容器 │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ 前端 UI │ │
|
||||
│ │ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ │
|
||||
│ │ │ 添加仓库 │ │ 同步仓库 │ │ 浏览模板 │ │ │
|
||||
│ │ │ name + url │ │ 点击刷新 │ │ 目录树 │ │ │
|
||||
│ │ └──────┬───────┘ └──────┬───────┘ └──────────────┘ │ │
|
||||
│ └─────────┼───────────────────┼───────────────────────────────────┘ │
|
||||
│ │ │ │
|
||||
│ ▼ ▼ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ NucleiTemplateRepoViewSet │ │
|
||||
│ │ POST /api/nuclei/repos/ | POST .../refresh/ │ │
|
||||
│ └─────────────────────────────┬───────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ NucleiTemplateRepoService │ │
|
||||
│ │ │ │
|
||||
│ │ ┌────────────────────┐ ┌────────────────────────────────┐ │ │
|
||||
│ │ │ ensure_local_path()│ │ refresh_repo() │ │ │
|
||||
│ │ │ 生成本地目录路径 │ │ 执行 Git 同步 │ │ │
|
||||
│ │ └────────────────────┘ └───────────────┬────────────────┘ │ │
|
||||
│ └────────────────────────────────────────────┼────────────────────┘ │
|
||||
│ │ │
|
||||
│ ┌───────────────┴───────────────┐ │
|
||||
│ │ │ │
|
||||
│ ▼ ▼ │
|
||||
│ ┌─────────────────────┐ ┌─────────────────────┐ │
|
||||
│ │ 首次同步(无 .git) │ │ 后续同步(有 .git) │ │
|
||||
│ └──────────┬──────────┘ └──────────┬──────────┘ │
|
||||
│ │ │ │
|
||||
│ ▼ ▼ │
|
||||
│ ┌─────────────────────┐ ┌─────────────────────┐ │
|
||||
│ │ git clone --depth 1 │ │ git pull --ff-only │ │
|
||||
│ │ <repo_url> │ │ │ │
|
||||
│ └──────────┬──────────┘ └──────────┬──────────┘ │
|
||||
│ │ │ │
|
||||
│ └──────────────┬─────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────────────────────────────────┐ │
|
||||
│ │ git rev-parse HEAD │ │
|
||||
│ │ 获取当前 commit hash │ │
|
||||
│ └──────────────────────────┬──────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ PostgreSQL 数据库 │ │
|
||||
│ │ │ │
|
||||
│ │ UPDATE nuclei_template_repo SET │ │
|
||||
│ │ local_path = '/opt/xingrin/nuclei-repos/xxx', │ │
|
||||
│ │ commit_hash = 'abc123...', │ │
|
||||
│ │ last_synced_at = NOW() │ │
|
||||
│ │ WHERE id = ? │ │
|
||||
│ └─────────────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ 文件系统 │ │
|
||||
│ │ /opt/xingrin/nuclei-repos/ │ │
|
||||
│ │ ├── nuclei-templates/ # 官方模板 │ │
|
||||
│ │ │ ├── .git/ │ │
|
||||
│ │ │ ├── http/ │ │
|
||||
│ │ │ ├── network/ │ │
|
||||
│ │ │ └── ... │ │
|
||||
│ │ └── custom-repo/ # 自定义模板 │ │
|
||||
│ └─────────────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
└──────────────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
│ git clone / pull
|
||||
▼
|
||||
┌─────────────────────┐
|
||||
│ GitHub / GitLab │
|
||||
│ 远程 Git 仓库 │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
## 四、Worker 端同步流程
|
||||
|
||||
Worker 执行扫描任务时,通过 `ensure_nuclei_templates_local()` 确保本地模板与 Server 版本一致:
|
||||
|
||||
1. 从数据库查询仓库记录,获取 `repo_url` 和 `commit_hash`
|
||||
2. 检查本地是否存在仓库目录
|
||||
- 不存在:`git clone --depth 1`
|
||||
- 存在:比较本地 commit hash 与 Server 的 `commit_hash`
|
||||
3. 如果 commit 不一致:`git fetch` + `git checkout <commit_hash>`
|
||||
4. 返回本地模板目录路径,供 nuclei 命令使用
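
对应的最小示意如下(假设仓库记录已从数据库取得 `repo_url`、`commit_hash`、`local_path`,实际以 `ensure_nuclei_templates_local()` 为准);随后的示意图展示了同一流程:

```python
import subprocess
from pathlib import Path

def ensure_templates(repo_url: str, server_commit: str, local_path: str) -> str:
    """确保本地模板与 Server 记录的 commit 一致,返回模板目录路径(示意实现)"""
    path = Path(local_path)
    if not (path / ".git").exists():
        path.parent.mkdir(parents=True, exist_ok=True)
        subprocess.run(["git", "clone", "--depth", "1", repo_url, str(path)], check=True)
    local_commit = subprocess.run(
        ["git", "-C", str(path), "rev-parse", "HEAD"],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    if local_commit != server_commit:
        # commit 不一致时同步到 Server 指定的版本
        subprocess.run(["git", "-C", str(path), "fetch", "origin", server_commit], check=True)
        subprocess.run(["git", "-C", str(path), "checkout", server_commit], check=True)
    return str(path)
```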
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────────────────────┐
|
||||
│ Worker 容器 │
|
||||
│ │
|
||||
│ ┌─────────────┐ │
|
||||
│ │ 扫描任务 │ │
|
||||
│ │ 开始执行 │ │
|
||||
│ └──────┬──────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────────┐ ┌─────────────────────────────────┐ │
|
||||
│ │ ensure_nuclei_ │ │ PostgreSQL │ │
|
||||
│ │ templates_local() │─────▶│ 查询 NucleiTemplateRepo 表 │ │
|
||||
│ │ │ │ 获取 repo_url, commit_hash │ │
|
||||
│ └───────────┬─────────────┘ └─────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────────┐ │
|
||||
│ │ 检查本地 .git 目录 │ │
|
||||
│ └───────────┬─────────────┘ │
|
||||
│ │ │
|
||||
│ ┌───────┴───────┐ │
|
||||
│ │ │ │
|
||||
│ ▼ ▼ │
|
||||
│ ┌────────┐ ┌────────────┐ │
|
||||
│ │ 不存在 │ │ 存在 │ │
|
||||
│ └───┬────┘ └─────┬──────┘ │
|
||||
│ │ │ │
|
||||
│ ▼ ▼ │
|
||||
│ ┌────────────┐ ┌─────────────────────┐ │
|
||||
│ │ git clone │ │ 比较 commit hash │ │
|
||||
│ │ --depth 1 │ │ local vs server │ │
|
||||
│ └─────┬──────┘ └──────────┬──────────┘ │
|
||||
│ │ │ │
|
||||
│ │ ┌───────┴───────┐ │
|
||||
│ │ │ │ │
|
||||
│ │ ▼ ▼ │
|
||||
│ │ ┌──────────┐ ┌──────────────┐ │
|
||||
│ │ │ 一致 │ │ 不一致 │ │
|
||||
│ │ │ 直接使用 │ │ │ │
|
||||
│ │ └────┬─────┘ └───────┬──────┘ │
|
||||
│ │ │ │ │
|
||||
│ │ │ ▼ │
|
||||
│ │ │ ┌──────────────────┐ │
|
||||
│ │ │ │ git fetch origin │ │
|
||||
│ │ │ │ git checkout │ │
|
||||
│ │ │ │ <commit_hash> │ │
|
||||
│ │ │ └────────┬─────────┘ │
|
||||
│ │ │ │ │
|
||||
│ ▼ ▼ ▼ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ 返回本地模板目录路径 │ │
|
||||
│ │ /opt/xingrin/nuclei-repos/<repo-name>/ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ 执行 nuclei 扫描 │ │
|
||||
│ │ nuclei -t /opt/xingrin/nuclei-repos/xxx/ -l targets.txt │ │
|
||||
│ └─────────────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
└──────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## 五、版本一致性保证
|
||||
|
||||
- Server 同步时记录 `commit_hash`
|
||||
- Worker 使用前检查本地 hash 是否与 Server 一致
|
||||
- 不一致时自动同步到指定 commit
|
||||
- 确保所有节点使用相同版本的模板
|
||||
|
||||
## 六、配置项
|
||||
|
||||
在 `docker/.env` 或环境变量中配置:
|
||||
|
||||
```bash
|
||||
# Nuclei 模板仓库根目录
|
||||
NUCLEI_TEMPLATES_REPOS_BASE_DIR=/opt/xingrin/nuclei-repos
|
||||
```
|
||||
|
||||
## 七、常见问题
|
||||
|
||||
### Q: Worker 报错「未找到模板仓库」?
|
||||
|
||||
A: 需要先在 Server 端添加并同步模板仓库,Worker 通过数据库查询仓库信息。
|
||||
|
||||
### Q: 如何添加自定义模板仓库?
|
||||
|
||||
A: 在前端「Nuclei 模板」页面点击添加,填写仓库名称和 Git URL,然后点击同步即可。
|
||||
|
||||
### Q: 模板更新后 Worker 如何获取最新版本?
|
||||
|
||||
A: 在 Server 端点击「同步」更新模板,Worker 下次执行扫描时会检测到 commit hash 不一致并自动同步。
|
||||
@@ -14,10 +14,9 @@
|
||||
- **端口要求**: 需要开放以下端口
|
||||
- `80` - HTTP 访问(自动跳转到 HTTPS)
|
||||
- `443` - HTTPS 访问(主要访问端口)
|
||||
- `3000` - 前端开发服务(开发模式)
|
||||
- `8888` - 后端 API 服务
|
||||
- `5432` - PostgreSQL 数据库(如使用本地数据库)
|
||||
- `6379` - Redis 缓存服务
|
||||
- 后端 API 仅容器内监听 8888,由 nginx 反代到 80/443,对公网无需放行 8888
|
||||
|
||||
## 一键安装
|
||||
|
||||
@@ -64,10 +63,10 @@ sudo ./install.sh --no-frontend
|
||||
80 - HTTP 访问
|
||||
443 - HTTPS 访问
|
||||
3000 - 前端服务(开发模式)
|
||||
8888 - 后端 API
|
||||
5432 - PostgreSQL(如使用本地数据库)
|
||||
6379 - Redis 缓存
|
||||
```
|
||||
> 后端 API 默认仅在容器内 8888 监听,由 nginx 反代到 80/443,对公网无需放行 8888。
|
||||
|
||||
#### 推荐方案
|
||||
- **国外 VPS**:如 Vultr、DigitalOcean、Linode 等,默认开放所有端口,无需额外配置
|
||||
@@ -157,8 +156,8 @@ DB_USER=postgres # 数据库用户
|
||||
DB_PASSWORD=随机生成 # 数据库密码
|
||||
|
||||
# 服务配置
|
||||
SERVER_PORT=8888 # 后端服务端口
|
||||
PUBLIC_HOST=server # 对外访问地址
|
||||
SERVER_PORT=8888 # 后端容器内部端口(仅 Docker 内网监听)
|
||||
PUBLIC_HOST=server # 对外访问地址(远程 Worker 用,配置外网 IP 或域名)
|
||||
DEBUG=False # 调试模式
|
||||
|
||||
# 版本配置
|
||||
|
||||
123
docs/scan-flow-architecture.md
Normal file
@@ -0,0 +1,123 @@
|
||||
# 扫描流程架构
|
||||
|
||||
## 完整扫描流程
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
START[Start Scan]
|
||||
TARGET[Input Target]
|
||||
|
||||
START --> TARGET
|
||||
|
||||
subgraph STAGE1["Stage 1: Discovery Sequential"]
|
||||
direction TB
|
||||
|
||||
subgraph SUB["Subdomain Discovery"]
|
||||
direction TB
|
||||
SUBFINDER[subfinder]
|
||||
AMASS[amass]
|
||||
SUBLIST3R[sublist3r]
|
||||
ASSETFINDER[assetfinder]
|
||||
MERGE[Merge & Deduplicate]
|
||||
BRUTEFORCE[puredns bruteforce<br/>Dictionary Attack]
|
||||
MUTATE[dnsgen + puredns<br/>Mutation Generation]
|
||||
RESOLVE[puredns resolve<br/>Alive Verification]
|
||||
|
||||
SUBFINDER --> MERGE
|
||||
AMASS --> MERGE
|
||||
SUBLIST3R --> MERGE
|
||||
ASSETFINDER --> MERGE
|
||||
MERGE --> BRUTEFORCE
|
||||
BRUTEFORCE --> MUTATE
|
||||
MUTATE --> RESOLVE
|
||||
end
|
||||
|
||||
subgraph PORT["Port Scan"]
|
||||
NAABU[naabu<br/>Port Discovery]
|
||||
end
|
||||
|
||||
subgraph SITE["Site Scan"]
|
||||
HTTPX1[httpx<br/>Web Service Detection]
|
||||
end
|
||||
|
||||
RESOLVE --> NAABU
|
||||
NAABU --> HTTPX1
|
||||
end
|
||||
|
||||
TARGET --> SUBFINDER
|
||||
TARGET --> AMASS
|
||||
TARGET --> SUBLIST3R
|
||||
TARGET --> ASSETFINDER
|
||||
|
||||
subgraph STAGE2["Stage 2: Analysis Parallel"]
|
||||
direction TB
|
||||
|
||||
subgraph URL["URL Collection"]
|
||||
direction TB
|
||||
WAYMORE[waymore<br/>Historical URLs]
|
||||
KATANA[katana<br/>Crawler]
|
||||
URO[uro<br/>URL Deduplication]
|
||||
HTTPX2[httpx<br/>Alive Verification]
|
||||
|
||||
WAYMORE --> URO
|
||||
KATANA --> URO
|
||||
URO --> HTTPX2
|
||||
end
|
||||
|
||||
subgraph DIR["Directory Scan"]
|
||||
FFUF[ffuf<br/>Directory Bruteforce]
|
||||
end
|
||||
end
|
||||
|
||||
HTTPX1 --> WAYMORE
|
||||
HTTPX1 --> KATANA
|
||||
HTTPX1 --> FFUF
|
||||
|
||||
subgraph STAGE3["Stage 3: Vulnerability Sequential"]
|
||||
direction TB
|
||||
|
||||
subgraph VULN["Vulnerability Scan"]
|
||||
direction LR
|
||||
DALFOX[dalfox<br/>XSS Scan]
|
||||
NUCLEI[nuclei<br/>Vulnerability Scan]
|
||||
end
|
||||
end
|
||||
|
||||
HTTPX2 --> DALFOX
|
||||
HTTPX2 --> NUCLEI
|
||||
|
||||
DALFOX --> FINISH
|
||||
NUCLEI --> FINISH
|
||||
FFUF --> FINISH
|
||||
|
||||
FINISH[Scan Complete]
|
||||
|
||||
style START fill:#ff9999
|
||||
style FINISH fill:#99ff99
|
||||
style TARGET fill:#ffcc99
|
||||
style STAGE1 fill:#e6f3ff
|
||||
style STAGE2 fill:#fff4e6
|
||||
style STAGE3 fill:#ffe6f0
|
||||
```
|
||||
|
||||
## 执行阶段定义
|
||||
|
||||
```python
|
||||
# backend/apps/scan/configs/command_templates.py
|
||||
EXECUTION_STAGES = [
|
||||
{'mode': 'sequential', 'flows': ['subdomain_discovery', 'port_scan', 'site_scan']},
|
||||
{'mode': 'parallel', 'flows': ['url_fetch', 'directory_scan']},
|
||||
{'mode': 'sequential', 'flows': ['vuln_scan']},
|
||||
]
|
||||
```
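
上面的阶段定义大致按如下方式被消费(最小示意:`run_flow` 为假设的单个 flow 执行入口,并发阶段用线程池表示,实际调度以 `initiate_scan_flow` 的实现为准):

```python
from concurrent.futures import ThreadPoolExecutor

def run_flow(flow_name: str) -> None:
    """假设的单个 flow 执行入口,仅作占位"""
    print(f"running {flow_name}")

def run_stages(stages: list[dict]) -> None:
    """sequential 阶段内逐个执行 flow,parallel 阶段内并发执行"""
    for stage in stages:
        flows = stage["flows"]
        if stage["mode"] == "parallel":
            with ThreadPoolExecutor(max_workers=len(flows)) as pool:
                list(pool.map(run_flow, flows))
        else:
            for flow in flows:
                run_flow(flow)

# 用法示意:run_stages(EXECUTION_STAGES)
```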
|
||||
|
||||
## 各阶段输出
|
||||
|
||||
| Flow | 工具 | 输出表 |
|
||||
|------|------|--------|
|
||||
| subdomain_discovery | subfinder, amass, sublist3r, assetfinder, puredns | Subdomain |
|
||||
| port_scan | naabu | HostPortMapping |
|
||||
| site_scan | httpx | WebSite |
|
||||
| url_fetch | waymore, katana, uro, httpx | Endpoint |
|
||||
| directory_scan | ffuf | Directory |
|
||||
| vuln_scan | dalfox, nuclei | Vulnerability |
|
||||
@@ -148,17 +148,82 @@ sequenceDiagram
|
||||
2. **远程 Worker**:按需拉取对应版本
|
||||
3. **自动同步**:update.sh 统一更新版本号
|
||||
|
||||
## Agent 自动更新机制
|
||||
|
||||
### 概述
|
||||
|
||||
Agent 是运行在每个 Worker 节点上的轻量级心跳服务(~10MB),负责上报节点状态和负载信息。当主服务器更新后,Agent 需要同步更新以保持版本一致。
|
||||
|
||||
### 版本检测流程
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant A as Agent
|
||||
participant S as Server
|
||||
participant H as Docker Hub
|
||||
|
||||
A->>S: POST /api/workers/{id}/heartbeat/
|
||||
    Note right of A: {"cpu_percent": 50, "memory_percent": 60, "version": "v1.0.8"}
|
||||
|
||||
S->>S: 比较 agent_version vs IMAGE_TAG
|
||||
|
||||
alt 版本匹配
|
||||
S->>A: {"status": "ok", "need_update": false}
|
||||
else 版本不匹配 (远程 Worker)
|
||||
S->>S: 设置状态为 updating
|
||||
S->>A: {"status": "ok", "need_update": true}
|
||||
S-->>H: SSH: docker pull agent:v1.0.19
|
||||
S-->>A: SSH: 重启 agent 容器
|
||||
else 版本不匹配 (本地 Worker)
|
||||
S->>S: 设置状态为 outdated
|
||||
S->>A: {"status": "ok", "need_update": true}
|
||||
Note over S: 需用户手动 ./update.sh
|
||||
end
|
||||
```
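
服务端对心跳的版本判断可以用如下最小示意理解(返回字段与上图一致,具体以 `backend/apps/engine/views/worker_views.py` 为准;防止重复触发的 Redis 锁见下文「防重复机制」):

```python
import os

def build_heartbeat_response(agent_version: str) -> dict:
    """比较 Agent 上报版本与服务端 IMAGE_TAG,决定是否要求更新(示意实现)"""
    server_version = os.environ.get("IMAGE_TAG", "unknown")
    need_update = agent_version != server_version
    # need_update=True 时:远程 Worker 由服务端通过 SSH 拉取新镜像并重启,
    # 本地 Worker 仅标记为 outdated,等待用户执行 ./update.sh
    return {"status": "ok", "need_update": need_update, "server_version": server_version}

# 例:Agent 上报 v1.0.8、服务端镜像为 v1.0.19 时,返回 need_update=True
```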
|
||||
|
||||
### Worker 状态流转
|
||||
|
||||
| 场景 | 状态变化 | 说明 |
|
||||
|------|---------|------|
|
||||
| 首次心跳 | `pending/deploying` → `online` | Agent 启动成功 |
|
||||
| 远程 Worker 版本不匹配 | `online` → `updating` → `online` | 服务端自动 SSH 更新 |
|
||||
| 远程 Worker 更新失败 | `updating` → `outdated` | SSH 执行失败 |
|
||||
| 本地 Worker 版本不匹配 | `online` → `outdated` | 需手动 update.sh |
|
||||
| 版本匹配 | `updating/outdated` → `online` | 恢复正常 |
|
||||
|
||||
### 更新触发条件
|
||||
|
||||
1. **远程 Worker**:服务端检测到版本不匹配时,自动通过 SSH 执行更新
|
||||
2. **本地 Worker**:用户执行 `./update.sh` 时,docker-compose 会拉取新镜像并重启
|
||||
|
||||
### 防重复机制
|
||||
|
||||
使用 Redis 锁防止同一 Worker 在 60 秒内重复触发更新:
|
||||
```python
|
||||
lock_key = f"agent_update_lock:{worker_id}"
|
||||
redis.set(lock_key, "1", nx=True, ex=60)
|
||||
```
|
||||
|
||||
### 相关文件
|
||||
|
||||
| 文件 | 作用 |
|
||||
|------|------|
|
||||
| `backend/apps/engine/views/worker_views.py` | 心跳 API,版本检测和更新触发 |
|
||||
| `backend/scripts/worker-deploy/agent.sh` | Agent 心跳脚本,上报版本号 |
|
||||
| `backend/scripts/worker-deploy/start-agent.sh` | Agent 启动脚本 |
|
||||
| `docker/agent/Dockerfile` | Agent 镜像构建,注入 IMAGE_TAG |
|
||||
|
||||
## 开发环境配置
|
||||
|
||||
### 本地开发测试
|
||||
```bash
|
||||
# docker/.env 中添加
|
||||
TASK_EXECUTOR_IMAGE=docker-agent:latest # 指向本地构建镜像
|
||||
# docker/.env 中添加(开发模式会自动设置)
|
||||
TASK_EXECUTOR_IMAGE=docker-worker:v1.1.0-dev # 指向本地构建镜像
|
||||
```
|
||||
|
||||
### 开发模式启动
|
||||
```bash
|
||||
# 使用本地构建镜像
|
||||
# 使用本地构建镜像(自动构建并标记为 ${VERSION}-dev)
|
||||
./install.sh --dev
|
||||
./start.sh --dev
|
||||
```
|
||||
@@ -188,7 +253,13 @@ else:
|
||||
TASK_EXECUTOR_IMAGE = ''
|
||||
```
|
||||
|
||||
## 故障排查
|
||||
## Agent 自动更新机制
|
||||
|
||||
### 概述
|
||||
|
||||
Agent 是运行在每个 Worker 节点上的轻量级心跳服务,负责上报节点状态和负载信息。当主服务器更新后,Agent 需要同步更新以保持版本一致。
|
||||
|
||||
### 版本检测流程
|
||||
|
||||
### 版本不一致问题
|
||||
**症状**:任务执行失败,兼容性错误
|
||||
@@ -238,7 +309,8 @@ curl -s https://hub.docker.com/v2/repositories/yyhuni/xingrin-worker/tags/
|
||||
4. ✅ 使用 `docker system prune` 清理旧镜像
|
||||
|
||||
### 开发调试
|
||||
1. ✅ 本地测试使用 `--dev` 模式
|
||||
1. ✅ 本地测试使用 `--dev` 模式(自动构建 `docker-worker:${VERSION}-dev`)
|
||||
2. ✅ 远程测试先推送测试版本到 Hub
|
||||
3. ✅ 生产环境避免使用 `latest` 标签
|
||||
4. ✅ 版本回滚通过修改 `IMAGE_TAG` 实现
|
||||
3. ✅ 生产环境避免使用 `latest` 标签,始终使用明确版本号
|
||||
4. ✅ 开发环境使用 `-dev` 后缀区分开发版本
|
||||
5. ✅ 版本回滚通过修改 `IMAGE_TAG` 实现
|
||||
253
docs/wordlist-architecture.md
Normal file
@@ -0,0 +1,253 @@
|
||||
# 字典文件管理架构
|
||||
|
||||
本文档介绍 XingRin 中字典文件的存储、同步和使用机制。
|
||||
|
||||
## 目录结构
|
||||
|
||||
```
|
||||
/opt/xingrin/wordlists/
|
||||
├── common.txt # 通用字典
|
||||
├── subdomains.txt # 子域名字典
|
||||
├── directories.txt # 目录字典
|
||||
└── ...
|
||||
```
|
||||
|
||||
## 一、存储位置
|
||||
|
||||
| 配置项 | 默认值 | 说明 |
|
||||
|--------|--------|------|
|
||||
| `WORDLISTS_BASE_PATH` | `/opt/xingrin/wordlists` | 字典文件存储目录 |
|
||||
|
||||
## 二、数据模型
|
||||
|
||||
```
|
||||
Wordlist
|
||||
├── id # 字典 ID
|
||||
├── name # 字典名称(唯一,用于查询)
|
||||
├── description # 描述
|
||||
├── file_path # 文件绝对路径
|
||||
├── file_size # 文件大小(字节)
|
||||
├── line_count # 行数
|
||||
└── file_hash # SHA256 哈希值(用于校验)
|
||||
```
|
||||
|
||||
## 三、Server 端上传流程
|
||||
|
||||
1. 用户在前端上传字典文件
|
||||
2. `WordlistService.create_wordlist()` 处理:
|
||||
- 保存文件到 `WORDLISTS_BASE_PATH` 目录
|
||||
- 计算 SHA256 哈希值
|
||||
- 统计文件大小和行数
|
||||
- 创建数据库记录
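
其中第 2 步的核心处理可以用下面的最小示意理解(不含数据库写入,实际以 `WordlistService.create_wordlist()` 为准);下方流程图展示了完整链路:

```python
import hashlib
from pathlib import Path

WORDLISTS_BASE_PATH = "/opt/xingrin/wordlists"  # 对应上表配置项,示意用

def save_wordlist(name: str, content: bytes) -> dict:
    """保存字典文件并返回待写入数据库的元数据(示意实现)"""
    path = Path(WORDLISTS_BASE_PATH) / f"{name}.txt"   # 文件名规则为示意
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_bytes(content)
    return {
        "name": name,
        "file_path": str(path),
        "file_size": path.stat().st_size,                 # 文件大小(字节)
        "line_count": len(content.splitlines()),          # 行数
        "file_hash": hashlib.sha256(content).hexdigest(),  # SHA256,供 Worker 校验
    }
```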
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph SERVER["🖥️ Server 容器"]
|
||||
direction TB
|
||||
|
||||
subgraph UI["前端 UI"]
|
||||
direction LR
|
||||
UPLOAD["📤 上传字典<br/>选择文件"]
|
||||
EDIT["✏️ 编辑内容<br/>在线修改"]
|
||||
DELETE["🗑️ 删除字典"]
|
||||
end
|
||||
|
||||
UPLOAD --> API
|
||||
EDIT --> API
|
||||
|
||||
subgraph API["API 层"]
|
||||
VIEWSET["WordlistViewSet<br/>POST /api/wordlists/<br/>PUT .../content/"]
|
||||
end
|
||||
|
||||
API --> SERVICE
|
||||
|
||||
subgraph SERVICE["业务逻辑层"]
|
||||
CREATE["create_wordlist()<br/>创建字典"]
|
||||
UPDATE["update_wordlist_content()<br/>更新字典内容"]
|
||||
end
|
||||
|
||||
CREATE --> PROCESS
|
||||
UPDATE --> PROCESS
|
||||
|
||||
subgraph PROCESS["处理流程"]
|
||||
direction TB
|
||||
STEP1["1️⃣ 保存文件到<br/>/opt/xingrin/wordlists/"]
|
||||
STEP2["2️⃣ 计算 SHA256 哈希值"]
|
||||
STEP3["3️⃣ 统计文件大小和行数"]
|
||||
STEP4["4️⃣ 创建/更新数据库记录"]
|
||||
|
||||
STEP1 --> STEP2
|
||||
STEP2 --> STEP3
|
||||
STEP3 --> STEP4
|
||||
end
|
||||
|
||||
STEP4 --> DB
|
||||
STEP1 --> FS
|
||||
|
||||
subgraph DB["💾 PostgreSQL 数据库"]
|
||||
DBRECORD["INSERT INTO wordlist<br/>name: 'subdomains'<br/>file_path: '/opt/xingrin/wordlists/subdomains.txt'<br/>file_size: 1024000<br/>line_count: 50000<br/>file_hash: 'sha256...'"]
|
||||
end
|
||||
|
||||
subgraph FS["📁 文件系统"]
|
||||
FILES["/opt/xingrin/wordlists/<br/>├── common.txt<br/>├── subdomains.txt<br/>└── directories.txt"]
|
||||
end
|
||||
end
|
||||
|
||||
style SERVER fill:#e6f3ff
|
||||
style UI fill:#fff4e6
|
||||
style API fill:#f0f0f0
|
||||
style SERVICE fill:#d4edda
|
||||
style PROCESS fill:#ffe6f0
|
||||
style DB fill:#cce5ff
|
||||
style FS fill:#e2e3e5
|
||||
```
|
||||
|
||||
## 四、Worker 端获取流程
|
||||
|
||||
Worker 执行扫描任务时,通过 `ensure_wordlist_local()` 获取字典:
|
||||
|
||||
1. 根据字典名称查询数据库,获取 `file_path` 和 `file_hash`
|
||||
2. 检查本地是否存在字典文件
|
||||
- 存在且 hash 匹配:直接使用
|
||||
- 存在但 hash 不匹配:重新下载
|
||||
- 不存在:从 Server API 下载
|
||||
3. 下载地址:`GET /api/wordlists/download/?wordlist=<name>`
|
||||
4. 返回本地字典文件路径
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph WORKER["🔧 Worker 容器"]
|
||||
direction TB
|
||||
|
||||
START["🎯 扫描任务<br/>需要字典"]
|
||||
|
||||
START --> ENSURE
|
||||
|
||||
ENSURE["ensure_wordlist_local()<br/>参数: wordlist_name"]
|
||||
|
||||
ENSURE --> QUERY
|
||||
|
||||
QUERY["📊 查询 PostgreSQL<br/>获取 file_path, file_hash"]
|
||||
|
||||
QUERY --> CHECK
|
||||
|
||||
CHECK{"🔍 检查本地文件<br/>/opt/xingrin/wordlists/"}
|
||||
|
||||
CHECK -->|不存在| DOWNLOAD
|
||||
CHECK -->|存在| HASH
|
||||
|
||||
HASH["🔐 计算本地文件 SHA256<br/>与数据库 hash 比较"]
|
||||
|
||||
HASH -->|一致| USE
|
||||
HASH -->|不一致| DOWNLOAD
|
||||
|
||||
DOWNLOAD["📥 从 Server API 下载<br/>GET /api/wordlists/download/?wordlist=name"]
|
||||
|
||||
DOWNLOAD --> SERVER
|
||||
|
||||
SERVER["🌐 HTTP Request"]
|
||||
|
||||
SERVER -.请求.-> API["Server (Django)<br/>返回文件内容"]
|
||||
API -.响应.-> SERVER
|
||||
|
||||
SERVER --> SAVE
|
||||
|
||||
SAVE["💾 保存到本地<br/>/opt/xingrin/wordlists/filename"]
|
||||
|
||||
SAVE --> RETURN
|
||||
|
||||
USE["✅ 直接使用"] --> RETURN
|
||||
|
||||
RETURN["📂 返回本地字典文件路径<br/>/opt/xingrin/wordlists/subdomains.txt"]
|
||||
|
||||
RETURN --> EXEC
|
||||
|
||||
EXEC["🚀 执行扫描工具<br/>puredns bruteforce -w /opt/xingrin/wordlists/xxx.txt"]
|
||||
end
|
||||
|
||||
style WORKER fill:#e6f3ff
|
||||
style START fill:#fff4e6
|
||||
style CHECK fill:#ffe6f0
|
||||
style HASH fill:#ffe6f0
|
||||
style USE fill:#d4edda
|
||||
style DOWNLOAD fill:#f8d7da
|
||||
style RETURN fill:#d4edda
|
||||
style EXEC fill:#cce5ff
|
||||
```
|
||||
|
||||
## 五、Hash 校验机制
|
||||
|
||||
- 上传时计算 SHA256 并存入数据库
|
||||
- Worker 使用前校验本地文件 hash
|
||||
- 不匹配时自动重新下载
|
||||
- 确保所有节点使用相同内容的字典
|
||||
|
||||
## 六、本地 Worker vs 远程 Worker
|
||||
|
||||
本地 Worker 和远程 Worker 获取字典的方式相同:
|
||||
|
||||
1. 从数据库查询字典元数据(file_hash)
|
||||
2. 检查本地缓存是否存在且 hash 匹配
|
||||
3. 不匹配则通过 HTTP API 下载
|
||||
|
||||
**注意**:Worker 容器只挂载了 `results` 和 `logs` 目录,没有挂载 `wordlists` 目录,所以字典文件需要通过 API 下载。
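
与上面的获取流程对应,哈希校验与下载的最小示意如下(数据库查询结果以参数代入,下载地址按前文 API 约定拼接,自签名证书场景跳过校验仅为示意,实际以 `ensure_wordlist_local()` 为准):

```python
import hashlib
import ssl
from pathlib import Path
from urllib import parse, request

WORDLISTS_BASE_PATH = "/opt/xingrin/wordlists"

def ensure_wordlist(name: str, expected_hash: str, api_base: str) -> str:
    """本地缓存命中且 hash 匹配则直接使用,否则从 Server 下载(示意实现)"""
    path = Path(WORDLISTS_BASE_PATH) / f"{name}.txt"   # 文件名规则为示意
    if path.exists() and hashlib.sha256(path.read_bytes()).hexdigest() == expected_hash:
        return str(path)
    # GET /api/wordlists/download/?wordlist=<name>
    url = f"{api_base.rstrip('/')}/wordlists/download/?" + parse.urlencode({"wordlist": name})
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    with request.urlopen(url, context=ctx) as resp:
        data = resp.read()
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_bytes(data)
    return str(path)
```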
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant W as Worker (本地/远程)
|
||||
participant DB as PostgreSQL
|
||||
participant S as Server API
|
||||
participant FS as 本地缓存
|
||||
|
||||
W->>DB: 1️⃣ 查询数据库获取 file_hash
|
||||
DB-->>W: 返回 file_hash
|
||||
|
||||
W->>FS: 2️⃣ 检查本地缓存
|
||||
|
||||
alt 存在且 hash 匹配
|
||||
FS-->>W: ✅ 直接使用
|
||||
else 不存在或不匹配
|
||||
W->>S: 3️⃣ GET /api/wordlists/download/
|
||||
S-->>W: 4️⃣ 返回文件内容
|
||||
W->>FS: 5️⃣ 保存到本地缓存<br/>/opt/xingrin/wordlists/
|
||||
FS-->>W: ✅ 使用缓存文件
|
||||
end
|
||||
|
||||
Note over W,FS: 本地 Worker 优势:<br/>• 网络延迟更低(容器内网络)<br/>• 缓存可复用(同一宿主机多次任务)
|
||||
```
|
||||
|
||||
### 本地 Worker 的优势
|
||||
|
||||
虽然获取方式相同,但本地 Worker 有以下优势:
|
||||
- 网络延迟更低(容器内网络)
|
||||
- 下载后的缓存可复用(同一宿主机上的多次任务)
|
||||
|
||||
## 七、配置项
|
||||
|
||||
在 `docker/.env` 或环境变量中配置:
|
||||
|
||||
```bash
|
||||
# 字典文件存储目录
|
||||
WORDLISTS_PATH=/opt/xingrin/wordlists
|
||||
|
||||
# Server 地址(Worker 用于下载文件)
|
||||
PUBLIC_HOST=your-server-ip # 远程 Worker 会通过 https://{PUBLIC_HOST}/api 访问
|
||||
SERVER_PORT=8888 # 后端容器内部端口,仅 Docker 内网监听
|
||||
```
|
||||
|
||||
## 八、常见问题
|
||||
|
||||
### Q: 字典文件更新后 Worker 没有使用新版本?
|
||||
|
||||
A: 更新字典内容后会重新计算 hash,Worker 下次使用时会检测到 hash 不匹配并重新下载。
|
||||
|
||||
### Q: 远程 Worker 下载文件失败?
|
||||
|
||||
A: 检查:
|
||||
1. `PUBLIC_HOST` 是否配置为 Server 的外网 IP 或域名
|
||||
2. Nginx 443 (HTTPS) 是否可达(远程 Worker 通过 nginx 访问后端)
|
||||
3. Worker 到 Server 的网络是否通畅
|
||||
|
||||
### Q: 如何批量导入字典?
|
||||
|
||||
A: 目前只支持通过前端逐个上传,后续可能支持批量导入功能。
|
||||
@@ -29,9 +29,21 @@ import { AuthLayout } from "@/components/auth/auth-layout"
|
||||
|
||||
// 定义页面的元数据信息,用于 SEO 优化
|
||||
export const metadata: Metadata = {
|
||||
title: "XingRin - 星环", // 页面标题
|
||||
description: "XingRin - 星环", // 页面描述
|
||||
generator: "XingRin", // 生成器标识
|
||||
title: "星环 (Xingrin) - 攻击面管理平台 | ASM",
|
||||
description: "星环 - 攻击面管理平台 (ASM),提供自动化资产发现、漏洞扫描、子域名枚举、端口扫描等功能。支持分布式扫描、Nuclei 集成、定时任务。",
|
||||
keywords: ["ASM", "攻击面管理", "漏洞扫描", "资产发现", "Bug Bounty", "渗透测试", "Nuclei", "子域名枚举", "安全工具", "EASM", "安全"],
|
||||
generator: "Xingrin ASM Platform",
|
||||
authors: [{ name: "yyhuni" }],
|
||||
openGraph: {
|
||||
title: "星环 (Xingrin) - 攻击面管理平台",
|
||||
description: "攻击面管理平台 (ASM),提供自动化资产发现与漏洞扫描",
|
||||
type: "website",
|
||||
locale: "zh_CN",
|
||||
},
|
||||
robots: {
|
||||
index: true,
|
||||
follow: true,
|
||||
},
|
||||
}
|
||||
|
||||
// 使用思源黑体 + 系统字体回退,完全本地加载
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import type { Metadata } from "next"
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: "登录 - XingRin - 星环",
|
||||
description: "登录到 XingRin - 星环",
|
||||
title: "登录 - 星环 | 攻击面管理平台",
|
||||
description: "星环 (XingRin) - 攻击面管理平台 (ASM),提供自动化资产发现与漏洞扫描",
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -61,22 +61,25 @@ export default function LoginPage() {
|
||||
|
||||
return (
|
||||
<div
|
||||
className="flex min-h-svh flex-col items-center justify-center p-6 md:p-10"
|
||||
className="flex min-h-svh flex-col p-6 md:p-10"
|
||||
style={{
|
||||
backgroundColor: '#DFDBE5',
|
||||
backgroundImage: `url("data:image/svg+xml,%3Csvg width='180' height='180' viewBox='0 0 180 180' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M82.42 180h-1.415L0 98.995v-2.827L6.167 90 0 83.833V81.004L81.005 0h2.827L90 6.167 96.167 0H98.996L180 81.005v2.827L173.833 90 180 96.167V98.996L98.995 180h-2.827L90 173.833 83.833 180H82.42zm0-1.414L1.413 97.58 8.994 90l-7.58-7.58L82.42 1.413 90 8.994l7.58-7.58 81.006 81.005-7.58 7.58 7.58 7.58-81.005 81.006-7.58-7.58-7.58 7.58zM175.196 0h-25.832c1.033 2.924 2.616 5.59 4.625 7.868C152.145 9.682 151 12.208 151 15c0 5.523 4.477 10 10 10 1.657 0 3 1.343 3 3v4h16V0h-4.803c.51.883.803 1.907.803 3 0 3.314-2.686 6-6 6s-6-2.686-6-6c0-1.093.292-2.117.803-3h10.394-13.685C161.18.938 161 1.948 161 3v4c-4.418 0-8 3.582-8 8s3.582 8 8 8c2.76 0 5 2.24 5 5v2h4v-4h2v4h4v-4h2v4h2V0h-4.803zm-15.783 0c-.27.954-.414 1.96-.414 3v2.2c-1.25.254-2.414.74-3.447 1.412-1.716-1.93-3.098-4.164-4.054-6.612h7.914zM180 17h-3l2.143-10H180v10zm-30.635 163c-.884-2.502-1.365-5.195-1.365-8 0-13.255 10.748-24 23.99-24H180v32h-30.635zm12.147 0c.5-1.416 1.345-2.67 2.434-3.66l-1.345-1.48c-1.498 1.364-2.62 3.136-3.186 5.14H151.5c-.97-2.48-1.5-5.177-1.5-8 0-12.15 9.84-22 22-22h8v30h-18.488zm13.685 0c-1.037-1.793-2.976-3-5.197-3-2.22 0-4.16 1.207-5.197 3h10.394zM0 148h8.01C21.26 148 32 158.742 32 172c0 2.805-.48 5.498-1.366 8H0v-32zm0 2h8c12.15 0 22 9.847 22 22 0 2.822-.53 5.52-1.5 8h-7.914c-.567-2.004-1.688-3.776-3.187-5.14l-1.346 1.48c1.09.99 1.933 2.244 2.434 3.66H0v-30zm15.197 30c-1.037-1.793-2.976-3-5.197-3-2.22 0-4.16 1.207-5.197 3h10.394zM0 32h16v-4c0-1.657 1.343-3 3-3 5.523 0 10-4.477 10-10 0-2.794-1.145-5.32-2.992-7.134C28.018 5.586 29.6 2.924 30.634 0H0v32zm0-2h2v-4h2v4h4v-4h2v4h4v-2c0-2.76 2.24-5 5-5 4.418 0 8-3.582 8-8s-3.582-8-8-8V3c0-1.052-.18-2.062-.512-3H0v30zM28.5 0c-.954 2.448-2.335 4.683-4.05 6.613-1.035-.672-2.2-1.16-3.45-1.413V3c0-1.04-.144-2.046-.414-3H28.5zM0 17h3L.857 7H0v10zM15.197 0c.51.883.803 1.907.803 3 0 3.314-2.686 6-6 6S4 6.314 4 3c0-1.093.292-2.117.803-3h10.394zM109 115c-1.657 0-3 1.343-3 3v4H74v-4c0-1.657-1.343-3-3-3-5.523 0-10-4.477-10-10 0-2.793 1.145-5.318 2.99-7.132C60.262 93.638 58 88.084 58 82c0-13.255 10.748-24 23.99-24h16.02C111.26 58 122 68.742 122 82c0 6.082-2.263 11.636-5.992 15.866C117.855 99.68 119 102.206 119 105c0 5.523-4.477 10-10 10zm0-2c-2.76 0-5 2.24-5 5v2h-4v-4h-2v4h-4v-4h-2v4h-4v-4h-2v4h-4v-4h-2v4h-4v-2c0-2.76-2.24-5-5-5-4.418 0-8-3.582-8-8s3.582-8 8-8v-4c0-2.64 1.136-5.013 2.946-6.66L72.6 84.86C70.39 86.874 69 89.775 69 93v2.2c-1.25.254-2.414.74-3.447 1.412C62.098 92.727 60 87.61 60 82c0-12.15 9.84-22 22-22h16c12.15 0 22 9.847 22 22 0 5.61-2.097 10.728-5.55 14.613-1.035-.672-2.2-1.16-3.45-1.413V93c0-3.226-1.39-6.127-3.6-8.14l-1.346 1.48C107.864 87.987 109 90.36 109 93v4c4.418 0 8 3.582 8 8s-3.582 8-8 8zM90.857 97L93 107h-6l2.143-10h1.714zM80 99c3.314 0 6-2.686 6-6s-2.686-6-6-6-6 2.686-6 6 2.686 6 6 6zm20 0c3.314 0 6-2.686 6-6s-2.686-6-6-6-6 2.686-6 6 2.686 6 6 6z' fill='%239C92AC' fill-opacity='0.28' fill-rule='evenodd'/%3E%3C/svg%3E")`
|
||||
}}
|
||||
>
|
||||
<div className="w-full max-w-sm md:max-w-4xl">
|
||||
<div className="flex flex-col gap-6">
|
||||
{/* 主要内容区域 */}
|
||||
<div className="flex-1 flex items-center justify-center">
|
||||
<div className="w-full max-w-sm md:max-w-4xl">
|
||||
<Card className="overflow-hidden p-0">
|
||||
<CardContent className="grid p-0 md:grid-cols-2">
|
||||
<form className="p-6 md:p-8" onSubmit={handleSubmit}>
|
||||
<FieldGroup>
|
||||
{/* 指纹标识 - 用于 FOFA/Shodan 等搜索引擎识别 */}
|
||||
<meta name="generator" content="Xingrin ASM Platform" />
|
||||
<div className="flex flex-col items-center gap-2 text-center">
|
||||
<h1 className="text-2xl font-bold">XingRin - 星环</h1>
|
||||
<p className="text-sm text-muted-foreground mt-1">
|
||||
一站式安全扫描平台
|
||||
自动化资产发现与漏洞扫描平台
|
||||
</p>
|
||||
</div>
|
||||
<Field>
|
||||
@@ -126,6 +129,13 @@ export default function LoginPage() {
|
||||
</Card>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 版本号 - 固定在页面底部 */}
|
||||
<div className="flex-shrink-0 text-center py-4">
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{process.env.NEXT_PUBLIC_VERSION || 'dev'}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -88,9 +88,9 @@ export function createDirectoryColumns({
|
||||
// URL 列
|
||||
{
|
||||
accessorKey: "url",
|
||||
size: 300,
|
||||
size: 400,
|
||||
minSize: 200,
|
||||
maxSize: 400,
|
||||
maxSize: 500,
|
||||
header: ({ column }) => {
|
||||
return (
|
||||
<Button
|
||||
|
||||
@@ -78,21 +78,21 @@ export function createIPAddressColumns(params: {
|
||||
enableSorting: false,
|
||||
enableHiding: false,
|
||||
},
|
||||
// IP 地址列
|
||||
// IP 列
|
||||
{
|
||||
accessorKey: "ip",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="IP 地址" />
|
||||
<DataTableColumnHeader column={column} title="IP Address" />
|
||||
),
|
||||
cell: ({ row }) => (
|
||||
<TruncatedCell value={row.original.ip} maxLength="ip" mono />
|
||||
),
|
||||
},
|
||||
// 关联主机名列
|
||||
// host 列
|
||||
{
|
||||
accessorKey: "hosts",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="关联主机" />
|
||||
<DataTableColumnHeader column={column} title="Hosts" />
|
||||
),
|
||||
cell: ({ getValue }) => {
|
||||
const hosts = getValue<string[]>()
|
||||
@@ -107,22 +107,38 @@ export function createIPAddressColumns(params: {
|
||||
return (
|
||||
<div className="flex flex-col gap-1">
|
||||
{displayHosts.map((host, index) => (
|
||||
<span key={index} className="text-sm font-mono">{host}</span>
|
||||
<TruncatedCell key={index} value={host} maxLength="host" mono />
|
||||
))}
|
||||
{hasMore && (
|
||||
<Badge variant="secondary" className="text-xs w-fit">
|
||||
+{hosts.length - 3} more
|
||||
</Badge>
|
||||
<Popover>
|
||||
<PopoverTrigger asChild>
|
||||
<Badge variant="secondary" className="text-xs w-fit cursor-pointer hover:bg-muted">
|
||||
+{hosts.length - 3} more
|
||||
</Badge>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent className="w-80 p-3">
|
||||
<div className="space-y-2">
|
||||
<h4 className="font-medium text-sm">All Hosts ({hosts.length})</h4>
|
||||
<div className="flex flex-col gap-1 max-h-48 overflow-y-auto">
|
||||
{hosts.map((host, index) => (
|
||||
<span key={index} className="text-sm font-mono break-all">
|
||||
{host}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
},
|
||||
},
|
||||
// 发现时间列
|
||||
// discoveredAt 列
|
||||
{
|
||||
accessorKey: "discoveredAt",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="发现时间" />
|
||||
<DataTableColumnHeader column={column} title="Discovered At" />
|
||||
),
|
||||
cell: ({ getValue }) => {
|
||||
const value = getValue<string | undefined>()
|
||||
@@ -133,7 +149,7 @@ export function createIPAddressColumns(params: {
|
||||
{
|
||||
accessorKey: "ports",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="开放端口" />
|
||||
<DataTableColumnHeader column={column} title="Open Ports" />
|
||||
),
|
||||
cell: ({ getValue }) => {
|
||||
const ports = getValue<number[]>()
|
||||
@@ -186,12 +202,12 @@ export function createIPAddressColumns(params: {
|
||||
<Popover>
|
||||
<PopoverTrigger asChild>
|
||||
<Badge variant="outline" className="text-xs cursor-pointer hover:bg-muted">
|
||||
+{ports.length - 8}
|
||||
+{ports.length - 8} more
|
||||
</Badge>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent className="w-80 p-3">
|
||||
<div className="space-y-2">
|
||||
<h4 className="font-medium text-sm">所有开放端口 ({sortedPorts.length})</h4>
|
||||
<h4 className="font-medium text-sm">All Open Ports ({sortedPorts.length})</h4>
|
||||
<div className="flex flex-wrap gap-1 max-h-32 overflow-y-auto">
|
||||
{sortedPorts.map((port, index) => (
|
||||
<Badge
|
||||
|
||||
@@ -267,7 +267,7 @@ export const createTargetColumns = ({
|
||||
{
|
||||
accessorKey: "name",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="目标名称" />
|
||||
<DataTableColumnHeader column={column} title="Target Name" />
|
||||
),
|
||||
cell: ({ row }) => (
|
||||
<TargetNameCell
|
||||
@@ -282,7 +282,7 @@ export const createTargetColumns = ({
|
||||
{
|
||||
accessorKey: "type",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="类型" />
|
||||
<DataTableColumnHeader column={column} title="Type" />
),
cell: ({ row }) => {
const type = row.getValue("type") as string | null

@@ -188,7 +188,7 @@ export const createEngineColumns = ({
{
accessorKey: "name",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="引擎名称" />
+ <DataTableColumnHeader column={column} title="Engine Name" />
),
cell: ({ row }) => {
const name = row.getValue("name") as string

@@ -180,7 +180,7 @@ export const createScheduledScanColumns = ({
{
accessorKey: "name",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="任务名称" />
+ <DataTableColumnHeader column={column} title="Task Name" />
),
cell: ({ row }) => {
const name = row.getValue("name") as string

@@ -216,7 +216,7 @@ export const createScheduledScanColumns = ({
{
accessorKey: "engineName",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="扫描引擎" />
+ <DataTableColumnHeader column={column} title="Scan Engine" />
),
cell: ({ row }) => {
const engineName = row.getValue("engineName") as string

@@ -231,7 +231,7 @@ export const createScheduledScanColumns = ({
// Cron 表达式列
{
accessorKey: "cronExpression",
- header: "调度时间",
+ header: "Cron Expression",
cell: ({ row }) => {
const cron = row.original.cronExpression
return (

@@ -251,7 +251,7 @@ export const createScheduledScanColumns = ({
// 目标列(根据 scanMode 显示组织或目标)
{
accessorKey: "scanMode",
- header: "目标",
+ header: "Target",
cell: ({ row }) => {
const scanMode = row.original.scanMode
const organizationName = row.original.organizationName

@@ -283,7 +283,7 @@ export const createScheduledScanColumns = ({
{
accessorKey: "isEnabled",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="状态" />
+ <DataTableColumnHeader column={column} title="Status" />
),
cell: ({ row }) => {
const isEnabled = row.getValue("isEnabled") as boolean

@@ -308,7 +308,7 @@ export const createScheduledScanColumns = ({
{
accessorKey: "nextRunTime",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="下次执行" />
+ <DataTableColumnHeader column={column} title="Next Run" />
),
cell: ({ row }) => {
const nextRunTime = row.getValue("nextRunTime") as string | undefined

@@ -324,7 +324,7 @@ export const createScheduledScanColumns = ({
{
accessorKey: "runCount",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="执行次数" />
+ <DataTableColumnHeader column={column} title="Run Count" />
),
cell: ({ row }) => {
const count = row.getValue("runCount") as number

@@ -338,7 +338,7 @@ export const createScheduledScanColumns = ({
{
accessorKey: "lastRunTime",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="上次执行" />
+ <DataTableColumnHeader column={column} title="Last Run" />
),
cell: ({ row }) => {
const lastRunTime = row.getValue("lastRunTime") as string | undefined

@@ -9,7 +9,7 @@ import { useSystemLogs } from "@/hooks/use-system-logs"

export function SystemLogsView() {
const { theme } = useTheme()
- const { data } = useSystemLogs({ lines: 200 })
+ const { data } = useSystemLogs({ lines: 500 })

const content = useMemo(() => data?.content ?? "", [data?.content])
@@ -297,6 +297,8 @@ export function DeployTerminalDialog({
{isConnected && currentStatus === 'deploying' && '正在部署中,点击查看进度'}
{isConnected && currentStatus === 'online' && '节点运行正常'}
{isConnected && currentStatus === 'offline' && '节点离线,可尝试重新部署'}
+ {isConnected && currentStatus === 'updating' && '正在自动更新 Agent...'}
+ {isConnected && currentStatus === 'outdated' && '版本过低,需要更新'}
</div>

{/* 右侧:操作按钮 */}

@@ -334,6 +336,28 @@ export function DeployTerminalDialog({
</button>
)}

+ {/* 更新中 -> 显示"查看进度" */}
+ {currentStatus === 'updating' && (
+ <button
+ onClick={handleAttach}
+ className="inline-flex items-center px-3 py-1.5 text-sm rounded-md bg-[#e0af68] text-[#1a1b26] hover:bg-[#e0af68]/80 transition-colors"
+ >
+ <IconEye className="mr-1.5 h-4 w-4" />
+ 查看进度
+ </button>
+ )}
+
+ {/* 版本过低 -> 显示"重新部署" */}
+ {currentStatus === 'outdated' && (
+ <button
+ onClick={handleDeploy}
+ className="inline-flex items-center px-3 py-1.5 text-sm rounded-md bg-[#f7768e] text-[#1a1b26] hover:bg-[#f7768e]/80 transition-colors"
+ >
+ <IconRocket className="mr-1.5 h-4 w-4" />
+ 重新部署
+ </button>
+ )}
+
{/* 已部署(online/offline) -> 显示"重新部署"和"卸载" */}
{(currentStatus === 'online' || currentStatus === 'offline') && (
<>
@@ -51,6 +51,8 @@ const STATUS_MAP: Record<WorkerStatus, 'online' | 'offline' | 'maintenance' | 'd
offline: 'offline',
pending: 'maintenance',
deploying: 'degraded',
+ updating: 'degraded',
+ outdated: 'offline',
}

// 状态中文标签

@@ -59,6 +61,8 @@ const STATUS_LABEL: Record<WorkerStatus, string> = {
offline: '离线',
pending: '等待部署',
deploying: '部署中',
+ updating: '更新中',
+ outdated: '版本过低',
}

// 统计卡片组件
@@ -100,7 +100,7 @@ export const createSubdomainColumns = ({
{
accessorKey: "discoveredAt",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="发现时间" />
+ <DataTableColumnHeader column={column} title="Discovered At" />
),
cell: ({ getValue }) => {
const value = getValue<string | undefined>()

@@ -95,7 +95,7 @@ export const commandColumns: ColumnDef<Command>[] = [
{
accessorKey: "displayName",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="名称" />
+ <DataTableColumnHeader column={column} title="Name" />
),
cell: ({ row }) => {
const displayName = row.getValue("displayName") as string

@@ -136,7 +136,7 @@ export const commandColumns: ColumnDef<Command>[] = [
{
accessorKey: "tool",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="所属工具" />
+ <DataTableColumnHeader column={column} title="Tool" />
),
cell: ({ row }) => {
const tool = row.original.tool

@@ -156,7 +156,7 @@ export const commandColumns: ColumnDef<Command>[] = [
{
accessorKey: "commandTemplate",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="命令模板" />
+ <DataTableColumnHeader column={column} title="Command Template" />
),
cell: ({ row }) => {
const template = row.getValue("commandTemplate") as string

@@ -192,7 +192,7 @@ export const commandColumns: ColumnDef<Command>[] = [
{
accessorKey: "description",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="描述" />
+ <DataTableColumnHeader column={column} title="Description" />
),
cell: ({ row }) => {
const description = row.getValue("description") as string

@@ -217,7 +217,7 @@ export const commandColumns: ColumnDef<Command>[] = [
{
accessorKey: "updatedAt",
header: ({ column }) => (
- <DataTableColumnHeader column={column} title="更新时间" />
+ <DataTableColumnHeader column={column} title="Updated At" />
),
cell: ({ row }) => (
<div className="text-sm text-muted-foreground">
@@ -14,7 +14,7 @@ import { cn } from "@/lib/utils"
* 预设的截断长度配置
*/
export const TRUNCATE_LENGTHS = {
- url: 35,
+ url: 50,
title: 25,
location: 20,
webServer: 20,

@@ -142,7 +142,7 @@ export function TruncatedUrlCell({
: value

return (
- <div className="flex items-center gap-1 w-[280px] min-w-[280px]">
+ <div className="flex items-center gap-1 w-[380px] min-w-[380px]">
<span className={cn("text-sm font-mono truncate", className)}>
{displayText}
</span>
@@ -81,7 +81,7 @@ export function createVulnerabilityColumns({
},
{
accessorKey: "vulnType",
- header: "类型",
+ header: "Vuln Type",
cell: ({ row }) => {
const vulnType = row.getValue("vulnType") as string
const vulnerability = row.original

@@ -143,7 +143,7 @@ export function createVulnerabilityColumns({
},
{
accessorKey: "discoveredAt",
- header: "发现时间",
+ header: "Discovered At",
cell: ({ row }) => {
const discoveredAt = row.getValue("discoveredAt") as string
return (
@@ -62,7 +62,7 @@ export function useUpdateNucleiRepo() {
mutationFn: (data: {
id: number
repoUrl?: string
- }) => nucleiRepoApi.updateRepo(data.id, data),
+ }) => nucleiRepoApi.updateRepo(data.id, { repoUrl: data.repoUrl }),
onSuccess: (_data, variables) => {
toast.success("仓库配置已更新")
queryClient.invalidateQueries({ queryKey: ["nuclei-repos"] })

@@ -75,9 +75,9 @@ export const nucleiRepoApi = {
return response.data
},

- /** 更新仓库 */
+ /** 更新仓库(部分更新) */
updateRepo: async (repoId: number, payload: UpdateRepoPayload): Promise<NucleiRepoResponse> => {
- const response = await api.put<NucleiRepoResponse>(`${BASE_URL}${repoId}/`, payload)
+ const response = await api.patch<NucleiRepoResponse>(`${BASE_URL}${repoId}/`, payload)
return response.data
},
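The switch from api.put to api.patch (together with sending only { repoUrl }) turns the repo update into a partial update: PUT conventionally replaces the whole resource, so fields omitted from the body may be reset server-side, while PATCH only touches the fields that are present. At the HTTP level the difference is roughly the following (the endpoint path and id are illustrative, not taken from this diff):

# PUT: full update - the server may require or overwrite every writable field
curl -X PUT   -H 'Content-Type: application/json' -d '{"repoUrl": "https://example.com/repo.git"}' "$API_BASE/nuclei-repos/1/"
# PATCH: partial update - only repoUrl changes, other fields keep their current values
curl -X PATCH -H 'Content-Type: application/json' -d '{"repoUrl": "https://example.com/repo.git"}' "$API_BASE/nuclei-repos/1/"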
@@ -3,7 +3,7 @@
*/

// Worker 状态枚举(前后端统一)
- export type WorkerStatus = 'pending' | 'deploying' | 'online' | 'offline'
+ export type WorkerStatus = 'pending' | 'deploying' | 'online' | 'offline' | 'updating' | 'outdated'

// Worker 节点
export interface WorkerNode {
install.sh (118 changes)
@@ -75,7 +75,12 @@ fi

# 获取真实用户(通过 sudo 运行时 $SUDO_USER 是真实用户)
REAL_USER="${SUDO_USER:-$USER}"
- REAL_HOME=$(getent passwd "$REAL_USER" | cut -d: -f6)
+ # macOS 没有 getent,使用 dscl 或 ~$USER 替代
+ if command -v getent &>/dev/null; then
+ REAL_HOME=$(getent passwd "$REAL_USER" | cut -d: -f6)
+ else
+ REAL_HOME=$(eval echo "~$REAL_USER")
+ fi

# 项目根目录
ROOT_DIR="$(cd "$(dirname "$0")" && pwd)"
@@ -110,13 +115,22 @@ generate_random_string() {
fi
}

+ # 跨平台 sed -i(兼容 macOS 和 Linux)
+ sed_inplace() {
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ sed -i '' "$@"
+ else
+ sed -i "$@"
+ fi
+ }
+
# 更新 .env 文件中的某个键
update_env_var() {
local file="$1"
local key="$2"
local value="$3"
if grep -q "^$key=" "$file"; then
- sed -i -e "s|^$key=.*|$key=$value|" "$file"
+ sed_inplace "s|^$key=.*|$key=$value|" "$file"
else
echo "$key=$value" >> "$file"
fi
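The sed_inplace wrapper exists because BSD sed on macOS requires an explicit (possibly empty) backup-suffix argument after -i, while GNU sed treats the next argument as the script. A minimal usage sketch against the same helpers (the key and value below are illustrative):

# GNU sed (Linux):  sed -i "s/old/new/" file        # edits in place, no suffix argument
# BSD sed (macOS):  sed -i '' "s/old/new/" file     # the empty suffix argument is mandatory
sed_inplace "s/^DEBUG=.*/DEBUG=False/" docker/.env  # same call works on both platforms
update_env_var docker/.env IMAGE_TAG v1.2.3         # appends "IMAGE_TAG=v1.2.3" if the key is absent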
@@ -126,7 +140,7 @@ update_env_var() {
GENERATED_DB_PASSWORD=""
GENERATED_DJANGO_KEY=""

- # 生成自签 HTTPS 证书(无域名场景)
+ # 生成自签 HTTPS 证书(使用容器,避免宿主机 openssl 兼容性问题)
generate_self_signed_cert() {
local ssl_dir="$DOCKER_DIR/nginx/ssl"
local fullchain="$ssl_dir/fullchain.pem"
@@ -139,14 +153,18 @@ generate_self_signed_cert() {

info "未检测到 HTTPS 证书,正在生成自签证书(localhost)..."
mkdir -p "$ssl_dir"
- if openssl req -x509 -nodes -newkey rsa:2048 -days 365 \
- -keyout "$privkey" \
- -out "$fullchain" \
+
+ # 使用容器生成证书,避免依赖宿主机 openssl 版本
+ if docker run --rm -v "$ssl_dir:/ssl" alpine/openssl \
+ req -x509 -nodes -newkey rsa:2048 -days 365 \
+ -keyout /ssl/privkey.pem \
+ -out /ssl/fullchain.pem \
-subj "/C=CN/ST=NA/L=NA/O=XingRin/CN=localhost" \
- -addext "subjectAltName=DNS:localhost,IP:127.0.0.1" >/dev/null 2>&1; then
+ -addext "subjectAltName=DNS:localhost,IP:127.0.0.1" \
+ >/dev/null 2>&1; then
success "自签证书已生成: $ssl_dir"
else
- warn "自签证书生成失败,请检查 openssl 是否可用,或手动放置证书到 $ssl_dir"
+ warn "自签证书生成失败,请手动放置证书到 $ssl_dir"
fi
}
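Running openssl inside the alpine/openssl image means the host only needs Docker: the -addext option used here for the subjectAltName requires OpenSSL 1.1.1 or newer, which older hosts (or macOS LibreSSL) may not provide. A rough stand-alone way to inspect the generated certificate with the same image (the local path is illustrative; the script uses "$DOCKER_DIR/nginx/ssl"):

ssl_dir="$PWD/docker/nginx/ssl"
docker run --rm -v "$ssl_dir:/ssl" alpine/openssl \
  x509 -in /ssl/fullchain.pem -noout -subject -dates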
@@ -215,7 +233,7 @@ show_summary() {

echo -e "${YELLOW}[!] 云服务器某些厂商默认开启了安全策略(阿里云/腾讯云/华为云等):${RESET}"
echo -e " 端口未放行可能导致无法访问或无法扫描,强烈推荐用国外vps,或者在云控制台放行:"
- echo -e " ${RESET}80, 443, 3000,8888, 5432, 6379"
+ echo -e " ${RESET}80, 443, 5432, 6379"
echo
}
@@ -225,7 +243,7 @@ show_summary() {

step "[1/3] 检查基础命令"
MISSING_CMDS=()
- for cmd in git curl jq openssl; do
+ for cmd in git curl; do
if ! command -v "$cmd" >/dev/null 2>&1; then
MISSING_CMDS+=("$cmd")
warn "未安装: $cmd"
@@ -260,6 +278,46 @@ else
success "docker compose 安装完成"
fi

+ # ==============================================================================
+ # 交换分区配置(仅 Linux)
+ # ==============================================================================
+ if [[ "$OSTYPE" == "linux-gnu"* ]]; then
+ # 获取当前内存大小(GB,四舍五入)
+ TOTAL_MEM_KB=$(grep MemTotal /proc/meminfo | awk '{print $2}')
+ TOTAL_MEM_GB=$(awk "BEGIN {printf \"%.0f\", $TOTAL_MEM_KB / 1024 / 1024}")
+
+ # 获取当前交换分区大小(GB,四舍五入)
+ CURRENT_SWAP_KB=$(grep SwapTotal /proc/meminfo | awk '{print $2}')
+ CURRENT_SWAP_GB=$(awk "BEGIN {printf \"%.0f\", $CURRENT_SWAP_KB / 1024 / 1024}")
+
+ # 推荐交换分区大小(与内存相同,最小1G,最大8G)
+ RECOMMENDED_SWAP=$TOTAL_MEM_GB
+ [ "$RECOMMENDED_SWAP" -lt 1 ] && RECOMMENDED_SWAP=1
+ [ "$RECOMMENDED_SWAP" -gt 8 ] && RECOMMENDED_SWAP=8
+
+ echo ""
+ info "系统内存: ${TOTAL_MEM_GB}GB,当前交换分区: ${CURRENT_SWAP_GB}GB"
+
+ # 如果交换分区小于推荐值,提示用户
+ if [ "$CURRENT_SWAP_GB" -lt "$RECOMMENDED_SWAP" ]; then
+ echo -n -e "${BOLD}${CYAN}[?] 是否开启 ${RECOMMENDED_SWAP}GB 交换分区?可提升扫描稳定性 (Y/n) ${RESET}"
+ read -r setup_swap
+ echo
+ if [[ ! $setup_swap =~ ^[Nn]$ ]]; then
+ info "正在配置 ${RECOMMENDED_SWAP}GB 交换分区..."
+ if bash "$ROOT_DIR/docker/scripts/setup-swap.sh" "$RECOMMENDED_SWAP"; then
+ success "交换分区配置完成"
+ else
+ warn "交换分区配置失败,继续安装..."
+ fi
+ else
+ info "跳过交换分区配置"
+ fi
+ else
+ success "交换分区已足够: ${CURRENT_SWAP_GB}GB"
+ fi
+ fi
+
step "[3/3] 初始化配置"
DOCKER_DIR="$ROOT_DIR/docker"
if [ ! -d "$DOCKER_DIR" ]; then
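The recommended swap size is clamped to the 1–8 GB range before docker/scripts/setup-swap.sh is invoked; that script itself is not part of this diff, so its exact contents are not shown. A conventional Linux swap-file setup of the kind the prompt above implies would look roughly like this (sketch only; the file name, allocation method, and fstab handling are assumptions):

SWAP_GB="${1:-4}"                                   # assumed to be the size argument passed by install.sh
sudo fallocate -l "${SWAP_GB}G" /swapfile           # or: dd if=/dev/zero of=/swapfile bs=1M count=$((SWAP_GB * 1024))
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
grep -q '^/swapfile ' /etc/fstab || echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab   # persist across reboots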
@@ -353,10 +411,10 @@ if [ -f "$DOCKER_DIR/.env.example" ]; then
-c "CREATE DATABASE $prefect_db;" 2>/dev/null || true
success "数据库准备完成"

- sed -i "s/^DB_HOST=.*/DB_HOST=$db_host/" "$DOCKER_DIR/.env"
- sed -i "s/^DB_PORT=.*/DB_PORT=$db_port/" "$DOCKER_DIR/.env"
- sed -i "s/^DB_USER=.*/DB_USER=$db_user/" "$DOCKER_DIR/.env"
- sed -i "s/^DB_PASSWORD=.*/DB_PASSWORD=$db_password/" "$DOCKER_DIR/.env"
+ sed_inplace "s/^DB_HOST=.*/DB_HOST=$db_host/" "$DOCKER_DIR/.env"
+ sed_inplace "s/^DB_PORT=.*/DB_PORT=$db_port/" "$DOCKER_DIR/.env"
+ sed_inplace "s/^DB_USER=.*/DB_USER=$db_user/" "$DOCKER_DIR/.env"
+ sed_inplace "s/^DB_PASSWORD=.*/DB_PASSWORD=$db_password/" "$DOCKER_DIR/.env"
success "已配置远程数据库: $db_user@$db_host:$db_port"
else
info "使用本地 PostgreSQL 容器"

@@ -388,6 +446,38 @@ fi
# 准备 HTTPS 证书(无域名也可使用自签)
generate_self_signed_cert

+ # ==============================================================================
+ # 预拉取 Worker 镜像(避免扫描时等待)
+ # ==============================================================================
+ step "预拉取 Worker 镜像..."
+ DOCKER_USER=$(grep "^DOCKER_USER=" "$DOCKER_DIR/.env" 2>/dev/null | cut -d= -f2)
+ DOCKER_USER=${DOCKER_USER:-yyhuni}
+ WORKER_IMAGE="${DOCKER_USER}/xingrin-worker:${APP_VERSION}"
+
+ # 开发模式下构建本地 worker 镜像
+ if [ "$DEV_MODE" = true ]; then
+ info "开发模式:构建本地 Worker 镜像..."
+ if docker compose -f "$DOCKER_DIR/docker-compose.dev.yml" build worker; then
+ # 设置 TASK_EXECUTOR_IMAGE 环境变量指向本地构建的镜像(使用版本号-dev标识)
+ update_env_var "$DOCKER_DIR/.env" "TASK_EXECUTOR_IMAGE" "docker-worker:${APP_VERSION}-dev"
+ success "本地 Worker 镜像构建完成: docker-worker:${APP_VERSION}-dev"
+ else
+ error "开发模式下本地 Worker 镜像构建失败!"
+ error "请检查构建错误并修复后重试"
+ exit 1
+ fi
+ else
+ info "正在拉取: $WORKER_IMAGE"
+ if docker pull "$WORKER_IMAGE"; then
+ success "Worker 镜像拉取完成"
+ else
+ error "Worker 镜像拉取失败,无法继续安装"
+ error "请检查网络连接或 Docker Hub 访问权限"
+ error "镜像地址: $WORKER_IMAGE"
+ exit 1
+ fi
+ fi
+
# ==============================================================================
# 启动服务
# ==============================================================================
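Pre-pulling (or pre-building) the worker image moves a missing-image failure to install time instead of the first scan. To confirm what the installer ended up configuring, checks along these lines should work (illustrative; variables as defined in the script above):

grep '^TASK_EXECUTOR_IMAGE=' docker/.env                 # dev mode: should point at docker-worker:<version>-dev
docker image inspect "$WORKER_IMAGE" --format '{{.Id}}'  # non-dev: confirms the pulled image is present locally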
uninstall.sh (52 changes)
@@ -80,12 +80,15 @@ if [[ $ans_stop =~ ^[Yy]$ ]]; then
# 先强制停止并删除可能占用网络的容器(xingrin-agent 等)
docker rm -f xingrin-agent xingrin-watchdog 2>/dev/null || true

- # 停止两种模式的容器
+ # 清理所有可能的 XingRin 相关容器
+ docker ps -a | grep -E "(xingrin|docker-)" | awk '{print $1}' | xargs -r docker rm -f 2>/dev/null || true
+
+ # 停止两种模式的容器(不带 -v,volume 在第 5 步单独处理)
[ -f "docker-compose.yml" ] && ${COMPOSE_CMD} -f docker-compose.yml down 2>/dev/null || true
[ -f "docker-compose.dev.yml" ] && ${COMPOSE_CMD} -f docker-compose.dev.yml down 2>/dev/null || true

# 手动删除网络(以防 compose 未能删除)
- docker network rm xingrin_network 2>/dev/null || true
+ docker network rm xingrin_network docker_default 2>/dev/null || true

success "容器和网络已停止/删除(如存在)。"
else
@@ -156,19 +159,28 @@ ans_db=${ans_db:-Y}

if [[ $ans_db =~ ^[Yy]$ ]]; then
info "尝试删除与 XingRin 相关的 Postgres 容器和数据卷..."
- # docker-compose 项目名为 docker,常见资源名如下(忽略不存在的情况):
- # - 容器: docker-postgres-1
- # - 数据卷: docker_postgres_data(对应 compose 中的 postgres_data 卷)
- docker rm -f docker-postgres-1 2>/dev/null || true
- docker volume rm docker_postgres_data 2>/dev/null || true
- success "本地 Postgres 容器及数据卷已尝试删除(不存在会自动忽略)。"
+ # 删除可能的容器名(不同 compose 版本命名不同)
+ docker rm -f docker-postgres-1 xingrin-postgres postgres 2>/dev/null || true
+
+ # 删除可能的 volume 名(取决于项目名和 compose 配置)
+ # 先列出要删除的 volume
+ for vol in postgres_data docker_postgres_data xingrin_postgres_data; do
+ if docker volume inspect "$vol" >/dev/null 2>&1; then
+ if docker volume rm "$vol" 2>/dev/null; then
+ success "已删除 volume: $vol"
+ else
+ warn "无法删除 volume: $vol(可能正在被使用,请先停止所有容器)"
+ fi
+ fi
+ done
+ success "本地 Postgres 数据卷清理完成。"
else
warn "已保留本地 Postgres 容器和 volume。"
fi

- step "[6/6] 是否删除与 XingRin 相关的 Docker 镜像?(y/N)"
+ step "[6/6] 是否删除与 XingRin 相关的 Docker 镜像?(Y/n)"
read -r ans_images
- ans_images=${ans_images:-N}
+ ans_images=${ans_images:-Y}

if [[ $ans_images =~ ^[Yy]$ ]]; then
info "正在删除 Docker 镜像..."
@@ -199,9 +211,29 @@ if [[ $ans_images =~ ^[Yy]$ ]]; then
fi

docker rmi redis:7-alpine 2>/dev/null || true
+
+ # 删除本地构建的开发镜像
+ docker rmi docker-server docker-frontend docker-nginx docker-agent docker-worker 2>/dev/null || true
+ docker rmi "docker-worker:${IMAGE_TAG}-dev" 2>/dev/null || true
+
success "Docker 镜像已删除(如存在)。"
else
warn "已保留 Docker 镜像。"
fi

+ # 清理构建缓存(可选,会导致下次构建变慢)
+ echo ""
+ echo -n -e "${BOLD}${CYAN}[?] 是否清理 Docker 构建缓存?(y/N) ${RESET}"
+ echo -e "${YELLOW}(清理后下次构建会很慢,一般不需要)${RESET}"
+ read -r ans_cache
+ ans_cache=${ans_cache:-N}
+
+ if [[ $ans_cache =~ ^[Yy]$ ]]; then
+ info "清理 Docker 构建缓存..."
+ docker builder prune -af 2>/dev/null || true
+ success "构建缓存已清理。"
+ else
+ warn "已保留构建缓存(推荐)。"
+ fi
+
success "卸载流程已完成。"
update.sh (11 changes)
@@ -18,6 +18,15 @@

cd "$(dirname "$0")"

+ # 跨平台 sed -i(兼容 macOS 和 Linux)
+ sed_inplace() {
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ sed -i '' "$@"
+ else
+ sed -i "$@"
+ fi
+ }
+
# 解析参数判断模式
DEV_MODE=false
for arg in "$@"; do

@@ -92,7 +101,7 @@ if [ -f "VERSION" ]; then
if [ -n "$NEW_VERSION" ]; then
# 更新 .env 中的 IMAGE_TAG(所有节点将使用此版本的镜像)
if grep -q "^IMAGE_TAG=" "docker/.env"; then
- sed -i "s/^IMAGE_TAG=.*/IMAGE_TAG=$NEW_VERSION/" "docker/.env"
+ sed_inplace "s/^IMAGE_TAG=.*/IMAGE_TAG=$NEW_VERSION/" "docker/.env"
echo -e " ${GREEN}+${NC} 版本同步: IMAGE_TAG=$NEW_VERSION"
else
echo "IMAGE_TAG=$NEW_VERSION" >> "docker/.env"