refactor(backend): consolidate migration files and restructure host port entities

- Remove seed data generation command (cmd/seed/main.go)
- Consolidate database migrations into single init schema file
- Rename ip_address DTO to host_port for consistency
- Add host_port_snapshot DTO and model for snapshot tracking
- Rename host_port handler and repository files for clarity
- Implement host_port_snapshot service layer with CRUD operations
- Update website_snapshot service to work with new host_port structure
- Enhance terminal login UI with focus state tracking and Tab key navigation
- Update docker-compose configuration for development environment
- Refactor directory and website snapshot DTOs for improved data structure
- Add comprehensive test coverage for model and handler changes
- Simplify database schema by consolidating related migrations into single initialization file
This commit is contained in:
yyhuni
2026-01-14 18:04:16 +08:00
parent e8a9606d3b
commit e542633ad3
41 changed files with 684 additions and 1485 deletions

View File

@@ -50,6 +50,7 @@ export function TerminalLogin({
const [password, setPassword] = React.useState("")
const [lines, setLines] = React.useState<TerminalLine[]>([])
const [cursorPosition, setCursorPosition] = React.useState(0)
const [isFocused, setIsFocused] = React.useState(false)
const inputRef = React.useRef<HTMLInputElement>(null)
const containerRef = React.useRef<HTMLDivElement>(null)
@@ -137,6 +138,17 @@ export function TerminalLogin({
return
}
// Tab - Move to next field (username -> password)
if (e.key === "Tab" && step === "username") {
e.preventDefault()
if (!username.trim()) return
addLine({ text: `> ${t.usernamePrompt}: `, type: "prompt" })
addLine({ text: username, type: "input" })
setStep("password")
setCursorPosition(0)
return
}
// Enter - Submit
if (e.key === "Enter") {
if (step === "username") {
@@ -208,6 +220,10 @@ export function TerminalLogin({
const after = displayValue.slice(cursorPosition)
const cursorChar = after[0] || ""
if (!isFocused) {
return <span className="text-zinc-100">{displayValue}</span>
}
return (
<>
<span className="text-zinc-100">{before}</span>
@@ -359,6 +375,8 @@ export function TerminalLogin({
onChange={handleInputChange}
onKeyDown={handleKeyDown}
onSelect={handleSelect}
onFocus={() => setIsFocused(true)}
onBlur={() => setIsFocused(false)}
disabled={isInputDisabled}
className="absolute opacity-0 pointer-events-none"
autoComplete={step === "username" ? "username" : "current-password"}

File diff suppressed because it is too large Load Diff

View File

@@ -136,7 +136,7 @@ func main() {
subdomainRepo := repository.NewSubdomainRepository(db)
endpointRepo := repository.NewEndpointRepository(db)
directoryRepo := repository.NewDirectoryRepository(db)
ipAddressRepo := repository.NewIPAddressRepository(db)
hostPortRepo := repository.NewHostPortRepository(db)
screenshotRepo := repository.NewScreenshotRepository(db)
vulnerabilityRepo := repository.NewVulnerabilityRepository(db)
scanRepo := repository.NewScanRepository(db)
@@ -145,6 +145,7 @@ func main() {
subdomainSnapshotRepo := repository.NewSubdomainSnapshotRepository(db)
endpointSnapshotRepo := repository.NewEndpointSnapshotRepository(db)
directorySnapshotRepo := repository.NewDirectorySnapshotRepository(db)
hostPortSnapshotRepo := repository.NewHostPortSnapshotRepository(db)
// Create services
userSvc := service.NewUserService(userRepo)
@@ -156,7 +157,7 @@ func main() {
subdomainSvc := service.NewSubdomainService(subdomainRepo, targetRepo)
endpointSvc := service.NewEndpointService(endpointRepo, targetRepo)
directorySvc := service.NewDirectoryService(directoryRepo, targetRepo)
ipAddressSvc := service.NewIPAddressService(ipAddressRepo, targetRepo)
hostPortSvc := service.NewHostPortService(hostPortRepo, targetRepo)
screenshotSvc := service.NewScreenshotService(screenshotRepo, targetRepo)
vulnerabilitySvc := service.NewVulnerabilityService(vulnerabilityRepo, targetRepo)
scanSvc := service.NewScanService(scanRepo, scanLogRepo, targetRepo, orgRepo)
@@ -165,6 +166,7 @@ func main() {
subdomainSnapshotSvc := service.NewSubdomainSnapshotService(subdomainSnapshotRepo, scanRepo, subdomainSvc)
endpointSnapshotSvc := service.NewEndpointSnapshotService(endpointSnapshotRepo, scanRepo, endpointSvc)
directorySnapshotSvc := service.NewDirectorySnapshotService(directorySnapshotRepo, scanRepo, directorySvc)
hostPortSnapshotSvc := service.NewHostPortSnapshotService(hostPortSnapshotRepo, scanRepo, hostPortSvc)
// Create handlers
healthHandler := handler.NewHealthHandler(db, redisClient)
@@ -178,7 +180,7 @@ func main() {
subdomainHandler := handler.NewSubdomainHandler(subdomainSvc)
endpointHandler := handler.NewEndpointHandler(endpointSvc)
directoryHandler := handler.NewDirectoryHandler(directorySvc)
ipAddressHandler := handler.NewIPAddressHandler(ipAddressSvc)
hostPortHandler := handler.NewHostPortHandler(hostPortSvc)
screenshotHandler := handler.NewScreenshotHandler(screenshotSvc)
vulnerabilityHandler := handler.NewVulnerabilityHandler(vulnerabilitySvc)
scanHandler := handler.NewScanHandler(scanSvc)
@@ -187,6 +189,7 @@ func main() {
subdomainSnapshotHandler := handler.NewSubdomainSnapshotHandler(subdomainSnapshotSvc)
endpointSnapshotHandler := handler.NewEndpointSnapshotHandler(endpointSnapshotSvc)
directorySnapshotHandler := handler.NewDirectorySnapshotHandler(directorySnapshotSvc)
hostPortSnapshotHandler := handler.NewHostPortSnapshotHandler(hostPortSnapshotSvc)
// Register health routes
router.GET("/health", healthHandler.Check)
@@ -273,13 +276,13 @@ func main() {
// Directories (standalone)
protected.POST("/directories/bulk-delete", directoryHandler.BulkDelete)
// IP Addresses (nested under targets)
protected.GET("/targets/:id/ip-addresses", ipAddressHandler.List)
protected.GET("/targets/:id/ip-addresses/export", ipAddressHandler.Export)
protected.POST("/targets/:id/ip-addresses/bulk-upsert", ipAddressHandler.BulkUpsert)
// Host Ports (nested under targets)
protected.GET("/targets/:id/host-ports", hostPortHandler.List)
protected.GET("/targets/:id/host-ports/export", hostPortHandler.Export)
protected.POST("/targets/:id/host-ports/bulk-upsert", hostPortHandler.BulkUpsert)
// IP Addresses (standalone)
protected.POST("/ip-addresses/bulk-delete", ipAddressHandler.BulkDelete)
// Host Ports (standalone)
protected.POST("/host-ports/bulk-delete", hostPortHandler.BulkDelete)
// Screenshots (nested under targets)
protected.GET("/targets/:id/screenshots", screenshotHandler.ListByTargetID)
@@ -355,6 +358,11 @@ func main() {
protected.POST("/scans/:id/directories/bulk-upsert", directorySnapshotHandler.BulkUpsert)
protected.GET("/scans/:id/directories", directorySnapshotHandler.List)
protected.GET("/scans/:id/directories/export", directorySnapshotHandler.Export)
// HostPort Snapshots (nested under scans)
protected.POST("/scans/:id/host-ports/bulk-upsert", hostPortSnapshotHandler.BulkUpsert)
protected.GET("/scans/:id/host-ports", hostPortSnapshotHandler.List)
protected.GET("/scans/:id/host-ports/export", hostPortSnapshotHandler.Export)
}
}

View File

@@ -332,11 +332,9 @@ CREATE TABLE IF NOT EXISTS directory (
target_id INTEGER NOT NULL REFERENCES target(id) ON DELETE CASCADE,
url VARCHAR(2000) NOT NULL,
status INTEGER,
content_length BIGINT,
words INTEGER,
lines INTEGER,
content_length INTEGER,
content_type VARCHAR(200) NOT NULL DEFAULT '',
duration BIGINT,
duration INTEGER,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_directory_target ON directory(target_id);
@@ -370,6 +368,7 @@ CREATE TABLE IF NOT EXISTS vulnerability (
cvss_score DECIMAL(3,1) NOT NULL DEFAULT 0.0,
description TEXT NOT NULL DEFAULT '',
raw_output JSONB NOT NULL DEFAULT '{}',
reviewed BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_vuln_target ON vulnerability(target_id);
@@ -378,6 +377,7 @@ CREATE INDEX IF NOT EXISTS idx_vuln_type ON vulnerability(vuln_type);
CREATE INDEX IF NOT EXISTS idx_vuln_severity ON vulnerability(severity);
CREATE INDEX IF NOT EXISTS idx_vuln_source ON vulnerability(source);
CREATE INDEX IF NOT EXISTS idx_vuln_created_at ON vulnerability(created_at);
CREATE INDEX IF NOT EXISTS idx_vuln_target_reviewed ON vulnerability(target_id, reviewed);
-- ============================================
-- Snapshot tables (depends on scan)
@@ -419,7 +419,7 @@ CREATE TABLE IF NOT EXISTS website_snapshot (
host VARCHAR(253) NOT NULL DEFAULT '',
title TEXT NOT NULL DEFAULT '',
status_code INTEGER,
content_length BIGINT,
content_length INTEGER,
location TEXT NOT NULL DEFAULT '',
webserver TEXT NOT NULL DEFAULT '',
content_type TEXT NOT NULL DEFAULT '',
@@ -471,11 +471,9 @@ CREATE TABLE IF NOT EXISTS directory_snapshot (
scan_id INTEGER NOT NULL REFERENCES scan(id) ON DELETE CASCADE,
url VARCHAR(2000) NOT NULL,
status INTEGER,
content_length BIGINT,
words INTEGER,
lines INTEGER,
content_length INTEGER,
content_type VARCHAR(200) NOT NULL DEFAULT '',
duration BIGINT,
duration INTEGER,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_directory_snap_scan ON directory_snapshot(scan_id);
@@ -574,3 +572,22 @@ CREATE INDEX IF NOT EXISTS idx_notification_category ON notification(category);
CREATE INDEX IF NOT EXISTS idx_notification_level ON notification(level);
CREATE INDEX IF NOT EXISTS idx_notification_created_at ON notification(created_at);
CREATE INDEX IF NOT EXISTS idx_notification_is_read ON notification(is_read);
-- ============================================
-- GIN Indexes for array fields
-- ============================================
-- GIN index for website.tech array
CREATE INDEX IF NOT EXISTS idx_website_tech_gin ON website USING GIN (tech);
-- GIN index for endpoint.tech array
CREATE INDEX IF NOT EXISTS idx_endpoint_tech_gin ON endpoint USING GIN (tech);
-- GIN index for endpoint.matched_gf_patterns array
CREATE INDEX IF NOT EXISTS idx_endpoint_matched_gf_patterns_gin ON endpoint USING GIN (matched_gf_patterns);
-- GIN index for scan.engine_ids array
CREATE INDEX IF NOT EXISTS idx_scan_engine_ids_gin ON scan USING GIN (engine_ids);
-- GIN index for scan.container_ids array
CREATE INDEX IF NOT EXISTS idx_scan_container_ids_gin ON scan USING GIN (container_ids);

View File

@@ -1,11 +0,0 @@
-- Remove GIN indexes
DROP INDEX IF EXISTS idx_scheduled_scan_engine_ids_gin;
DROP INDEX IF EXISTS idx_scan_container_ids_gin;
DROP INDEX IF EXISTS idx_scan_engine_ids_gin;
DROP INDEX IF EXISTS idx_endpoint_snap_matched_gf_patterns_gin;
DROP INDEX IF EXISTS idx_endpoint_snap_tech_gin;
DROP INDEX IF EXISTS idx_website_snap_tech_gin;
DROP INDEX IF EXISTS idx_endpoint_matched_gf_patterns_gin;
DROP INDEX IF EXISTS idx_endpoint_tech_gin;
DROP INDEX IF EXISTS idx_website_tech_gin;

View File

@@ -1,23 +0,0 @@
-- Add GIN indexes for PostgreSQL array fields
-- GIN indexes enable efficient queries on array columns (e.g., @>, &&, etc.)
-- Website tech array
CREATE INDEX IF NOT EXISTS idx_website_tech_gin ON website USING GIN (tech);
-- Endpoint arrays
CREATE INDEX IF NOT EXISTS idx_endpoint_tech_gin ON endpoint USING GIN (tech);
CREATE INDEX IF NOT EXISTS idx_endpoint_matched_gf_patterns_gin ON endpoint USING GIN (matched_gf_patterns);
-- Website snapshot tech array
CREATE INDEX IF NOT EXISTS idx_website_snap_tech_gin ON website_snapshot USING GIN (tech);
-- Endpoint snapshot arrays
CREATE INDEX IF NOT EXISTS idx_endpoint_snap_tech_gin ON endpoint_snapshot USING GIN (tech);
CREATE INDEX IF NOT EXISTS idx_endpoint_snap_matched_gf_patterns_gin ON endpoint_snapshot USING GIN (matched_gf_patterns);
-- Scan arrays
CREATE INDEX IF NOT EXISTS idx_scan_engine_ids_gin ON scan USING GIN (engine_ids);
CREATE INDEX IF NOT EXISTS idx_scan_container_ids_gin ON scan USING GIN (container_ids);
-- Scheduled scan arrays
CREATE INDEX IF NOT EXISTS idx_scheduled_scan_engine_ids_gin ON scheduled_scan USING GIN (engine_ids);

View File

@@ -1,3 +0,0 @@
-- Restore words and lines columns to directory table
ALTER TABLE directory ADD COLUMN IF NOT EXISTS words INTEGER;
ALTER TABLE directory ADD COLUMN IF NOT EXISTS lines INTEGER;

View File

@@ -1,3 +0,0 @@
-- Drop words and lines columns from directory table
ALTER TABLE directory DROP COLUMN IF EXISTS words;
ALTER TABLE directory DROP COLUMN IF EXISTS lines;

View File

@@ -1,5 +0,0 @@
-- Remove review status fields from vulnerability table
DROP INDEX IF EXISTS idx_vuln_is_reviewed;
ALTER TABLE vulnerability
DROP COLUMN IF EXISTS reviewed_at,
DROP COLUMN IF EXISTS is_reviewed;

View File

@@ -1,7 +0,0 @@
-- Add review status fields to vulnerability table
ALTER TABLE vulnerability
ADD COLUMN is_reviewed BOOLEAN NOT NULL DEFAULT FALSE,
ADD COLUMN reviewed_at TIMESTAMP NULL;
-- Create index for filtering by review status
CREATE INDEX idx_vuln_is_reviewed ON vulnerability(is_reviewed);

View File

@@ -1,2 +0,0 @@
-- Remove composite index
DROP INDEX IF EXISTS idx_vuln_target_reviewed;

View File

@@ -1,3 +0,0 @@
-- Add composite index for target_id + is_reviewed queries
-- Optimizes: COUNT pending vulnerabilities by target, filter by target + review status
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_vuln_target_reviewed ON vulnerability(target_id, is_reviewed);

View File

@@ -11,11 +11,11 @@ services:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
ports:
- "5432:5432"
- 5432:5432
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
test: [CMD-SHELL, pg_isready -U postgres]
interval: 5s
timeout: 5s
retries: 5
@@ -24,9 +24,9 @@ services:
image: redis:7-alpine
restart: unless-stopped
ports:
- "6379:6379"
- 6379:6379
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [CMD, redis-cli, ping]
interval: 5s
timeout: 5s
retries: 5

View File

@@ -2,7 +2,7 @@ module github.com/xingrin/go-backend
go 1.24.0
toolchain go1.24.6
toolchain go1.24.5
require (
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2

View File

@@ -14,9 +14,9 @@ type DirectoryResponse struct {
TargetID int `json:"targetId"`
URL string `json:"url"`
Status *int `json:"status"`
ContentLength *int64 `json:"contentLength"`
ContentLength *int `json:"contentLength"`
ContentType string `json:"contentType"`
Duration *int64 `json:"duration"`
Duration *int `json:"duration"`
CreatedAt time.Time `json:"createdAt"`
}
@@ -34,9 +34,9 @@ type BulkCreateDirectoriesResponse struct {
type DirectoryUpsertItem struct {
URL string `json:"url" binding:"required,url"`
Status *int `json:"status"`
ContentLength *int64 `json:"contentLength"`
ContentLength *int `json:"contentLength"`
ContentType string `json:"contentType"`
Duration *int64 `json:"duration"`
Duration *int `json:"duration"`
}
// BulkUpsertDirectoriesRequest represents bulk upsert directories request

View File

@@ -2,16 +2,9 @@ package dto
import "time"
// DirectorySnapshotItem represents a single directory snapshot data for bulk upsert
type DirectorySnapshotItem struct {
URL string `json:"url" binding:"required,url"`
Status *int `json:"status"`
ContentLength *int64 `json:"contentLength"`
Words *int `json:"words"`
Lines *int `json:"lines"`
ContentType string `json:"contentType"`
Duration *int64 `json:"duration"`
}
// DirectorySnapshotItem is an alias for DirectoryUpsertItem
// Snapshot items and asset items have identical fields
type DirectorySnapshotItem = DirectoryUpsertItem
// BulkUpsertDirectorySnapshotsRequest represents bulk upsert directory snapshots request
type BulkUpsertDirectorySnapshotsRequest struct {
@@ -37,10 +30,8 @@ type DirectorySnapshotResponse struct {
ScanID int `json:"scanId"`
URL string `json:"url"`
Status *int `json:"status"`
ContentLength *int64 `json:"contentLength"`
Words *int `json:"words"`
Lines *int `json:"lines"`
ContentLength *int `json:"contentLength"`
ContentType string `json:"contentType"`
Duration *int64 `json:"duration"`
Duration *int `json:"duration"`
CreatedAt time.Time `json:"createdAt"`
}

View File

@@ -0,0 +1,44 @@
package dto

import "time"

// HostPortListQuery represents host-port list query parameters:
// standard pagination plus an optional free-text filter.
type HostPortListQuery struct {
	PaginationQuery
	Filter string `form:"filter"` // optional filter term applied to the listing
}

// HostPortResponse represents aggregated host-port response (grouped by IP):
// one entry per IP carrying every hostname and port recorded for it.
type HostPortResponse struct {
	IP        string    `json:"ip"`
	Hosts     []string  `json:"hosts"` // hostnames associated with this IP
	Ports     []int     `json:"ports"` // ports recorded for this IP
	CreatedAt time.Time `json:"createdAt"`
}

// HostPortItem represents a single host-port mapping for bulk operations.
// Validation: IP must parse as an IP address; Port must be in 1-65535.
type HostPortItem struct {
	Host string `json:"host" binding:"required"`
	IP   string `json:"ip" binding:"required,ip"`
	Port int    `json:"port" binding:"required,min=1,max=65535"`
}

// BulkUpsertHostPortsRequest represents bulk upsert request (for scanner import).
// Accepts 1-5000 mappings; each element is validated individually via "dive".
type BulkUpsertHostPortsRequest struct {
	Mappings []HostPortItem `json:"mappings" binding:"required,min=1,max=5000,dive"`
}

// BulkUpsertHostPortsResponse represents bulk upsert response.
type BulkUpsertHostPortsResponse struct {
	UpsertedCount int `json:"upsertedCount"` // number of mappings written
}

// BulkDeleteHostPortsRequest represents bulk delete request (by IP list).
type BulkDeleteHostPortsRequest struct {
	IPs []string `json:"ips" binding:"required,min=1"`
}

// BulkDeleteHostPortsResponse represents bulk delete response.
type BulkDeleteHostPortsResponse struct {
	DeletedCount int64 `json:"deletedCount"` // number of rows removed
}

View File

@@ -0,0 +1,30 @@
package dto

import "time"

// HostPortSnapshotItem is an alias for HostPortItem used in snapshot operations
// (snapshot items and asset items share identical fields and validation).
type HostPortSnapshotItem = HostPortItem

// BulkUpsertHostPortSnapshotsRequest represents a bulk upsert of host-port
// snapshots for a scan. TargetID identifies the target whose asset table the
// snapshots are synced into; 1-5000 items are accepted per request.
type BulkUpsertHostPortSnapshotsRequest struct {
	TargetID  int                    `json:"targetId" binding:"required"`
	HostPorts []HostPortSnapshotItem `json:"hostPorts" binding:"required,min=1,max=5000,dive"`
}

// BulkUpsertHostPortSnapshotsResponse reports how many snapshot rows and how
// many asset rows were written by a bulk upsert.
type BulkUpsertHostPortSnapshotsResponse struct {
	SnapshotCount int `json:"snapshotCount"`
	AssetCount    int `json:"assetCount"`
}

// HostPortSnapshotListQuery represents host-port snapshot list query
// parameters: standard pagination plus an optional free-text filter.
type HostPortSnapshotListQuery struct {
	PaginationQuery
	Filter string `form:"filter"`
}

// HostPortSnapshotResponse represents a single host-port snapshot row
// belonging to one scan.
type HostPortSnapshotResponse struct {
	ID        int       `json:"id"`
	ScanID    int       `json:"scanId"`
	Host      string    `json:"host"`
	IP        string    `json:"ip"`
	Port      int       `json:"port"`
	CreatedAt time.Time `json:"createdAt"`
}

View File

@@ -1,44 +0,0 @@
package dto
import "time"
// IPAddressListQuery represents IP address list query parameters
type IPAddressListQuery struct {
PaginationQuery
Filter string `form:"filter"`
}
// IPAddressResponse represents aggregated IP address response (grouped by IP)
type IPAddressResponse struct {
IP string `json:"ip"`
Hosts []string `json:"hosts"`
Ports []int `json:"ports"`
CreatedAt time.Time `json:"createdAt"`
}
// IPAddressItem represents a single IP address mapping for bulk operations
type IPAddressItem struct {
Host string `json:"host" binding:"required"`
IP string `json:"ip" binding:"required,ip"`
Port int `json:"port" binding:"required,min=1,max=65535"`
}
// BulkUpsertIPAddressesRequest represents bulk upsert request (for scanner import)
type BulkUpsertIPAddressesRequest struct {
Mappings []IPAddressItem `json:"mappings" binding:"required,min=1,max=5000,dive"`
}
// BulkUpsertIPAddressesResponse represents bulk upsert response
type BulkUpsertIPAddressesResponse struct {
UpsertedCount int `json:"upsertedCount"`
}
// BulkDeleteIPAddressesRequest represents bulk delete request (by IP list)
type BulkDeleteIPAddressesRequest struct {
IPs []string `json:"ips" binding:"required,min=1"`
}
// BulkDeleteIPAddressesResponse represents bulk delete response
type BulkDeleteIPAddressesResponse struct {
DeletedCount int64 `json:"deletedCount"`
}

View File

@@ -2,21 +2,8 @@ package dto
import "time"
// WebsiteSnapshotItem represents a single website snapshot data for bulk upsert
type WebsiteSnapshotItem struct {
URL string `json:"url" binding:"required,url"`
Host string `json:"host"`
Title string `json:"title"`
StatusCode *int `json:"statusCode"`
ContentLength *int64 `json:"contentLength"`
Location string `json:"location"`
Webserver string `json:"webserver"`
ContentType string `json:"contentType"`
Tech []string `json:"tech"`
ResponseBody string `json:"responseBody"`
Vhost *bool `json:"vhost"`
ResponseHeaders string `json:"responseHeaders"`
}
// WebsiteSnapshotItem is an alias for WebsiteUpsertItem used in snapshot operations
type WebsiteSnapshotItem = WebsiteUpsertItem
// BulkUpsertWebsiteSnapshotsRequest represents bulk upsert website snapshots request
type BulkUpsertWebsiteSnapshotsRequest struct {
@@ -44,7 +31,7 @@ type WebsiteSnapshotResponse struct {
Host string `json:"host"`
Title string `json:"title"`
StatusCode *int `json:"statusCode"`
ContentLength *int64 `json:"contentLength"`
ContentLength *int `json:"contentLength"`
Location string `json:"location"`
Webserver string `json:"webserver"`
ContentType string `json:"contentType"`

View File

@@ -148,12 +148,12 @@ func (h *DirectoryHandler) Export(c *gin.Context) {
contentLength := ""
if directory.ContentLength != nil {
contentLength = strconv.FormatInt(*directory.ContentLength, 10)
contentLength = strconv.Itoa(*directory.ContentLength)
}
duration := ""
if directory.Duration != nil {
duration = strconv.FormatInt(*directory.Duration, 10)
duration = strconv.Itoa(*directory.Duration)
}
return []string{

View File

@@ -112,7 +112,7 @@ func (h *DirectorySnapshotHandler) Export(c *gin.Context) {
headers := []string{
"id", "scan_id", "url", "status", "content_length",
"words", "lines", "content_type", "duration", "created_at",
"content_type", "duration", "created_at",
}
filename := fmt.Sprintf("scan-%d-directories.csv", scanID)
@@ -129,22 +129,12 @@ func (h *DirectorySnapshotHandler) Export(c *gin.Context) {
contentLength := ""
if snapshot.ContentLength != nil {
contentLength = strconv.FormatInt(*snapshot.ContentLength, 10)
}
words := ""
if snapshot.Words != nil {
words = strconv.Itoa(*snapshot.Words)
}
lines := ""
if snapshot.Lines != nil {
lines = strconv.Itoa(*snapshot.Lines)
contentLength = strconv.Itoa(*snapshot.ContentLength)
}
duration := ""
if snapshot.Duration != nil {
duration = strconv.FormatInt(*snapshot.Duration, 10)
duration = strconv.Itoa(*snapshot.Duration)
}
return []string{
@@ -153,8 +143,6 @@ func (h *DirectorySnapshotHandler) Export(c *gin.Context) {
snapshot.URL,
status,
contentLength,
words,
lines,
snapshot.ContentType,
duration,
snapshot.CreatedAt.Format("2006-01-02 15:04:05"),
@@ -173,8 +161,6 @@ func toDirectorySnapshotResponse(s *model.DirectorySnapshot) dto.DirectorySnapsh
URL: s.URL,
Status: s.Status,
ContentLength: s.ContentLength,
Words: s.Words,
Lines: s.Lines,
ContentType: s.ContentType,
Duration: s.Duration,
CreatedAt: s.CreatedAt,

View File

@@ -13,26 +13,26 @@ import (
"github.com/xingrin/go-backend/internal/service"
)
// IPAddressHandler handles IP address endpoints
type IPAddressHandler struct {
svc *service.IPAddressService
// HostPortHandler handles host-port endpoints
type HostPortHandler struct {
svc *service.HostPortService
}
// NewIPAddressHandler creates a new IP address handler
func NewIPAddressHandler(svc *service.IPAddressService) *IPAddressHandler {
return &IPAddressHandler{svc: svc}
// NewHostPortHandler creates a new host-port handler
func NewHostPortHandler(svc *service.HostPortService) *HostPortHandler {
return &HostPortHandler{svc: svc}
}
// List returns paginated IP addresses aggregated by IP
// GET /api/targets/:id/ip-addresses
func (h *IPAddressHandler) List(c *gin.Context) {
// List returns paginated host-ports aggregated by IP
// GET /api/targets/:id/host-ports
func (h *HostPortHandler) List(c *gin.Context) {
targetID, err := strconv.Atoi(c.Param("id"))
if err != nil {
dto.BadRequest(c, "Invalid target ID")
return
}
var query dto.IPAddressListQuery
var query dto.HostPortListQuery
if !dto.BindQuery(c, &query) {
return
}
@@ -43,7 +43,7 @@ func (h *IPAddressHandler) List(c *gin.Context) {
dto.NotFound(c, "Target not found")
return
}
dto.InternalError(c, "Failed to list IP addresses")
dto.InternalError(c, "Failed to list host-ports")
return
}
@@ -60,10 +60,10 @@ func (h *IPAddressHandler) List(c *gin.Context) {
dto.Paginated(c, results, total, query.GetPage(), query.GetPageSize())
}
// Export exports IP addresses as CSV (raw format)
// GET /api/targets/:id/ip-addresses/export
// Export exports host-ports as CSV (raw format)
// GET /api/targets/:id/host-ports/export
// Query params: ips (optional, comma-separated IP list for filtering)
func (h *IPAddressHandler) Export(c *gin.Context) {
func (h *HostPortHandler) Export(c *gin.Context) {
targetID, err := strconv.Atoi(c.Param("id"))
if err != nil {
dto.BadRequest(c, "Invalid target ID")
@@ -91,7 +91,7 @@ func (h *IPAddressHandler) Export(c *gin.Context) {
dto.NotFound(c, "Target not found")
return
}
dto.InternalError(c, "Failed to export IP addresses")
dto.InternalError(c, "Failed to export host-ports")
return
}
rows, err = h.svc.StreamByTarget(targetID)
@@ -102,12 +102,12 @@ func (h *IPAddressHandler) Export(c *gin.Context) {
dto.NotFound(c, "Target not found")
return
}
dto.InternalError(c, "Failed to export IP addresses")
dto.InternalError(c, "Failed to export host-ports")
return
}
headers := []string{"ip", "host", "port", "created_at"}
filename := fmt.Sprintf("target-%d-ip-addresses.csv", targetID)
filename := fmt.Sprintf("target-%d-host-ports.csv", targetID)
mapper := func(rows *sql.Rows) ([]string, error) {
mapping, err := h.svc.ScanRow(rows)
@@ -128,16 +128,16 @@ func (h *IPAddressHandler) Export(c *gin.Context) {
}
}
// BulkUpsert creates multiple IP address mappings (ignores duplicates)
// POST /api/targets/:id/ip-addresses/bulk-upsert
func (h *IPAddressHandler) BulkUpsert(c *gin.Context) {
// BulkUpsert creates multiple host-port mappings (ignores duplicates)
// POST /api/targets/:id/host-ports/bulk-upsert
func (h *HostPortHandler) BulkUpsert(c *gin.Context) {
targetID, err := strconv.Atoi(c.Param("id"))
if err != nil {
dto.BadRequest(c, "Invalid target ID")
return
}
var req dto.BulkUpsertIPAddressesRequest
var req dto.BulkUpsertHostPortsRequest
if !dto.BindJSON(c, &req) {
return
}
@@ -148,30 +148,30 @@ func (h *IPAddressHandler) BulkUpsert(c *gin.Context) {
dto.NotFound(c, "Target not found")
return
}
dto.InternalError(c, "Failed to upsert IP addresses")
dto.InternalError(c, "Failed to upsert host-ports")
return
}
dto.Success(c, dto.BulkUpsertIPAddressesResponse{
dto.Success(c, dto.BulkUpsertHostPortsResponse{
UpsertedCount: int(upsertedCount),
})
}
// BulkDelete deletes IP address mappings by IP list
// POST /api/ip-addresses/bulk-delete
func (h *IPAddressHandler) BulkDelete(c *gin.Context) {
var req dto.BulkDeleteIPAddressesRequest
// BulkDelete deletes host-port mappings by IP list
// POST /api/host-ports/bulk-delete
func (h *HostPortHandler) BulkDelete(c *gin.Context) {
var req dto.BulkDeleteHostPortsRequest
if !dto.BindJSON(c, &req) {
return
}
deletedCount, err := h.svc.BulkDeleteByIPs(req.IPs)
if err != nil {
dto.InternalError(c, "Failed to delete IP addresses")
dto.InternalError(c, "Failed to delete host-ports")
return
}
dto.Success(c, dto.BulkDeleteIPAddressesResponse{
dto.Success(c, dto.BulkDeleteHostPortsResponse{
DeletedCount: deletedCount,
})
}

View File

@@ -0,0 +1,147 @@
package handler
import (
"database/sql"
"errors"
"fmt"
"strconv"
"github.com/gin-gonic/gin"
"github.com/xingrin/go-backend/internal/dto"
"github.com/xingrin/go-backend/internal/model"
"github.com/xingrin/go-backend/internal/pkg/csv"
"github.com/xingrin/go-backend/internal/service"
)
// HostPortSnapshotHandler handles host-port snapshot endpoints:
// scan-scoped bulk upsert (with asset sync), listing, and CSV export.
type HostPortSnapshotHandler struct {
	svc *service.HostPortSnapshotService // snapshot persistence and asset-sync logic
}

// NewHostPortSnapshotHandler creates a new host-port snapshot handler backed
// by the given service.
func NewHostPortSnapshotHandler(svc *service.HostPortSnapshotService) *HostPortSnapshotHandler {
	return &HostPortSnapshotHandler{svc: svc}
}
// BulkUpsert creates host-port snapshots for a scan and syncs them into the
// asset table via the service layer.
// POST /api/scans/:id/host-ports/bulk-upsert
//
// Responds 400 on a non-numeric scan ID, 404 when the scan does not exist,
// and 500 on any other persistence failure.
func (h *HostPortSnapshotHandler) BulkUpsert(c *gin.Context) {
	id, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		dto.BadRequest(c, "Invalid scan ID")
		return
	}

	// BindJSON writes the error response itself when binding fails.
	var payload dto.BulkUpsertHostPortSnapshotsRequest
	if ok := dto.BindJSON(c, &payload); !ok {
		return
	}

	snaps, assets, saveErr := h.svc.SaveAndSync(id, payload.TargetID, payload.HostPorts)
	switch {
	case saveErr == nil:
		// fall through to the success response below
	case errors.Is(saveErr, service.ErrScanNotFoundForSnapshot):
		dto.NotFound(c, "Scan not found")
		return
	default:
		dto.InternalError(c, "Failed to save snapshots")
		return
	}

	resp := dto.BulkUpsertHostPortSnapshotsResponse{
		SnapshotCount: int(snaps),
		AssetCount:    int(assets),
	}
	dto.Success(c, resp)
}
// List returns paginated host-port snapshots for a scan.
// GET /api/scans/:id/host-ports
//
// Responds 400 on a non-numeric scan ID, 404 when the scan does not exist,
// and 500 on any other service failure.
func (h *HostPortSnapshotHandler) List(c *gin.Context) {
	scanID, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		dto.BadRequest(c, "Invalid scan ID")
		return
	}
	// BindQuery writes the error response itself when binding fails.
	var query dto.HostPortSnapshotListQuery
	if !dto.BindQuery(c, &query) {
		return
	}
	snapshots, total, err := h.svc.ListByScan(scanID, &query)
	if err != nil {
		if errors.Is(err, service.ErrScanNotFoundForSnapshot) {
			dto.NotFound(c, "Scan not found")
			return
		}
		dto.InternalError(c, "Failed to list snapshots")
		return
	}
	// Pre-sized, non-nil slice: with `var resp []...` an empty result set
	// would serialize as JSON null rather than [] (assuming dto.Paginated
	// JSON-encodes the data — consistent with the other snapshot handlers).
	// Indexing also avoids copying each snapshot struct per iteration.
	resp := make([]dto.HostPortSnapshotResponse, 0, len(snapshots))
	for i := range snapshots {
		resp = append(resp, toHostPortSnapshotResponse(&snapshots[i]))
	}
	dto.Paginated(c, resp, total, query.GetPage(), query.GetPageSize())
}
// Export streams all host-port snapshots of a scan as a CSV download.
// GET /api/scans/:id/host-ports/export
//
// Responds 400 on a non-numeric scan ID, 404 when the scan does not exist,
// and 500 on count/stream failures; on success the CSV is streamed row by row.
func (h *HostPortSnapshotHandler) Export(c *gin.Context) {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		dto.BadRequest(c, "Invalid scan ID")
		return
	}

	// Row count is fetched up front (presumably for progress/size reporting
	// inside StreamCSV) and doubles as the scan-existence check.
	total, err := h.svc.CountByScan(id)
	if err != nil {
		if errors.Is(err, service.ErrScanNotFoundForSnapshot) {
			dto.NotFound(c, "Scan not found")
			return
		}
		dto.InternalError(c, "Failed to export snapshots")
		return
	}

	rows, err := h.svc.StreamByScan(id)
	if err != nil {
		dto.InternalError(c, "Failed to export snapshots")
		return
	}

	var (
		headers  = []string{"id", "scan_id", "host", "ip", "port", "created_at"}
		filename = fmt.Sprintf("scan-%d-host-ports.csv", id)
	)

	// toRecord converts one streamed DB row into its CSV record.
	toRecord := func(rows *sql.Rows) ([]string, error) {
		snap, scanErr := h.svc.ScanRow(rows)
		if scanErr != nil {
			return nil, scanErr
		}
		record := []string{
			strconv.Itoa(snap.ID),
			strconv.Itoa(snap.ScanID),
			snap.Host,
			snap.IP,
			strconv.Itoa(snap.Port),
			snap.CreatedAt.Format("2006-01-02 15:04:05"),
		}
		return record, nil
	}

	// StreamCSV writes the HTTP response itself; by this point headers may
	// already be sent, so there is nothing further to do on error.
	_ = csv.StreamCSV(c, rows, headers, filename, toRecord, total)
}
// toHostPortSnapshotResponse converts a snapshot model into its response DTO,
// copying fields one-to-one.
func toHostPortSnapshotResponse(s *model.HostPortSnapshot) dto.HostPortSnapshotResponse {
	var resp dto.HostPortSnapshotResponse
	resp.ID = s.ID
	resp.ScanID = s.ScanID
	resp.Host = s.Host
	resp.IP = s.IP
	resp.Port = s.Port
	resp.CreatedAt = s.CreatedAt
	return resp
}

View File

@@ -134,7 +134,7 @@ func (h *WebsiteSnapshotHandler) Export(c *gin.Context) {
contentLength := ""
if snapshot.ContentLength != nil {
contentLength = strconv.FormatInt(*snapshot.ContentLength, 10)
contentLength = strconv.Itoa(*snapshot.ContentLength)
}
vhost := ""

View File

@@ -235,19 +235,7 @@ func TestListHandler(t *testing.T) {
}
},
},
{
name: "list with ordering",
scanID: "1",
queryParams: "?ordering=-url",
mockFunc: func(scanID int, query *dto.WebsiteSnapshotListQuery) ([]model.WebsiteSnapshot, int64, error) {
if query.Ordering != "-url" {
t.Errorf("expected ordering '-url', got %q", query.Ordering)
}
return mockSnapshots, 3, nil
},
expectedStatus: http.StatusOK,
checkResponse: nil,
},
{
name: "scan not found",
scanID: "999",
@@ -373,8 +361,8 @@ func TestFilterProperties(t *testing.T) {
router := gin.New()
router.GET("/api/scans/:scan_id/websites", func(c *gin.Context) {
var query dto.WebsiteSnapshotListQuery
c.ShouldBindQuery(&query)
mockSvc.ListByScan(1, &query)
_ = c.ShouldBindQuery(&query)
_, _, _ = mockSvc.ListByScan(1, &query)
dto.Paginated(c, []dto.WebsiteSnapshotResponse{}, 0, 1, 20)
})

View File

@@ -38,7 +38,9 @@ func (h *WordlistHandler) Create(c *gin.Context) {
dto.InternalError(c, "Failed to read uploaded file")
return
}
defer src.Close()
defer func() {
_ = src.Close() // Ignore close error in defer
}()
wordlist, err := h.svc.Create(name, description, file.Filename, src)
if err != nil {

View File

@@ -10,9 +10,9 @@ type Directory struct {
TargetID int `gorm:"column:target_id;not null;index:idx_directory_target;uniqueIndex:unique_directory_url_target,priority:1" json:"targetId"`
URL string `gorm:"column:url;size:2000;not null;index:idx_directory_url;uniqueIndex:unique_directory_url_target,priority:2" json:"url"`
Status *int `gorm:"column:status;index:idx_directory_status" json:"status"`
ContentLength *int64 `gorm:"column:content_length" json:"contentLength"`
ContentLength *int `gorm:"column:content_length" json:"contentLength"`
ContentType string `gorm:"column:content_type;size:200" json:"contentType"`
Duration *int64 `gorm:"column:duration" json:"duration"`
Duration *int `gorm:"column:duration" json:"duration"`
CreatedAt time.Time `gorm:"column:created_at;autoCreateTime;index:idx_directory_created_at" json:"createdAt"`
// Relationships

View File

@@ -10,11 +10,9 @@ type DirectorySnapshot struct {
ScanID int `gorm:"column:scan_id;not null;index:idx_directory_snap_scan;uniqueIndex:unique_directory_per_scan_snapshot,priority:1" json:"scanId"`
URL string `gorm:"column:url;size:2000;index:idx_directory_snap_url;uniqueIndex:unique_directory_per_scan_snapshot,priority:2" json:"url"`
Status *int `gorm:"column:status;index:idx_directory_snap_status" json:"status"`
ContentLength *int64 `gorm:"column:content_length" json:"contentLength"`
Words *int `gorm:"column:words" json:"words"`
Lines *int `gorm:"column:lines" json:"lines"`
ContentLength *int `gorm:"column:content_length" json:"contentLength"`
ContentType string `gorm:"column:content_type;size:200;index:idx_directory_snap_content_type" json:"contentType"`
Duration *int64 `gorm:"column:duration" json:"duration"`
Duration *int `gorm:"column:duration" json:"duration"`
CreatedAt time.Time `gorm:"column:created_at;autoCreateTime;index:idx_directory_snap_created_at" json:"createdAt"`
// Relationships

View File

@@ -4,8 +4,8 @@ import (
"time"
)
// HostPortMapping represents a host-port mapping
type HostPortMapping struct {
// HostPort represents a host-port mapping
type HostPort struct {
ID int `gorm:"primaryKey;autoIncrement" json:"id"`
TargetID int `gorm:"column:target_id;not null;index:idx_hpm_target;uniqueIndex:unique_target_host_ip_port,priority:1" json:"targetId"`
Host string `gorm:"column:host;size:1000;not null;index:idx_hpm_host;uniqueIndex:unique_target_host_ip_port,priority:2" json:"host"`
@@ -17,7 +17,7 @@ type HostPortMapping struct {
Target *Target `gorm:"foreignKey:TargetID" json:"target,omitempty"`
}
// TableName returns the table name for HostPortMapping
func (HostPortMapping) TableName() string {
// TableName returns the table name for HostPort
func (HostPort) TableName() string {
return "host_port_mapping"
}

View File

@@ -4,8 +4,8 @@ import (
"time"
)
// HostPortMappingSnapshot represents a host-port mapping snapshot
type HostPortMappingSnapshot struct {
// HostPortSnapshot represents a host-port snapshot
type HostPortSnapshot struct {
ID int `gorm:"primaryKey;autoIncrement" json:"id"`
ScanID int `gorm:"column:scan_id;not null;index:idx_hpm_snap_scan;uniqueIndex:unique_scan_host_ip_port_snapshot,priority:1" json:"scanId"`
Host string `gorm:"column:host;size:1000;not null;index:idx_hpm_snap_host;uniqueIndex:unique_scan_host_ip_port_snapshot,priority:2" json:"host"`
@@ -17,7 +17,7 @@ type HostPortMappingSnapshot struct {
Scan *Scan `gorm:"foreignKey:ScanID" json:"scan,omitempty"`
}
// TableName returns the table name for HostPortMappingSnapshot
func (HostPortMappingSnapshot) TableName() string {
// TableName returns the table name for HostPortSnapshot
func (HostPortSnapshot) TableName() string {
return "host_port_mapping_snapshot"
}

View File

@@ -33,7 +33,7 @@ func TestTableNames(t *testing.T) {
// Asset models
{Endpoint{}, "endpoint"},
{Directory{}, "directory"},
{HostPortMapping{}, "host_port_mapping"},
{HostPort{}, "host_port_mapping"},
{Vulnerability{}, "vulnerability"},
{Screenshot{}, "screenshot"},
// Snapshot models
@@ -41,7 +41,7 @@ func TestTableNames(t *testing.T) {
{WebsiteSnapshot{}, "website_snapshot"},
{EndpointSnapshot{}, "endpoint_snapshot"},
{DirectorySnapshot{}, "directory_snapshot"},
{HostPortMappingSnapshot{}, "host_port_mapping_snapshot"},
{HostPortSnapshot{}, "host_port_mapping_snapshot"},
{VulnerabilitySnapshot{}, "vulnerability_snapshot"},
{ScreenshotSnapshot{}, "screenshot_snapshot"},
// Scan-related models

View File

@@ -14,7 +14,7 @@ type WebsiteSnapshot struct {
Host string `gorm:"column:host;size:253;index:idx_website_snap_host" json:"host"`
Title string `gorm:"column:title;type:text;index:idx_website_snap_title" json:"title"`
StatusCode *int `gorm:"column:status_code" json:"statusCode"`
ContentLength *int64 `gorm:"column:content_length" json:"contentLength"`
ContentLength *int `gorm:"column:content_length" json:"contentLength"`
Location string `gorm:"column:location;type:text" json:"location"`
Webserver string `gorm:"column:webserver;type:text" json:"webserver"`
ContentType string `gorm:"column:content_type;type:text" json:"contentType"`

View File

@@ -11,18 +11,18 @@ import (
"gorm.io/gorm/clause"
)
// IPAddressRepository handles IP address (host_port_mapping) database operations
type IPAddressRepository struct {
// HostPortRepository handles host-port mapping (host_port_mapping) database operations
type HostPortRepository struct {
db *gorm.DB
}
// NewIPAddressRepository creates a new IP address repository
func NewIPAddressRepository(db *gorm.DB) *IPAddressRepository {
return &IPAddressRepository{db: db}
// NewHostPortRepository creates a new host-port repository
func NewHostPortRepository(db *gorm.DB) *HostPortRepository {
return &HostPortRepository{db: db}
}
// IPAddressFilterMapping defines field mapping for filtering
var IPAddressFilterMapping = scope.FilterMapping{
// HostPortFilterMapping defines field mapping for filtering
var HostPortFilterMapping = scope.FilterMapping{
"host": {Column: "host"},
"ip": {Column: "ip", NeedsCast: true},
"port": {Column: "port", IsNumeric: true},
@@ -35,12 +35,12 @@ type IPAggregationRow struct {
}
// GetIPAggregation returns IPs with their earliest created_at, ordered by created_at DESC
func (r *IPAddressRepository) GetIPAggregation(targetID int, filter string) ([]IPAggregationRow, int64, error) {
func (r *HostPortRepository) GetIPAggregation(targetID int, filter string) ([]IPAggregationRow, int64, error) {
// Build base query
baseQuery := r.db.Model(&model.HostPortMapping{}).Where("target_id = ?", targetID)
baseQuery := r.db.Model(&model.HostPort{}).Where("target_id = ?", targetID)
// Apply filter
baseQuery = baseQuery.Scopes(scope.WithFilter(filter, IPAddressFilterMapping))
baseQuery = baseQuery.Scopes(scope.WithFilter(filter, HostPortFilterMapping))
// Get distinct IPs with MIN(created_at)
var results []IPAggregationRow
@@ -57,12 +57,12 @@ func (r *IPAddressRepository) GetIPAggregation(targetID int, filter string) ([]I
}
// GetHostsAndPortsByIP returns hosts and ports for a specific IP
func (r *IPAddressRepository) GetHostsAndPortsByIP(targetID int, ip string, filter string) ([]string, []int, error) {
baseQuery := r.db.Model(&model.HostPortMapping{}).
func (r *HostPortRepository) GetHostsAndPortsByIP(targetID int, ip string, filter string) ([]string, []int, error) {
baseQuery := r.db.Model(&model.HostPort{}).
Where("target_id = ? AND ip = ?", targetID, ip)
// Apply filter
baseQuery = baseQuery.Scopes(scope.WithFilter(filter, IPAddressFilterMapping))
baseQuery = baseQuery.Scopes(scope.WithFilter(filter, HostPortFilterMapping))
// Get distinct host and port combinations
var mappings []struct {
@@ -101,34 +101,34 @@ func (r *IPAddressRepository) GetHostsAndPortsByIP(targetID int, ip string, filt
}
// StreamByTargetID returns a sql.Rows cursor for streaming export (raw format)
func (r *IPAddressRepository) StreamByTargetID(targetID int) (*sql.Rows, error) {
return r.db.Model(&model.HostPortMapping{}).
func (r *HostPortRepository) StreamByTargetID(targetID int) (*sql.Rows, error) {
return r.db.Model(&model.HostPort{}).
Where("target_id = ?", targetID).
Order("ip, host, port").
Rows()
}
// StreamByTargetIDAndIPs returns a sql.Rows cursor for streaming export filtered by IPs
func (r *IPAddressRepository) StreamByTargetIDAndIPs(targetID int, ips []string) (*sql.Rows, error) {
return r.db.Model(&model.HostPortMapping{}).
func (r *HostPortRepository) StreamByTargetIDAndIPs(targetID int, ips []string) (*sql.Rows, error) {
return r.db.Model(&model.HostPort{}).
Where("target_id = ? AND ip IN ?", targetID, ips).
Order("ip, host, port").
Rows()
}
// CountByTargetID returns the count of unique IPs for a target
func (r *IPAddressRepository) CountByTargetID(targetID int) (int64, error) {
func (r *HostPortRepository) CountByTargetID(targetID int) (int64, error) {
var count int64
err := r.db.Model(&model.HostPortMapping{}).
err := r.db.Model(&model.HostPort{}).
Where("target_id = ?", targetID).
Distinct("ip").
Count(&count).Error
return count, err
}
// ScanRow scans a single row into HostPortMapping model
func (r *IPAddressRepository) ScanRow(rows *sql.Rows) (*model.HostPortMapping, error) {
var mapping model.HostPortMapping
// ScanRow scans a single row into HostPort model
func (r *HostPortRepository) ScanRow(rows *sql.Rows) (*model.HostPort, error) {
var mapping model.HostPort
if err := r.db.ScanRows(rows, &mapping); err != nil {
return nil, err
}
@@ -136,7 +136,7 @@ func (r *IPAddressRepository) ScanRow(rows *sql.Rows) (*model.HostPortMapping, e
}
// BulkUpsert creates multiple mappings, ignoring duplicates (ON CONFLICT DO NOTHING)
func (r *IPAddressRepository) BulkUpsert(mappings []model.HostPortMapping) (int64, error) {
func (r *HostPortRepository) BulkUpsert(mappings []model.HostPort) (int64, error) {
if len(mappings) == 0 {
return 0, nil
}
@@ -161,10 +161,10 @@ func (r *IPAddressRepository) BulkUpsert(mappings []model.HostPortMapping) (int6
}
// DeleteByIPs deletes all mappings for the given IPs
func (r *IPAddressRepository) DeleteByIPs(ips []string) (int64, error) {
func (r *HostPortRepository) DeleteByIPs(ips []string) (int64, error) {
if len(ips) == 0 {
return 0, nil
}
result := r.db.Where("ip IN ?", ips).Delete(&model.HostPortMapping{})
result := r.db.Where("ip IN ?", ips).Delete(&model.HostPort{})
return result.RowsAffected, result.Error
}

View File

@@ -0,0 +1,99 @@
package repository
import (
"database/sql"
"github.com/xingrin/go-backend/internal/model"
"github.com/xingrin/go-backend/internal/pkg/scope"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// HostPortSnapshotRepository handles host-port mapping snapshot
// (host_port_mapping_snapshot table) database operations.
type HostPortSnapshotRepository struct {
	// db is the shared GORM handle used for all queries in this repository.
	db *gorm.DB
}

// NewHostPortSnapshotRepository creates a new host-port mapping snapshot
// repository backed by the given GORM database handle.
func NewHostPortSnapshotRepository(db *gorm.DB) *HostPortSnapshotRepository {
	return &HostPortSnapshotRepository{db: db}
}
// HostPortSnapshotFilterMapping defines field mapping for host-port snapshot
// filtering. Kept consistent with HostPortFilterMapping (the asset-table
// mapping): the ip column needs a text cast for pattern matching and port is
// numeric, so the same filter expressions behave identically against the
// snapshot and asset tables.
var HostPortSnapshotFilterMapping = scope.FilterMapping{
	"host": {Column: "host"},
	"ip":   {Column: "ip", NeedsCast: true},
	"port": {Column: "port", IsNumeric: true},
}
// BulkCreate inserts host-port snapshots in batches, ignoring rows that
// conflict with an existing record (ON CONFLICT DO NOTHING). It returns the
// number of rows actually inserted; on error it returns the count inserted
// so far together with the error.
func (r *HostPortSnapshotRepository) BulkCreate(snapshots []model.HostPortSnapshot) (int64, error) {
	if len(snapshots) == 0 {
		return 0, nil
	}

	// Insert in chunks of 500 records so the generated statement stays well
	// below PostgreSQL's 65535 bind-parameter limit (5 parameters per row:
	// scan_id, host, ip, port, created_at).
	const chunkSize = 500

	var inserted int64
	for start := 0; start < len(snapshots); start += chunkSize {
		end := start + chunkSize
		if end > len(snapshots) {
			end = len(snapshots)
		}
		chunk := snapshots[start:end]
		res := r.db.Clauses(clause.OnConflict{DoNothing: true}).Create(&chunk)
		if res.Error != nil {
			return inserted, res.Error
		}
		inserted += res.RowsAffected
	}
	return inserted, nil
}
// FindByScanID returns one page of host-port snapshots for the given scan,
// ordered newest-first, together with the total number of rows matching the
// filter (before pagination).
func (r *HostPortSnapshotRepository) FindByScanID(scanID int, page, pageSize int, filter string) ([]model.HostPortSnapshot, int64, error) {
	q := r.db.Model(&model.HostPortSnapshot{}).
		Where("scan_id = ?", scanID).
		Scopes(scope.WithFilter(filter, HostPortSnapshotFilterMapping))

	// Count with the filter applied but before pagination.
	var total int64
	if err := q.Count(&total).Error; err != nil {
		return nil, 0, err
	}

	var results []model.HostPortSnapshot
	err := q.Scopes(
		scope.WithPagination(page, pageSize),
		scope.OrderByCreatedAtDesc(),
	).Find(&results).Error
	return results, total, err
}
// StreamByScanID opens a row cursor over all host-port snapshots of a scan,
// newest first, for memory-efficient streaming export. The caller owns the
// returned *sql.Rows and must close it.
func (r *HostPortSnapshotRepository) StreamByScanID(scanID int) (*sql.Rows, error) {
	q := r.db.Model(&model.HostPortSnapshot{}).Where("scan_id = ?", scanID)
	return q.Order("created_at DESC").Rows()
}
// CountByScanID returns the number of host-port snapshot rows recorded for
// the given scan.
func (r *HostPortSnapshotRepository) CountByScanID(scanID int) (int64, error) {
	var n int64
	if err := r.db.Model(&model.HostPortSnapshot{}).
		Where("scan_id = ?", scanID).
		Count(&n).Error; err != nil {
		return 0, err
	}
	return n, nil
}
// ScanRow reads the current row of a streaming cursor (as returned by
// StreamByScanID) into a HostPortSnapshot model.
func (r *HostPortSnapshotRepository) ScanRow(rows *sql.Rows) (*model.HostPortSnapshot, error) {
	snapshot := new(model.HostPortSnapshot)
	if err := r.db.ScanRows(rows, snapshot); err != nil {
		return nil, err
	}
	return snapshot, nil
}

View File

@@ -6,79 +6,6 @@ import (
"github.com/xingrin/go-backend/internal/model"
)
// TestApplyOrdering tests the ordering function
func TestApplyOrdering(t *testing.T) {
tests := []struct {
name string
ordering string
wantDesc bool
wantCol string
}{
{
name: "ascending url",
ordering: "url",
wantDesc: false,
wantCol: "url",
},
{
name: "descending url",
ordering: "-url",
wantDesc: true,
wantCol: "url",
},
{
name: "ascending createdAt",
ordering: "createdAt",
wantDesc: false,
wantCol: "created_at",
},
{
name: "descending createdAt",
ordering: "-createdAt",
wantDesc: true,
wantCol: "created_at",
},
{
name: "statusCode ascending",
ordering: "statusCode",
wantDesc: false,
wantCol: "status_code",
},
{
name: "statusCode descending",
ordering: "-statusCode",
wantDesc: true,
wantCol: "status_code",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
desc := false
field := tt.ordering
if len(tt.ordering) > 0 && tt.ordering[0] == '-' {
desc = true
field = tt.ordering[1:]
}
column, ok := orderingFieldMapping[field]
if !ok {
t.Errorf("field %s not found in orderingFieldMapping", field)
return
}
if desc != tt.wantDesc {
t.Errorf("desc = %v, want %v", desc, tt.wantDesc)
}
if column != tt.wantCol {
t.Errorf("column = %v, want %v", column, tt.wantCol)
}
})
}
}
// TestWebsiteSnapshotFilterMapping tests the filter mapping configuration
func TestWebsiteSnapshotFilterMapping(t *testing.T) {
expectedFields := []string{"url", "host", "title", "status", "webserver", "tech"}

View File

@@ -70,8 +70,6 @@ func (s *DirectorySnapshotService) SaveAndSync(scanID int, targetID int, items [
URL: item.URL,
Status: item.Status,
ContentLength: item.ContentLength,
Words: item.Words,
Lines: item.Lines,
ContentType: item.ContentType,
Duration: item.Duration,
})

View File

@@ -10,19 +10,19 @@ import (
"gorm.io/gorm"
)
// IPAddressService handles IP address business logic
type IPAddressService struct {
repo *repository.IPAddressRepository
// HostPortService handles host-port business logic
type HostPortService struct {
repo *repository.HostPortRepository
targetRepo *repository.TargetRepository
}
// NewIPAddressService creates a new IP address service
func NewIPAddressService(repo *repository.IPAddressRepository, targetRepo *repository.TargetRepository) *IPAddressService {
return &IPAddressService{repo: repo, targetRepo: targetRepo}
// NewHostPortService creates a new host-port service
func NewHostPortService(repo *repository.HostPortRepository, targetRepo *repository.TargetRepository) *HostPortService {
return &HostPortService{repo: repo, targetRepo: targetRepo}
}
// ListByTarget returns paginated IP addresses aggregated by IP
func (s *IPAddressService) ListByTarget(targetID int, query *dto.IPAddressListQuery) ([]dto.IPAddressResponse, int64, error) {
// ListByTarget returns paginated host-ports aggregated by IP
func (s *HostPortService) ListByTarget(targetID int, query *dto.HostPortListQuery) ([]dto.HostPortResponse, int64, error) {
// Get IP aggregation (all IPs with their earliest created_at)
ipRows, total, err := s.repo.GetIPAggregation(targetID, query.Filter)
if err != nil {
@@ -36,7 +36,7 @@ func (s *IPAddressService) ListByTarget(targetID int, query *dto.IPAddressListQu
end := start + pageSize
if start >= len(ipRows) {
return []dto.IPAddressResponse{}, total, nil
return []dto.HostPortResponse{}, total, nil
}
if end > len(ipRows) {
end = len(ipRows)
@@ -45,14 +45,14 @@ func (s *IPAddressService) ListByTarget(targetID int, query *dto.IPAddressListQu
pagedIPs := ipRows[start:end]
// For each IP, get its hosts and ports
results := make([]dto.IPAddressResponse, 0, len(pagedIPs))
results := make([]dto.HostPortResponse, 0, len(pagedIPs))
for _, row := range pagedIPs {
hosts, ports, err := s.repo.GetHostsAndPortsByIP(targetID, row.IP, query.Filter)
if err != nil {
return nil, 0, err
}
results = append(results, dto.IPAddressResponse{
results = append(results, dto.HostPortResponse{
IP: row.IP,
Hosts: hosts,
Ports: ports,
@@ -64,7 +64,7 @@ func (s *IPAddressService) ListByTarget(targetID int, query *dto.IPAddressListQu
}
// StreamByTarget returns a cursor for streaming export (raw format)
func (s *IPAddressService) StreamByTarget(targetID int) (*sql.Rows, error) {
func (s *HostPortService) StreamByTarget(targetID int) (*sql.Rows, error) {
_, err := s.targetRepo.FindByID(targetID)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
@@ -76,7 +76,7 @@ func (s *IPAddressService) StreamByTarget(targetID int) (*sql.Rows, error) {
}
// StreamByTargetAndIPs returns a cursor for streaming export filtered by IPs
func (s *IPAddressService) StreamByTargetAndIPs(targetID int, ips []string) (*sql.Rows, error) {
func (s *HostPortService) StreamByTargetAndIPs(targetID int, ips []string) (*sql.Rows, error) {
_, err := s.targetRepo.FindByID(targetID)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
@@ -88,7 +88,7 @@ func (s *IPAddressService) StreamByTargetAndIPs(targetID int, ips []string) (*sq
}
// CountByTarget returns the count of unique IPs for a target
func (s *IPAddressService) CountByTarget(targetID int) (int64, error) {
func (s *HostPortService) CountByTarget(targetID int) (int64, error) {
_, err := s.targetRepo.FindByID(targetID)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
@@ -99,13 +99,13 @@ func (s *IPAddressService) CountByTarget(targetID int) (int64, error) {
return s.repo.CountByTargetID(targetID)
}
// ScanRow scans a row into HostPortMapping model
func (s *IPAddressService) ScanRow(rows *sql.Rows) (*model.HostPortMapping, error) {
// ScanRow scans a row into HostPort model
func (s *HostPortService) ScanRow(rows *sql.Rows) (*model.HostPort, error) {
return s.repo.ScanRow(rows)
}
// BulkUpsert creates multiple mappings for a target (ignores duplicates)
func (s *IPAddressService) BulkUpsert(targetID int, items []dto.IPAddressItem) (int64, error) {
func (s *HostPortService) BulkUpsert(targetID int, items []dto.HostPortItem) (int64, error) {
_, err := s.targetRepo.FindByID(targetID)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
@@ -115,9 +115,9 @@ func (s *IPAddressService) BulkUpsert(targetID int, items []dto.IPAddressItem) (
}
// Convert DTOs to models
mappings := make([]model.HostPortMapping, 0, len(items))
mappings := make([]model.HostPort, 0, len(items))
for _, item := range items {
mappings = append(mappings, model.HostPortMapping{
mappings = append(mappings, model.HostPort{
TargetID: targetID,
Host: item.Host,
IP: item.IP,
@@ -133,7 +133,7 @@ func (s *IPAddressService) BulkUpsert(targetID int, items []dto.IPAddressItem) (
}
// BulkDeleteByIPs deletes all mappings for the given IPs
func (s *IPAddressService) BulkDeleteByIPs(ips []string) (int64, error) {
func (s *HostPortService) BulkDeleteByIPs(ips []string) (int64, error) {
if len(ips) == 0 {
return 0, nil
}

View File

@@ -0,0 +1,181 @@
package service
import (
"database/sql"
"errors"
"fmt"
"net"
"strings"
"github.com/xingrin/go-backend/internal/dto"
"github.com/xingrin/go-backend/internal/model"
"github.com/xingrin/go-backend/internal/pkg/validator"
"github.com/xingrin/go-backend/internal/repository"
"gorm.io/gorm"
)
// HostPortSnapshotService handles host-port snapshot business logic:
// validating scans, persisting per-scan snapshots, and mirroring valid
// entries into the host-port asset table.
type HostPortSnapshotService struct {
	// snapshotRepo persists host_port_mapping_snapshot rows.
	snapshotRepo *repository.HostPortSnapshotRepository
	// scanRepo is used to validate scan existence and resolve the scan's target.
	scanRepo *repository.ScanRepository
	// hostPortService syncs snapshot items into the host_port_mapping asset table.
	hostPortService *HostPortService
}

// NewHostPortSnapshotService creates a new host-port snapshot service wired
// to the given repositories and asset service.
func NewHostPortSnapshotService(
	snapshotRepo *repository.HostPortSnapshotRepository,
	scanRepo *repository.ScanRepository,
	hostPortService *HostPortService,
) *HostPortSnapshotService {
	return &HostPortSnapshotService{
		snapshotRepo:    snapshotRepo,
		scanRepo:        scanRepo,
		hostPortService: hostPortService,
	}
}
// SaveAndSync saves host-port snapshots for a scan and syncs the validated
// entries to the asset table.
//
// Steps:
//  1. Validates the scan exists (not soft-deleted) and belongs to targetID.
//  2. Validates host/ip against the scan's target, dropping invalid items.
//  3. Saves the valid items to the host_port_mapping_snapshot table.
//  4. Upserts the SAME validated items into host_port_mapping via
//     HostPortService.
//
// It returns the number of snapshot rows inserted and asset rows upserted.
func (s *HostPortSnapshotService) SaveAndSync(scanID int, targetID int, items []dto.HostPortSnapshotItem) (snapshotCount int64, assetCount int64, err error) {
	if len(items) == 0 {
		return 0, 0, nil
	}

	// Validate scan exists and matches the claimed target.
	scan, err := s.scanRepo.FindByID(scanID)
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, 0, ErrScanNotFoundForSnapshot
		}
		return 0, 0, err
	}
	if scan.TargetID != targetID {
		return 0, 0, errors.New("target_id does not match scan's target")
	}

	// Get target for host/ip ownership validation.
	target, err := s.scanRepo.GetTargetByScanID(scanID)
	if err != nil {
		return 0, 0, err
	}

	// Filter valid host-port mappings. Track the valid DTO items alongside
	// the snapshot models so the asset sync below uses exactly the same
	// filtered set (previously the unfiltered input was synced, letting
	// invalid host/ip rows leak into the asset table).
	validItems := make([]dto.HostPortSnapshotItem, 0, len(items))
	snapshots := make([]model.HostPortSnapshot, 0, len(items))
	for _, item := range items {
		if !isHostPortMatchTarget(item.Host, item.IP, target.Name, target.Type) {
			continue
		}
		validItems = append(validItems, item)
		snapshots = append(snapshots, model.HostPortSnapshot{
			ScanID: scanID,
			Host:   item.Host,
			IP:     item.IP,
			Port:   item.Port,
		})
	}
	if len(snapshots) == 0 {
		return 0, 0, nil
	}

	// Save to snapshot table.
	snapshotCount, err = s.snapshotRepo.BulkCreate(snapshots)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to bulk create snapshots: %w", err)
	}

	// Sync validated items to the asset table
	// (HostPortSnapshotItem is an alias of HostPortItem, no conversion needed).
	assetCount, err = s.hostPortService.BulkUpsert(targetID, validItems)
	if err != nil {
		return snapshotCount, 0, fmt.Errorf("failed to sync to asset table: %w", err)
	}
	return snapshotCount, assetCount, nil
}
// ListByScan returns paginated host-port snapshots for a scan, verifying
// first that the scan exists. Returns ErrScanNotFoundForSnapshot when it
// does not.
func (s *HostPortSnapshotService) ListByScan(scanID int, query *dto.HostPortSnapshotListQuery) ([]model.HostPortSnapshot, int64, error) {
	// Reject unknown scan IDs up front so callers get a domain error
	// instead of an empty page.
	if _, err := s.scanRepo.FindByID(scanID); err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, 0, ErrScanNotFoundForSnapshot
		}
		return nil, 0, err
	}
	return s.snapshotRepo.FindByScanID(scanID, query.GetPage(), query.GetPageSize(), query.Filter)
}
// StreamByScan returns a row cursor for streaming export of a scan's
// host-port snapshots. Returns ErrScanNotFoundForSnapshot if the scan does
// not exist; the caller must close the returned *sql.Rows.
func (s *HostPortSnapshotService) StreamByScan(scanID int) (*sql.Rows, error) {
	if _, err := s.scanRepo.FindByID(scanID); err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, ErrScanNotFoundForSnapshot
		}
		return nil, err
	}
	return s.snapshotRepo.StreamByScanID(scanID)
}
// CountByScan returns the number of host-port snapshots recorded for a scan,
// or ErrScanNotFoundForSnapshot if the scan does not exist.
func (s *HostPortSnapshotService) CountByScan(scanID int) (int64, error) {
	if _, err := s.scanRepo.FindByID(scanID); err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, ErrScanNotFoundForSnapshot
		}
		return 0, err
	}
	return s.snapshotRepo.CountByScanID(scanID)
}
// ScanRow scans a row into HostPortSnapshot model.
// Thin delegation to the repository so handlers can consume a streaming
// cursor without depending on the repository layer directly.
func (s *HostPortSnapshotService) ScanRow(rows *sql.Rows) (*model.HostPortSnapshot, error) {
	return s.snapshotRepo.ScanRow(rows)
}
// isHostPortMatchTarget reports whether a host/ip pair belongs to the given
// target. Inputs are trimmed, and host/target are compared case-insensitively.
//
// Matching rules by target type:
//   - domain: host equals the target or is a subdomain of it (".target" suffix)
//   - ip:     ip must exactly equal the target
//   - cidr:   ip must fall within the CIDR range
//
// Empty host, ip, or target name never matches; unknown target types never match.
func isHostPortMatchTarget(host, ip, targetName, targetType string) bool {
	h := strings.ToLower(strings.TrimSpace(host))
	addr := strings.TrimSpace(ip)
	name := strings.ToLower(strings.TrimSpace(targetName))
	if h == "" || addr == "" || name == "" {
		return false
	}

	switch targetType {
	case validator.TargetTypeDomain:
		// Exact domain or any subdomain of it.
		return h == name || strings.HasSuffix(h, "."+name)

	case validator.TargetTypeIP:
		// Exact IP match only.
		return addr == name

	case validator.TargetTypeCIDR:
		parsed := net.ParseIP(addr)
		if parsed == nil {
			return false
		}
		if _, network, err := net.ParseCIDR(name); err == nil {
			return network.Contains(parsed)
		}
		return false
	}

	// Unknown target type.
	return false
}

View File

@@ -109,27 +109,8 @@ func (s *WebsiteSnapshotService) SaveAndSync(scanID int, targetID int, items []d
return 0, 0, err
}
// Step 5: Sync to asset table via WebsiteService
// Note: WebsiteService.BulkUpsert also validates, but we already filtered
assetItems := make([]dto.WebsiteUpsertItem, 0, len(validItems))
for _, item := range validItems {
assetItems = append(assetItems, dto.WebsiteUpsertItem{
URL: item.URL,
Host: item.Host,
Title: item.Title,
StatusCode: item.StatusCode,
ContentLength: intPtrToIntPtr(item.ContentLength),
Location: item.Location,
Webserver: item.Webserver,
ContentType: item.ContentType,
Tech: item.Tech,
ResponseBody: item.ResponseBody,
Vhost: item.Vhost,
ResponseHeaders: item.ResponseHeaders,
})
}
assetCount, err = s.websiteService.BulkUpsert(targetID, assetItems)
// Step 5: Sync to asset table (WebsiteSnapshotItem is an alias of WebsiteUpsertItem, no conversion needed)
assetCount, err = s.websiteService.BulkUpsert(targetID, validItems)
if err != nil {
// Log error but don't fail - snapshot is already saved
// In production, consider using a transaction or compensation logic
@@ -139,15 +120,6 @@ func (s *WebsiteSnapshotService) SaveAndSync(scanID int, targetID int, items []d
return snapshotCount, assetCount, nil
}
// intPtrToIntPtr converts *int64 to *int
func intPtrToIntPtr(v *int64) *int {
if v == nil {
return nil
}
i := int(*v)
return &i
}
// ListByScan returns paginated website snapshots for a scan
func (s *WebsiteSnapshotService) ListByScan(scanID int, query *dto.WebsiteSnapshotListQuery) ([]model.WebsiteSnapshot, int64, error) {
// Validate scan exists

View File

@@ -34,7 +34,7 @@ func TestSaveAndSyncDataConsistency(t *testing.T) {
Host: "test.com",
Title: "Test Page",
StatusCode: intPtr(200),
ContentLength: int64Ptr(1024),
ContentLength: intPtr(1024),
Location: "https://test.com/redirect",
Webserver: "nginx",
ContentType: "text/html",
@@ -61,7 +61,7 @@ func TestSaveAndSyncDataConsistency(t *testing.T) {
Host: tt.snapshot.Host,
Title: tt.snapshot.Title,
StatusCode: tt.snapshot.StatusCode,
ContentLength: intPtrFromInt64Ptr(tt.snapshot.ContentLength),
ContentLength: tt.snapshot.ContentLength,
Location: tt.snapshot.Location,
Webserver: tt.snapshot.Webserver,
ContentType: tt.snapshot.ContentType,
@@ -102,22 +102,10 @@ func intPtr(v int) *int {
return &v
}
func int64Ptr(v int64) *int64 {
return &v
}
func boolPtr(v bool) *bool {
return &v
}
func intPtrFromInt64Ptr(v *int64) *int {
if v == nil {
return nil
}
i := int(*v)
return &i
}
func intPtrEqual(a, b *int) bool {
if a == nil && b == nil {
return true