diff --git a/frontend/components/ip-addresses/ip-addresses-columns.tsx b/frontend/components/ip-addresses/ip-addresses-columns.tsx
index c784e104..962548b6 100644
--- a/frontend/components/ip-addresses/ip-addresses-columns.tsx
+++ b/frontend/components/ip-addresses/ip-addresses-columns.tsx
@@ -91,41 +91,8 @@ export function createIPAddressColumns({
       ),
       cell: ({ getValue }) => {
         const hosts = getValue()
-        if (!hosts || hosts.length === 0) {
-          return -
-        }
-
-        const displayHosts = hosts.slice(0, 3)
-        const hasMore = hosts.length > 3
-
-        return (
-
-            {displayHosts.map((host, index) => (
-
-            ))}
-            {hasMore && (
-
-
-
-                  +{hosts.length - 3} more
-
-
-
-
-                  {t.tooltips.allHosts} ({hosts.length})
-
-
-                  {hosts.map((host, index) => (
-                    {host}
-                  ))}
-
-
-
-
-            )}
-
-        )
+        const value = hosts?.length ? hosts.join("\n") : null
+        return
       },
     },
     {
diff --git a/frontend/components/ip-addresses/ip-addresses-view.tsx b/frontend/components/ip-addresses/ip-addresses-view.tsx
index 415c7cf0..f249527a 100644
--- a/frontend/components/ip-addresses/ip-addresses-view.tsx
+++ b/frontend/components/ip-addresses/ip-addresses-view.tsx
@@ -200,22 +200,42 @@ export function IPAddressesView({
   }
 
   // Handle download selected IP addresses
-  const handleDownloadSelected = () => {
+  const handleDownloadSelected = async () => {
     if (selectedIPAddresses.length === 0) {
       return
     }
 
-    const csvContent = generateCSV(selectedIPAddresses)
-    const blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
-    const url = URL.createObjectURL(blob)
-    const a = document.createElement("a")
-    const prefix = scanId ? `scan-${scanId}` : targetId ? `target-${targetId}` : "ip-addresses"
-    a.href = url
-    a.download = `${prefix}-ip-addresses-selected-${Date.now()}.csv`
-    document.body.appendChild(a)
-    a.click()
-    document.body.removeChild(a)
-    URL.revokeObjectURL(url)
+    try {
+      // Get selected IPs and call backend export API
+      const ips = selectedIPAddresses.map(ip => ip.ip)
+      let blob: Blob | null = null
+
+      if (targetId) {
+        blob = await IPAddressService.exportIPAddressesByTargetId(targetId, ips)
+      } else if (scanId) {
+        // For scan, use frontend CSV generation as fallback (scan export doesn't support IP filter yet)
+        const csvContent = generateCSV(selectedIPAddresses)
+        blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
+      } else {
+        const csvContent = generateCSV(selectedIPAddresses)
+        blob = new Blob([csvContent], { type: "text/csv;charset=utf-8" })
+      }
+
+      if (!blob) return
+
+      const url = URL.createObjectURL(blob)
+      const a = document.createElement("a")
+      const prefix = scanId ? `scan-${scanId}` : targetId ? `target-${targetId}` : "ip-addresses"
+      a.href = url
+      a.download = `${prefix}-ip-addresses-selected-${Date.now()}.csv`
+      document.body.appendChild(a)
+      a.click()
+      document.body.removeChild(a)
+      URL.revokeObjectURL(url)
+    } catch (error) {
+      console.error("Failed to download selected IP addresses", error)
+      toast.error(tToast("downloadFailed"))
+    }
   }
 
   // Handle bulk delete
diff --git a/frontend/messages/zh.json b/frontend/messages/zh.json
index 5ac0eaf3..72511e63 100644
--- a/frontend/messages/zh.json
+++ b/frontend/messages/zh.json
@@ -44,11 +44,11 @@
       "createdAt": "创建时间"
     },
     "ipAddress": {
-      "ipAddress": "IP Address",
-      "hosts": "Hosts",
-      "openPorts": "Open Ports",
-      "allHosts": "All Hosts",
-      "allOpenPorts": "All Open Ports"
+      "ipAddress": "IP 地址",
+      "hosts": "主机名",
+      "openPorts": "开放端口",
+      "allHosts": "所有主机",
+      "allOpenPorts": "所有开放端口"
     },
     "endpoint": {
       "title": "标题",
diff --git a/frontend/services/ip-address.service.ts b/frontend/services/ip-address.service.ts
index 6b979dad..76d9b779 100644
--- a/frontend/services/ip-address.service.ts
+++ b/frontend/services/ip-address.service.ts
@@ -9,12 +9,12 @@ export interface BulkDeleteResponse {
 export class IPAddressService {
   /**
    * Bulk delete IP addresses
-   * POST /api/assets/ip-addresses/bulk-delete/
+   * POST /api/ip-addresses/bulk-delete/
    * Note: IP addresses are aggregated, so we pass IP strings instead of IDs
    */
   static async bulkDelete(ips: string[]): Promise<BulkDeleteResponse> {
     const response = await api.post(
-      `/assets/ip-addresses/bulk-delete/`,
+      `/ip-addresses/bulk-delete/`,
       { ips }
     )
     return response.data
@@ -48,15 +48,20 @@ export class IPAddressService {
     return response.data
   }
 
-  /** Export all IP addresses by target (text file, one per line) */
-  static async exportIPAddressesByTargetId(targetId: number): Promise<Blob> {
+  /** Export all IP addresses by target (CSV format) */
+  static async exportIPAddressesByTargetId(targetId: number, ips?: string[]): Promise<Blob> {
+    const params: Record<string, string> = {}
+    if (ips && ips.length > 0) {
+      params.ips = ips.join(',')
+    }
     const response = await api.get(`/targets/${targetId}/ip-addresses/export/`, {
+      params,
       responseType: 'blob',
     })
     return response.data
   }
 
-  /** Export all IP addresses by scan task (text file, one per line) */
+  /** Export all IP addresses by scan task (CSV format) */
   static async exportIPAddressesByScanId(scanId: number): Promise<Blob> {
     const response = await api.get(`/scans/${scanId}/ip-addresses/export/`, {
       responseType: 'blob',
diff --git a/go-backend/cmd/seed/main.go b/go-backend/cmd/seed/main.go
index aa558a5f..dd8a4c4f 100644
--- a/go-backend/cmd/seed/main.go
+++ b/go-backend/cmd/seed/main.go
@@ -108,12 +108,19 @@ func main() {
         os.Exit(1)
     }
 
+    // Create host port mappings for targets (20 per target)
+    if err := createHostPortMappings(db, targets, assetsPerTarget); err != nil {
+        fmt.Printf("❌ Failed to create host port mappings: %v\n", err)
+        os.Exit(1)
+    }
+
     fmt.Println("\n✅ Test data generation completed!")
 }
 
 func clearData(db *gorm.DB) error {
     // Delete in order to respect foreign key constraints
     tables := []string{
+        "host_port_mapping",
         "directory",
         "endpoint",
         "subdomain",
@@ -668,3 +675,86 @@ func createDirectories(db *gorm.DB, targets []model.Target, directoriesPerTarget
     fmt.Printf("  ✓ Created %d directories\n", createdCount)
     return nil
 }
+
+func createHostPortMappings(db *gorm.DB, targets []model.Target, mappingsPerTarget int) error {
+    // Increase mappings to ensure pagination (100 per target = more IPs)
+    actualMappingsPerTarget := mappingsPerTarget * 5 // 20 * 5 = 100 mappings per target
+    totalCount := len(targets) * actualMappingsPerTarget
+    fmt.Printf("🔌 Creating %d host port mappings (%d per target)...\n", totalCount, actualMappingsPerTarget)
+
+    if len(targets) == 0 {
+        return nil
+    }
+
+    // Common ports
+    ports := []int{22, 80, 443, 8080, 8443, 3000, 3306, 5432, 6379, 27017, 9200, 9300, 5000, 8000, 8888, 9000, 9090, 10000, 11211, 25}
+
+    // Subdomain prefixes for hosts
+    subdomains := []string{"www", "api", "app", "admin", "portal", "dashboard", "dev", "staging", "test", "cdn", "mail", "ftp", "db", "cache", "search", "auth", "login", "shop", "store", "blog"}
+
+    createdCount := 0
+
+    for _, target := range targets {
+        // Generate base IP for this target
+        baseIP1 := rand.Intn(223) + 1
+        baseIP2 := rand.Intn(256)
+        baseIP3 := rand.Intn(256)
+
+        // Generate more unique IPs per target (10-15 IPs with 6-10 ports each)
+        numIPs := 10 + rand.Intn(6) // 10-15 unique IPs
+        portsPerIP := actualMappingsPerTarget / numIPs
+
+        for ipIdx := 0; ipIdx < numIPs; ipIdx++ {
+            // Generate unique IP
+            ip := fmt.Sprintf("%d.%d.%d.%d", baseIP1, baseIP2, baseIP3, ipIdx+1)
+
+            // Generate multiple hosts for this IP
+            numHosts := 3 + rand.Intn(4) // 3-6 hosts per IP
+
+            for hostIdx := 0; hostIdx < numHosts; hostIdx++ {
+                var host string
+
+                // Generate host based on target type
+                switch target.Type {
+                case "domain":
+                    subdomain := subdomains[(ipIdx*numHosts+hostIdx)%len(subdomains)]
+                    host = fmt.Sprintf("%s.%s", subdomain, target.Name)
+                case "ip":
+                    host = target.Name
+                case "cidr":
+                    baseIP := target.Name[:len(target.Name)-3]
+                    host = baseIP
+                default:
+                    continue
+                }
+
+                // Generate multiple ports for this host-IP combination
+                numPorts := portsPerIP / numHosts
+                if numPorts < 1 {
+                    numPorts = 1
+                }
+
+                for portIdx := 0; portIdx < numPorts; portIdx++ {
+                    port := ports[(ipIdx*numHosts*numPorts+hostIdx*numPorts+portIdx)%len(ports)]
+
+                    mapping := &model.HostPortMapping{
+                        TargetID:  target.ID,
+                        Host:      host,
+                        IP:        ip,
+                        Port:      port,
+                        CreatedAt: time.Now().AddDate(0, 0, -(ipIdx*numHosts*numPorts + hostIdx*numPorts + portIdx)),
+                    }
+
+                    if err := db.Create(mapping).Error; err != nil {
+                        // Ignore duplicate key errors
+                        continue
+                    }
+                    createdCount++
+                }
+            }
+        }
+    }
+
+    fmt.Printf("  ✓ Created %d host port mappings\n", createdCount)
+    return nil
+}
diff --git a/go-backend/cmd/server/main.go b/go-backend/cmd/server/main.go
index 22f9117c..54c7a0bc 100644
--- a/go-backend/cmd/server/main.go
+++ b/go-backend/cmd/server/main.go
@@ -135,6 +135,7 @@ func main() {
     subdomainRepo := repository.NewSubdomainRepository(db)
     endpointRepo := repository.NewEndpointRepository(db)
     directoryRepo := repository.NewDirectoryRepository(db)
+    ipAddressRepo := repository.NewIPAddressRepository(db)
 
     // Create services
     userSvc := service.NewUserService(userRepo)
@@ -145,6 +146,7 @@
     subdomainSvc := service.NewSubdomainService(subdomainRepo, targetRepo)
     endpointSvc := service.NewEndpointService(endpointRepo, targetRepo)
     directorySvc := service.NewDirectoryService(directoryRepo, targetRepo)
+    ipAddressSvc := service.NewIPAddressService(ipAddressRepo, targetRepo)
 
     // Create handlers
     healthHandler := handler.NewHealthHandler(db, redisClient)
@@ -157,6 +159,7 @@
     subdomainHandler := handler.NewSubdomainHandler(subdomainSvc)
     endpointHandler := handler.NewEndpointHandler(endpointSvc)
     directoryHandler := handler.NewDirectoryHandler(directorySvc)
+    ipAddressHandler := handler.NewIPAddressHandler(ipAddressSvc)
 
     // Register health routes
     router.GET("/health", healthHandler.Check)
@@ -244,6 +247,14 @@
     // Directories (standalone)
     protected.POST("/directories/bulk-delete", directoryHandler.BulkDelete)
 
+    // IP Addresses (nested under targets)
+    protected.GET("/targets/:id/ip-addresses", ipAddressHandler.List)
+    protected.GET("/targets/:id/ip-addresses/export", ipAddressHandler.Export)
+    protected.POST("/targets/:id/ip-addresses/bulk-upsert", ipAddressHandler.BulkUpsert)
+
+    // IP Addresses (standalone)
+    protected.POST("/ip-addresses/bulk-delete", ipAddressHandler.BulkDelete)
+
     // Engines
     protected.POST("/engines", engineHandler.Create)
     protected.GET("/engines", engineHandler.List)
diff --git a/go-backend/internal/dto/ip_address.go b/go-backend/internal/dto/ip_address.go
new file mode 100644
index 00000000..17e765b4
--- /dev/null
+++ b/go-backend/internal/dto/ip_address.go
@@ -0,0 +1,44 @@
+package dto
+
+import "time"
+
+// IPAddressListQuery represents IP address list query parameters
+type IPAddressListQuery struct {
+    PaginationQuery
+    Filter string `form:"filter"`
+}
+
+// IPAddressResponse represents aggregated IP address response (grouped by IP)
+type IPAddressResponse struct {
+    IP        string    `json:"ip"`
+    Hosts     []string  `json:"hosts"`
+    Ports     []int     `json:"ports"`
+    CreatedAt time.Time `json:"createdAt"`
+}
+
+// IPAddressItem represents a single IP address mapping for bulk operations
+type IPAddressItem struct {
+    Host string `json:"host" binding:"required"`
+    IP   string `json:"ip" binding:"required,ip"`
+    Port int    `json:"port" binding:"required,min=1,max=65535"`
+}
+
+// BulkUpsertIPAddressesRequest represents bulk upsert request (for scanner import)
+type BulkUpsertIPAddressesRequest struct {
+    Mappings []IPAddressItem `json:"mappings" binding:"required,min=1,max=5000,dive"`
+}
+
+// BulkUpsertIPAddressesResponse represents bulk upsert response
+type BulkUpsertIPAddressesResponse struct {
+    UpsertedCount int `json:"upsertedCount"`
+}
+
+// BulkDeleteIPAddressesRequest represents bulk delete request (by IP list)
+type BulkDeleteIPAddressesRequest struct {
+    IPs []string `json:"ips" binding:"required,min=1"`
+}
+
+// BulkDeleteIPAddressesResponse represents bulk delete response
+type BulkDeleteIPAddressesResponse struct {
+    DeletedCount int64 `json:"deletedCount"`
+}
diff --git a/go-backend/internal/handler/ip_address.go b/go-backend/internal/handler/ip_address.go
new file mode 100644
index 00000000..833d0310
--- /dev/null
+++ b/go-backend/internal/handler/ip_address.go
@@ -0,0 +1,177 @@
+package handler
+
+import (
+    "database/sql"
+    "errors"
+    "fmt"
+    "strconv"
+    "strings"
+
+    "github.com/gin-gonic/gin"
+    "github.com/xingrin/go-backend/internal/dto"
+    "github.com/xingrin/go-backend/internal/pkg/csv"
+    "github.com/xingrin/go-backend/internal/service"
+)
+
+// IPAddressHandler handles IP address endpoints
+type IPAddressHandler struct {
+    svc *service.IPAddressService
+}
+
+// NewIPAddressHandler creates a new IP address handler
+func NewIPAddressHandler(svc *service.IPAddressService) *IPAddressHandler {
+    return &IPAddressHandler{svc: svc}
+}
+
+// List returns paginated IP addresses aggregated by IP
+// GET /api/targets/:id/ip-addresses
+func (h *IPAddressHandler) List(c *gin.Context) {
+    targetID, err := strconv.Atoi(c.Param("id"))
+    if err != nil {
+        dto.BadRequest(c, "Invalid target ID")
+        return
+    }
+
+    var query dto.IPAddressListQuery
+    if !dto.BindQuery(c, &query) {
+        return
+    }
+
+    results, total, err := h.svc.ListByTarget(targetID, &query)
+    if err != nil {
+        if errors.Is(err, service.ErrTargetNotFound) {
+            dto.NotFound(c, "Target not found")
+            return
+        }
+        dto.InternalError(c, "Failed to list IP addresses")
+        return
+    }
+
+    // Ensure empty arrays instead of null
+    for i := range results {
+        if results[i].Hosts == nil {
+            results[i].Hosts = []string{}
+        }
+        if results[i].Ports == nil {
+            results[i].Ports = []int{}
+        }
+    }
+
+    dto.Paginated(c, results, total, query.GetPage(), query.GetPageSize())
+}
+
+// Export exports IP addresses as CSV (raw format)
+// GET /api/targets/:id/ip-addresses/export
+// Query params: ips (optional, comma-separated IP list for filtering)
+func (h *IPAddressHandler) Export(c *gin.Context) {
+    targetID, err := strconv.Atoi(c.Param("id"))
+    if err != nil {
+        dto.BadRequest(c, "Invalid target ID")
+        return
+    }
+
+    // Parse optional IP filter
+    var ips []string
+    if ipsParam := c.Query("ips"); ipsParam != "" {
+        ips = strings.Split(ipsParam, ",")
+    }
+
+    var rows *sql.Rows
+    var count int64
+
+    if len(ips) > 0 {
+        // Export selected IPs only
+        rows, err = h.svc.StreamByTargetAndIPs(targetID, ips)
+        count = 0 // Unknown count for filtered export
+    } else {
+        // Export all
+        count, err = h.svc.CountByTarget(targetID)
+        if err != nil {
+            if errors.Is(err, service.ErrTargetNotFound) {
+                dto.NotFound(c, "Target not found")
+                return
+            }
+            dto.InternalError(c, "Failed to export IP addresses")
+            return
+        }
+        rows, err = h.svc.StreamByTarget(targetID)
+    }
+
+    if err != nil {
+        if errors.Is(err, service.ErrTargetNotFound) {
+            dto.NotFound(c, "Target not found")
+            return
+        }
+        dto.InternalError(c, "Failed to export IP addresses")
+        return
+    }
+
+    headers := []string{"ip", "host", "port", "created_at"}
+    filename := fmt.Sprintf("target-%d-ip-addresses.csv", targetID)
+
+    mapper := func(rows *sql.Rows) ([]string, error) {
+        mapping, err := h.svc.ScanRow(rows)
+        if err != nil {
+            return nil, err
+        }
+
+        return []string{
+            mapping.IP,
+            mapping.Host,
+            strconv.Itoa(mapping.Port),
+            mapping.CreatedAt.Format("2006-01-02 15:04:05"),
+        }, nil
+    }
+
+    if err := csv.StreamCSV(c, rows, headers, filename, mapper, count); err != nil {
+        return
+    }
+}
+
+// BulkUpsert creates multiple IP address mappings (ignores duplicates)
+// POST /api/targets/:id/ip-addresses/bulk-upsert
+func (h *IPAddressHandler) BulkUpsert(c *gin.Context) {
+    targetID, err := strconv.Atoi(c.Param("id"))
+    if err != nil {
+        dto.BadRequest(c, "Invalid target ID")
+        return
+    }
+
+    var req dto.BulkUpsertIPAddressesRequest
+    if !dto.BindJSON(c, &req) {
+        return
+    }
+
+    upsertedCount, err := h.svc.BulkUpsert(targetID, req.Mappings)
+    if err != nil {
+        if errors.Is(err, service.ErrTargetNotFound) {
+            dto.NotFound(c, "Target not found")
+            return
+        }
+        dto.InternalError(c, "Failed to upsert IP addresses")
+        return
+    }
+
+    dto.Success(c, dto.BulkUpsertIPAddressesResponse{
+        UpsertedCount: int(upsertedCount),
+    })
+}
+
+// BulkDelete deletes IP address mappings by IP list
+// POST /api/ip-addresses/bulk-delete
+func (h *IPAddressHandler) BulkDelete(c *gin.Context) {
+    var req dto.BulkDeleteIPAddressesRequest
+    if !dto.BindJSON(c, &req) {
+        return
+    }
+
+    deletedCount, err := h.svc.BulkDeleteByIPs(req.IPs)
+    if err != nil {
+        dto.InternalError(c, "Failed to delete IP addresses")
+        return
+    }
+
+    dto.Success(c, dto.BulkDeleteIPAddressesResponse{
+        DeletedCount: deletedCount,
+    })
+}
diff --git a/go-backend/internal/pkg/scope/filter.go b/go-backend/internal/pkg/scope/filter.go
index 460a46df..a9fe2a83 100644
--- a/go-backend/internal/pkg/scope/filter.go
+++ b/go-backend/internal/pkg/scope/filter.go
@@ -30,8 +30,10 @@ type FilterGroup struct {
 // FieldConfig represents field configuration for filtering
 type FieldConfig struct {
-    Column  string // Database column name
-    IsArray bool   // Whether it's a PostgreSQL array field
+    Column    string // Database column name
+    IsArray   bool   // Whether it's a PostgreSQL array field
+    IsNumeric bool   // Whether it's a numeric field (int, float)
+    NeedsCast bool   // Whether it needs ::text cast (e.g. inet, uuid)
 }
 
 // FilterMapping is a map of field name to field config
@@ -270,6 +272,15 @@ func buildSingleCondition(config FieldConfig, filter ParsedFilter) (string, []in
         return buildArrayCondition(column, filter)
     }
 
+    if config.IsNumeric {
+        return buildNumericCondition(column, filter)
+    }
+
+    // For fields that need text cast (inet, uuid, etc)
+    if config.NeedsCast {
+        column = column + "::text"
+    }
+
     switch filter.Operator {
     case "==":
         // Exact match
@@ -283,6 +294,22 @@ func buildSingleCondition(config FieldConfig, filter ParsedFilter) (string, []in
     }
 }
 
+// buildNumericCondition builds condition for numeric fields
+// Uses ::text cast to enable string operations on numeric columns
+func buildNumericCondition(column string, filter ParsedFilter) (string, []interface{}) {
+    switch filter.Operator {
+    case "==":
+        // Exact match
+        return column + "::text = ?", []interface{}{filter.Value}
+    case "!=":
+        // Not equal
+        return column + "::text != ?", []interface{}{filter.Value}
+    default: // "="
+        // Fuzzy match
+        return column + "::text ILIKE ?", []interface{}{"%" + filter.Value + "%"}
+    }
+}
+
 // buildArrayCondition builds condition for PostgreSQL array fields
 func buildArrayCondition(column string, filter ParsedFilter) (string, []interface{}) {
     switch filter.Operator {
diff --git a/go-backend/internal/repository/directory.go b/go-backend/internal/repository/directory.go
index f7f53c49..784e9974 100644
--- a/go-backend/internal/repository/directory.go
+++ b/go-backend/internal/repository/directory.go
@@ -22,7 +22,7 @@ func NewDirectoryRepository(db *gorm.DB) *DirectoryRepository {
 // DirectoryFilterMapping defines field mapping for directory filtering
 var DirectoryFilterMapping = scope.FilterMapping{
     "url":    {Column: "url"},
-    "status": {Column: "status"},
+    "status": {Column: "status", IsNumeric: true},
 }
 
 // FindByTargetID finds directories by target ID with pagination and filter
diff --git a/go-backend/internal/repository/endpoint.go b/go-backend/internal/repository/endpoint.go
index 5a3ebdf5..d638d213 100644
--- a/go-backend/internal/repository/endpoint.go
+++ b/go-backend/internal/repository/endpoint.go
@@ -24,7 +24,7 @@ var EndpointFilterMapping = scope.FilterMapping{
     "url":    {Column: "url"},
     "host":   {Column: "host"},
     "title":  {Column: "title"},
-    "status": {Column: "status_code"},
+    "status": {Column: "status_code", IsNumeric: true},
     "tech":   {Column: "tech", IsArray: true},
 }
 
diff --git a/go-backend/internal/repository/ip_address.go b/go-backend/internal/repository/ip_address.go
new file mode 100644
index 00000000..79ff115b
--- /dev/null
+++ b/go-backend/internal/repository/ip_address.go
@@ -0,0 +1,170 @@
+package repository
+
+import (
+    "database/sql"
+    "sort"
+    "time"
+
+    "github.com/xingrin/go-backend/internal/model"
+    "github.com/xingrin/go-backend/internal/pkg/scope"
+    "gorm.io/gorm"
+    "gorm.io/gorm/clause"
+)
+
+// IPAddressRepository handles IP address (host_port_mapping) database operations
+type IPAddressRepository struct {
+    db *gorm.DB
+}
+
+// NewIPAddressRepository creates a new IP address repository
+func NewIPAddressRepository(db *gorm.DB) *IPAddressRepository {
+    return &IPAddressRepository{db: db}
+}
+
+// IPAddressFilterMapping defines field mapping for filtering
+var IPAddressFilterMapping = scope.FilterMapping{
+    "host": {Column: "host"},
+    "ip":   {Column: "ip", NeedsCast: true},
+    "port": {Column: "port", IsNumeric: true},
+}
+
+// IPAggregationRow represents a row from IP aggregation query
+type IPAggregationRow struct {
+    IP        string
+    CreatedAt time.Time
+}
+
+// GetIPAggregation returns IPs with their earliest created_at, ordered by created_at DESC
+func (r *IPAddressRepository) GetIPAggregation(targetID int, filter string) ([]IPAggregationRow, int64, error) {
+    // Build base query
+    baseQuery := r.db.Model(&model.HostPortMapping{}).Where("target_id = ?", targetID)
+
+    // Apply filter
+    baseQuery = baseQuery.Scopes(scope.WithFilter(filter, IPAddressFilterMapping))
+
+    // Get distinct IPs with MIN(created_at)
+    var results []IPAggregationRow
+    err := baseQuery.
+        Select("ip, MIN(created_at) as created_at").
+        Group("ip").
+        Order("MIN(created_at) DESC").
+        Scan(&results).Error
+    if err != nil {
+        return nil, 0, err
+    }
+
+    return results, int64(len(results)), nil
+}
+
+// GetHostsAndPortsByIP returns hosts and ports for a specific IP
+func (r *IPAddressRepository) GetHostsAndPortsByIP(targetID int, ip string, filter string) ([]string, []int, error) {
+    baseQuery := r.db.Model(&model.HostPortMapping{}).
+        Where("target_id = ? AND ip = ?", targetID, ip)
+
+    // Apply filter
+    baseQuery = baseQuery.Scopes(scope.WithFilter(filter, IPAddressFilterMapping))
+
+    // Get distinct host and port combinations
+    var mappings []struct {
+        Host string
+        Port int
+    }
+    err := baseQuery.
+        Select("DISTINCT host, port").
+        Scan(&mappings).Error
+    if err != nil {
+        return nil, nil, err
+    }
+
+    // Collect unique hosts and ports
+    hostSet := make(map[string]struct{})
+    portSet := make(map[int]struct{})
+    for _, m := range mappings {
+        hostSet[m.Host] = struct{}{}
+        portSet[m.Port] = struct{}{}
+    }
+
+    // Convert to sorted slices
+    hosts := make([]string, 0, len(hostSet))
+    for h := range hostSet {
+        hosts = append(hosts, h)
+    }
+    sort.Strings(hosts)
+
+    ports := make([]int, 0, len(portSet))
+    for p := range portSet {
+        ports = append(ports, p)
+    }
+    sort.Ints(ports)
+
+    return hosts, ports, nil
+}
+
+// StreamByTargetID returns a sql.Rows cursor for streaming export (raw format)
+func (r *IPAddressRepository) StreamByTargetID(targetID int) (*sql.Rows, error) {
+    return r.db.Model(&model.HostPortMapping{}).
+        Where("target_id = ?", targetID).
+        Order("ip, host, port").
+        Rows()
+}
+
+// StreamByTargetIDAndIPs returns a sql.Rows cursor for streaming export filtered by IPs
+func (r *IPAddressRepository) StreamByTargetIDAndIPs(targetID int, ips []string) (*sql.Rows, error) {
+    return r.db.Model(&model.HostPortMapping{}).
+        Where("target_id = ? AND ip IN ?", targetID, ips).
+        Order("ip, host, port").
+        Rows()
+}
+
+// CountByTargetID returns the count of unique IPs for a target
+func (r *IPAddressRepository) CountByTargetID(targetID int) (int64, error) {
+    var count int64
+    err := r.db.Model(&model.HostPortMapping{}).
+        Where("target_id = ?", targetID).
+        Distinct("ip").
+        Count(&count).Error
+    return count, err
+}
+
+// ScanRow scans a single row into HostPortMapping model
+func (r *IPAddressRepository) ScanRow(rows *sql.Rows) (*model.HostPortMapping, error) {
+    var mapping model.HostPortMapping
+    if err := r.db.ScanRows(rows, &mapping); err != nil {
+        return nil, err
+    }
+    return &mapping, nil
+}
+
+// BulkUpsert creates multiple mappings, ignoring duplicates (ON CONFLICT DO NOTHING)
+func (r *IPAddressRepository) BulkUpsert(mappings []model.HostPortMapping) (int64, error) {
+    if len(mappings) == 0 {
+        return 0, nil
+    }
+
+    var totalAffected int64
+
+    // Process in batches to avoid PostgreSQL parameter limits
+    batchSize := 100
+    for i := 0; i < len(mappings); i += batchSize {
+        end := min(i+batchSize, len(mappings))
+        batch := mappings[i:end]
+
+        // Use ON CONFLICT DO NOTHING since all fields are in unique constraint
+        result := r.db.Clauses(clause.OnConflict{DoNothing: true}).Create(&batch)
+        if result.Error != nil {
+            return totalAffected, result.Error
+        }
+        totalAffected += result.RowsAffected
+    }
+
+    return totalAffected, nil
+}
+
+// DeleteByIPs deletes all mappings for the given IPs
+func (r *IPAddressRepository) DeleteByIPs(ips []string) (int64, error) {
+    if len(ips) == 0 {
+        return 0, nil
+    }
+    result := r.db.Where("ip IN ?", ips).Delete(&model.HostPortMapping{})
+    return result.RowsAffected, result.Error
+}
diff --git a/go-backend/internal/repository/website.go b/go-backend/internal/repository/website.go
index e0acfcb3..c458334b 100644
--- a/go-backend/internal/repository/website.go
+++ b/go-backend/internal/repository/website.go
@@ -25,7 +25,7 @@ var WebsiteFilterMapping = scope.FilterMapping{
     "url":    {Column: "url"},
     "host":   {Column: "host"},
     "title":  {Column: "title"},
-    "status": {Column: "status_code"},
+    "status": {Column: "status_code", IsNumeric: true},
     "tech":   {Column: "tech", IsArray: true},
 }
 
diff --git a/go-backend/internal/service/ip_address.go b/go-backend/internal/service/ip_address.go
new file mode 100644
index 00000000..9ff4e8f1
--- /dev/null
+++ b/go-backend/internal/service/ip_address.go
@@ -0,0 +1,141 @@
+package service
+
+import (
+    "database/sql"
+    "errors"
+
+    "github.com/xingrin/go-backend/internal/dto"
+    "github.com/xingrin/go-backend/internal/model"
+    "github.com/xingrin/go-backend/internal/repository"
+    "gorm.io/gorm"
+)
+
+// IPAddressService handles IP address business logic
+type IPAddressService struct {
+    repo       *repository.IPAddressRepository
+    targetRepo *repository.TargetRepository
+}
+
+// NewIPAddressService creates a new IP address service
+func NewIPAddressService(repo *repository.IPAddressRepository, targetRepo *repository.TargetRepository) *IPAddressService {
+    return &IPAddressService{repo: repo, targetRepo: targetRepo}
+}
+
+// ListByTarget returns paginated IP addresses aggregated by IP
+func (s *IPAddressService) ListByTarget(targetID int, query *dto.IPAddressListQuery) ([]dto.IPAddressResponse, int64, error) {
+    // Get IP aggregation (all IPs with their earliest created_at)
+    ipRows, total, err := s.repo.GetIPAggregation(targetID, query.Filter)
+    if err != nil {
+        return nil, 0, err
+    }
+
+    // Apply pagination to IP list
+    page := query.GetPage()
+    pageSize := query.GetPageSize()
+    start := (page - 1) * pageSize
+    end := start + pageSize
+
+    if start >= len(ipRows) {
+        return []dto.IPAddressResponse{}, total, nil
+    }
+    if end > len(ipRows) {
+        end = len(ipRows)
+    }
+
+    pagedIPs := ipRows[start:end]
+
+    // For each IP, get its hosts and ports
+    results := make([]dto.IPAddressResponse, 0, len(pagedIPs))
+    for _, row := range pagedIPs {
+        hosts, ports, err := s.repo.GetHostsAndPortsByIP(targetID, row.IP, query.Filter)
+        if err != nil {
+            return nil, 0, err
+        }
+
+        results = append(results, dto.IPAddressResponse{
+            IP:        row.IP,
+            Hosts:     hosts,
+            Ports:     ports,
+            CreatedAt: row.CreatedAt,
+        })
+    }
+
+    return results, total, nil
+}
+
+// StreamByTarget returns a cursor for streaming export (raw format)
+func (s *IPAddressService) StreamByTarget(targetID int) (*sql.Rows, error) {
+    _, err := s.targetRepo.FindByID(targetID)
+    if err != nil {
+        if errors.Is(err, gorm.ErrRecordNotFound) {
+            return nil, ErrTargetNotFound
+        }
+        return nil, err
+    }
+    return s.repo.StreamByTargetID(targetID)
+}
+
+// StreamByTargetAndIPs returns a cursor for streaming export filtered by IPs
+func (s *IPAddressService) StreamByTargetAndIPs(targetID int, ips []string) (*sql.Rows, error) {
+    _, err := s.targetRepo.FindByID(targetID)
+    if err != nil {
+        if errors.Is(err, gorm.ErrRecordNotFound) {
+            return nil, ErrTargetNotFound
+        }
+        return nil, err
+    }
+    return s.repo.StreamByTargetIDAndIPs(targetID, ips)
+}
+
+// CountByTarget returns the count of unique IPs for a target
+func (s *IPAddressService) CountByTarget(targetID int) (int64, error) {
+    _, err := s.targetRepo.FindByID(targetID)
+    if err != nil {
+        if errors.Is(err, gorm.ErrRecordNotFound) {
+            return 0, ErrTargetNotFound
+        }
+        return 0, err
+    }
+    return s.repo.CountByTargetID(targetID)
+}
+
+// ScanRow scans a row into HostPortMapping model
+func (s *IPAddressService) ScanRow(rows *sql.Rows) (*model.HostPortMapping, error) {
+    return s.repo.ScanRow(rows)
+}
+
+// BulkUpsert creates multiple mappings for a target (ignores duplicates)
+func (s *IPAddressService) BulkUpsert(targetID int, items []dto.IPAddressItem) (int64, error) {
+    _, err := s.targetRepo.FindByID(targetID)
+    if err != nil {
+        if errors.Is(err, gorm.ErrRecordNotFound) {
+            return 0, ErrTargetNotFound
+        }
+        return 0, err
+    }
+
+    // Convert DTOs to models
+    mappings := make([]model.HostPortMapping, 0, len(items))
+    for _, item := range items {
+        mappings = append(mappings, model.HostPortMapping{
+            TargetID: targetID,
+            Host:     item.Host,
+            IP:       item.IP,
+            Port:     item.Port,
+        })
+    }
+
+    if len(mappings) == 0 {
+        return 0, nil
+    }
+
+    return s.repo.BulkUpsert(mappings)
+}
+
+// BulkDeleteByIPs deletes all mappings for the given IPs
+func (s *IPAddressService) BulkDeleteByIPs(ips []string) (int64, error) {
+    if len(ips) == 0 {
+        return 0, nil
+    }
+    return s.repo.DeleteByIPs(ips)
+}
diff --git a/go-backend/seed b/go-backend/seed
index f71ce608..119a74f0 100755
Binary files a/go-backend/seed and b/go-backend/seed differ
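
Usage sketch (not part of the patch): a minimal illustration of how the updated exportIPAddressesByTargetId from this diff might be driven. Only the service method and its optional ips parameter come from the patch; the import path and the downloadBlob helper below are hypothetical and simply mirror the object-URL download pattern the view now wraps in try/catch.

// Hypothetical import path; adjust to the project's alias configuration.
import { IPAddressService } from "@/services/ip-address.service"

// Hypothetical helper: turns a Blob into a browser download.
function downloadBlob(blob: Blob, filename: string): void {
  const url = URL.createObjectURL(blob)
  const a = document.createElement("a")
  a.href = url
  a.download = filename
  document.body.appendChild(a)
  a.click()
  document.body.removeChild(a)
  URL.revokeObjectURL(url)
}

async function exportSelected(targetId: number, selectedIps: string[]): Promise<void> {
  // Passing the optional ips array narrows the backend export to the selected rows;
  // omitting it exports every IP recorded for the target.
  const blob = await IPAddressService.exportIPAddressesByTargetId(targetId, selectedIps)
  downloadBlob(blob, `target-${targetId}-ip-addresses-selected-${Date.now()}.csv`)
}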