diff --git a/go.sum b/go.sum
index aed5e024e..ce3f2b214 100644
--- a/go.sum
+++ b/go.sum
@@ -341,6 +341,8 @@ github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
 github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
 github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
 github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-echarts/go-echarts/v2 v2.3.3 h1:uImZAk6qLkC6F9ju6mZ5SPBqTyK8xjZKwSmwnCg4bxg=
+github.com/go-echarts/go-echarts/v2 v2.3.3/go.mod h1:56YlvzhW/a+du15f3S2qUGNDfKnFOeJSThBIrVFHDtI=
 github.com/go-echarts/go-echarts/v2 v2.3.3/go.mod h1:56YlvzhW/a+du15f3S2qUGNDfKnFOeJSThBIrVFHDtI=
 github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
 github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
@@ -835,8 +837,6 @@ github.com/projectdiscovery/clistats v0.0.20 h1:5jO5SLiRJ7f0nDV0ndBNmBeesbROouPo
 github.com/projectdiscovery/clistats v0.0.20/go.mod h1:GJ2av0KnOvK0AISQnP8hyDclYIji1LVkx2l0pwnzAu4=
 github.com/projectdiscovery/dsl v0.0.52 h1:jvIvF+qN8+MbI1MHtWJJKfWqAZQlCExL3ob7SddQbZE=
 github.com/projectdiscovery/dsl v0.0.52/go.mod h1:xfcHwhy2HSaeGgh+1wqzOoCGm2XTdh5JzjBRBVHEMvI=
-github.com/projectdiscovery/fastdialer v0.0.67 h1:NvBpZUiLr9Ne9N+Lvi6FFiNNLWuhk5Bc1H+oE9J8C1E=
-github.com/projectdiscovery/fastdialer v0.0.67/go.mod h1:GhSAKnojJN8N9K0JNjLmwLCmEDsQ5cBAStqSCm/tm84=
 github.com/projectdiscovery/fastdialer v0.0.68 h1:JuIrr8aVGdGWkEwL4axsJWAWDY2uviSqBB0TCekeCOo=
 github.com/projectdiscovery/fastdialer v0.0.68/go.mod h1:asHSBFJgmwrXpiegcrcAgOyd/QewCVgeI4idH55+v7M=
 github.com/projectdiscovery/fasttemplate v0.0.2 h1:h2cISk5xDhlJEinlBQS6RRx0vOlOirB2y3Yu4PJzpiA=
@@ -889,8 +889,6 @@ github.com/projectdiscovery/uncover v1.0.7 h1:ut+2lTuvmftmveqF5RTjMWAgyLj8ltPQC7
 github.com/projectdiscovery/uncover v1.0.7/go.mod h1:HFXgm1sRPuoN0D4oATljPIdmbo/EEh1wVuxQqo/dwFE=
 github.com/projectdiscovery/useragent v0.0.47 h1:VEOU7uG7TutZNIE0DZNP7hGAGi4bwLPGM1X7Rny52s0=
 github.com/projectdiscovery/useragent v0.0.47/go.mod h1:Cfk9X9SISYSCmqpej0r9+paJbDHzNHic2YdWQtpdz2M=
-github.com/projectdiscovery/utils v0.0.89 h1:ruH2bSkpX/rB7EPp2EV/rWyAubQVxCVU38nRcLp4L1w=
-github.com/projectdiscovery/utils v0.0.89/go.mod h1:Dwh5cxn7y97jvyYG3GmBvj0negfH9IjH15qXnzFNtOI=
 github.com/projectdiscovery/utils v0.0.91 h1:aHAAnC0qX9pJZrWq4Qpl2PSTYLrSCL1dm1QWLjprE2w=
 github.com/projectdiscovery/utils v0.0.91/go.mod h1:O/6U3ZoU+tNw4lKurdjyVMZPVXL5IYq0YeaDc15PRls=
 github.com/projectdiscovery/wappalyzergo v0.0.116 h1:xy+mBpwbYo/0PSzmJOQ/RXHomEh0D3nDBcbCxsW69m8=
diff --git a/pkg/protocols/common/hosterrorscache/hosterrorscache.go b/pkg/protocols/common/hosterrorscache/hosterrorscache.go
index 2eb12cfcb..3ada7718a 100644
--- a/pkg/protocols/common/hosterrorscache/hosterrorscache.go
+++ b/pkg/protocols/common/hosterrorscache/hosterrorscache.go
@@ -124,7 +124,7 @@ func (c *Cache) MarkFailed(value string, err error) {
 	_ = c.failedTargets.Set(finalValue, existingCacheItemValue)
 }
 
-var reCheckError = regexp.MustCompile(`(no address found for host|Client\.Timeout exceeded while awaiting headers|could not resolve host|connection refused|connection reset by peer)`)
+var reCheckError = regexp.MustCompile(`(no address found for host|Client\.Timeout exceeded while awaiting headers|could not resolve host|connection refused|connection reset by peer|i/o timeout|could not connect to any address found for host)`)
 
 // checkError checks if an error represents a type that should be
 // added to the host skipping table.
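For context, a quick sketch (not part of the change set) of how the widened reCheckError pattern classifies error strings; the sample error messages below are hypothetical and exist only to exercise the two newly added alternatives, `i/o timeout` and `could not connect to any address found for host`:

```go
package main

import (
	"fmt"
	"regexp"
)

// reCheckError mirrors the pattern added to hosterrorscache.go above,
// including the two new alternatives for unresponsive hosts.
var reCheckError = regexp.MustCompile(`(no address found for host|Client\.Timeout exceeded while awaiting headers|could not resolve host|connection refused|connection reset by peer|i/o timeout|could not connect to any address found for host)`)

func main() {
	// hypothetical error strings, used only for illustration
	samples := []string{
		"read tcp 10.0.0.5:34552->93.184.216.34:443: i/o timeout",
		"could not connect to any address found for host example.local",
		"unexpected EOF", // not treated as an unresponsive-host error
	}
	for _, s := range samples {
		fmt.Printf("%-62s -> %v\n", s, reCheckError.MatchString(s))
	}
}
```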
diff --git a/pkg/protocols/http/httputils/spm.go b/pkg/protocols/http/httputils/spm.go
index 52d13f06f..5e20fea58 100644
--- a/pkg/protocols/http/httputils/spm.go
+++ b/pkg/protocols/http/httputils/spm.go
@@ -5,6 +5,7 @@ import (
 	"sync"
 
 	syncutil "github.com/projectdiscovery/utils/sync"
+	"golang.org/x/exp/maps"
 )
 
 // WorkPoolType is the type of work pool to use
@@ -19,7 +20,7 @@ const (
 
 // StopAtFirstMatchHandler is a handler that executes
 // request and stops on first match
-type StopAtFirstMatchHandler[T any] struct {
+type StopAtFirstMatchHandler[T comparable] struct {
 	once sync.Once
 	// Result Channel
 	ResultChan chan T
@@ -33,12 +34,14 @@ type StopAtFirstMatchHandler[T any] struct {
 	ctx         context.Context
 	cancel      context.CancelFunc
 	internalWg  *sync.WaitGroup
-	results     []T
+	results     map[T]struct{}
+	onResult    func(T)
 	stopEnabled bool
+	maxResults  int
 }
 
 // NewBlockingSPMHandler creates a new stop at first match handler
-func NewBlockingSPMHandler[T any](ctx context.Context, size int, spm bool) *StopAtFirstMatchHandler[T] {
+func NewBlockingSPMHandler[T comparable](ctx context.Context, size int, maxResults int, spm bool) *StopAtFirstMatchHandler[T] {
 	ctx1, cancel := context.WithCancel(ctx)
 
 	awg, _ := syncutil.New(syncutil.WithSize(size))
@@ -51,6 +54,8 @@ func NewBlockingSPMHandler[T any](ctx context.Context, size int, spm bool) *Stop
 		ctx:         ctx1,
 		cancel:      cancel,
 		stopEnabled: spm,
+		results:     make(map[T]struct{}),
+		maxResults:  maxResults,
 	}
 	s.internalWg.Add(1)
 	go s.run(ctx)
@@ -58,7 +63,7 @@
 }
 
 // NewNonBlockingSPMHandler creates a new stop at first match handler
-func NewNonBlockingSPMHandler[T any](ctx context.Context, spm bool) *StopAtFirstMatchHandler[T] {
+func NewNonBlockingSPMHandler[T comparable](ctx context.Context, maxResults int, spm bool) *StopAtFirstMatchHandler[T] {
 	ctx1, cancel := context.WithCancel(ctx)
 	s := &StopAtFirstMatchHandler[T]{
 		ResultChan: make(chan T, 1),
@@ -68,6 +73,8 @@ func NewNonBlockingSPMHandler[T any](ctx context.Context, spm bool) *StopAtFirst
 		ctx:         ctx1,
 		cancel:      cancel,
 		stopEnabled: spm,
+		results:     make(map[T]struct{}),
+		maxResults:  maxResults,
 	}
 	s.internalWg.Add(1)
 	go s.run(ctx)
@@ -82,6 +89,25 @@ func (h *StopAtFirstMatchHandler[T]) Trigger() {
 	}
 }
 
+// Cancel cancels spm context
+func (h *StopAtFirstMatchHandler[T]) Cancel() {
+	h.cancel()
+}
+
+// SetOnResultCallback registers a callback invoked for every received result
+// this is not thread safe
+func (h *StopAtFirstMatchHandler[T]) SetOnResultCallback(fn func(T)) {
+	if h.onResult != nil {
+		tmp := h.onResult
+		h.onResult = func(t T) {
+			tmp(t)
+			fn(t)
+		}
+	} else {
+		h.onResult = fn
+	}
+}
+
 // MatchCallback is called when a match is found
 // input fn should be the callback that is intended to be called
 // if stop at first is enabled and other conditions are met
@@ -104,7 +130,14 @@ func (h *StopAtFirstMatchHandler[T]) run(ctx context.Context) {
 			if !ok {
 				return
 			}
-			h.results = append(h.results, val)
+			if h.onResult != nil {
+				h.onResult(val)
+			}
+			if len(h.results) >= h.maxResults {
+				// skip storing the result once maxResults is reached
+				continue
+			}
+			h.results[val] = struct{}{}
 		}
 	}
 }
@@ -114,6 +147,11 @@ func (h *StopAtFirstMatchHandler[T]) Done() <-chan struct{} {
 	return h.ctx.Done()
 }
 
+// Cancelled returns true if the context is cancelled
+func (h *StopAtFirstMatchHandler[T]) Cancelled() bool {
+	return h.ctx.Err() != nil
+}
+
 // FoundFirstMatch returns true if first match was found
 // in stop at first match mode
 func (h *StopAtFirstMatchHandler[T]) FoundFirstMatch() bool {
@@ -168,5 +206,5 @@ func (h *StopAtFirstMatchHandler[T]) Wait() {
 
 // CombinedResults returns the combined results
 func (h *StopAtFirstMatchHandler[T]) CombinedResults() []T {
-	return h.results
+	return maps.Keys(h.results)
 }
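The spm.go changes above turn the handler's result list into a deduplicated, size-capped set and add cancellation hooks. Below is a rough, self-contained usage sketch mirroring the caller pattern in request.go further down; the import path assumes nuclei's v3 module layout, and the worker count, error values, and cap are made up for illustration:

```go
package main

import (
	"context"
	"fmt"

	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httputils"
)

func main() {
	// 10 concurrent workers, keep at most 3 distinct results, stop-at-first-match disabled
	spmHandler := httputils.NewBlockingSPMHandler[error](context.Background(), 10, 3, false)

	// invoked for every result pushed into ResultChan; request.go uses this hook
	// to mark unresponsive hosts and then cancel in-flight work via spmHandler.Cancel()
	spmHandler.SetOnResultCallback(func(err error) {
		fmt.Println("result:", err)
	})

	for i := 0; i < 20; i++ {
		spmHandler.Acquire()
		go func(i int) {
			defer spmHandler.Release()
			select {
			case <-spmHandler.Done():
			case spmHandler.ResultChan <- fmt.Errorf("connection refused (worker %d)", i):
			}
		}(i)
	}
	spmHandler.Wait()

	// at most 3 values survive, since results are stored as map keys capped at maxResults
	fmt.Println(len(spmHandler.CombinedResults()))
}
```

Because T is now constrained to comparable, results are stored as map keys, which is why CombinedResults returns maps.Keys(h.results) and why at most maxResults distinct values are retained.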
diff --git a/pkg/protocols/http/request.go b/pkg/protocols/http/request.go
index 5d64216f6..d966a4bb7 100644
--- a/pkg/protocols/http/request.go
+++ b/pkg/protocols/http/request.go
@@ -47,6 +47,9 @@ import (
 
 const (
 	defaultMaxWorkers = 150
+	// max unique errors to store & combine
+	// when executing requests in parallel
+	maxErrorsWhenParallel = 3
 )
 
 var (
@@ -111,7 +114,7 @@ func (request *Request) executeRaceRequest(input *contextargs.Context, previous
 	}
 
 	shouldStop := (request.options.Options.StopAtFirstMatch || request.StopAtFirstMatch || request.options.StopAtFirstMatch)
-	spmHandler := httputils.NewNonBlockingSPMHandler[error](ctx, shouldStop)
+	spmHandler := httputils.NewNonBlockingSPMHandler[error](ctx, maxErrorsWhenParallel, shouldStop)
 	gotMatches := &atomic.Bool{}
 	// wrappedCallback is a callback that wraps the original callback
 	// to implement stop at first match logic
@@ -132,12 +135,29 @@ func (request *Request) executeRaceRequest(input *contextargs.Context, previous
 		}
 	}
 
+	// look for unresponsive hosts and cancel inflight requests as well
+	spmHandler.SetOnResultCallback(func(err error) {
+		if err == nil {
+			return
+		}
+		// marks this host as unresponsive if applicable
+		request.markUnresponsiveHost(input, err)
+		if request.isUnresponsiveHost(input) {
+			// stop all inflight requests
+			spmHandler.Cancel()
+		}
+	})
+
 	for i := 0; i < request.RaceNumberRequests; i++ {
+		if spmHandler.FoundFirstMatch() || request.isUnresponsiveHost(input) {
+			// stop sending more requests condition is met
+			break
+		}
 		spmHandler.Acquire()
 		// execute http request
 		go func(httpRequest *generatedRequest) {
 			defer spmHandler.Release()
-			if spmHandler.FoundFirstMatch() {
+			if spmHandler.FoundFirstMatch() || request.isUnresponsiveHost(input) {
 				// stop sending more requests condition is met
 				return
 			}
@@ -175,7 +195,7 @@ func (request *Request) executeParallelHTTP(input *contextargs.Context, dynamicV
 	// Stop-at-first-match logic while executing requests
 	// parallely using threads
 	shouldStop := (request.options.Options.StopAtFirstMatch || request.StopAtFirstMatch || request.options.StopAtFirstMatch)
-	spmHandler := httputils.NewBlockingSPMHandler[error](context.Background(), maxWorkers, shouldStop)
+	spmHandler := httputils.NewBlockingSPMHandler[error](context.Background(), maxWorkers, maxErrorsWhenParallel, shouldStop)
 	// wrappedCallback is a callback that wraps the original callback
 	// to implement stop at first match logic
 	wrappedCallback := func(event *output.InternalWrappedEvent) {
@@ -194,6 +214,19 @@ func (request *Request) executeParallelHTTP(input *contextargs.Context, dynamicV
 		}
 	}
 
+	// look for unresponsive hosts and cancel inflight requests as well
+	spmHandler.SetOnResultCallback(func(err error) {
+		if err == nil {
+			return
+		}
+		// marks this host as unresponsive if applicable
+		request.markUnresponsiveHost(input, err)
+		if request.isUnresponsiveHost(input) {
+			// stop all inflight requests
+			spmHandler.Cancel()
+		}
+	})
+
 	// iterate payloads and make requests
 	generator := request.newGenerator(false)
 	for {
@@ -207,6 +240,11 @@ func (request *Request) executeParallelHTTP(input *contextargs.Context, dynamicV
 			spmHandler.Resize(request.options.Options.PayloadConcurrency)
 		}
 
+		// break if stop at first match is found or host is unresponsive
+		if spmHandler.FoundFirstMatch() || request.isUnresponsiveHost(input) {
+			break
+		}
+
 		ctx := request.newContext(input)
 		generatedHttpRequest, err := generator.Make(ctx, input, inputData, payloads, dynamicValues)
 		if err != nil {
@@ -222,19 +260,21 @@ func (request *Request) executeParallelHTTP(input *contextargs.Context, dynamicV
 		spmHandler.Acquire()
 		go func(httpRequest *generatedRequest) {
 			defer spmHandler.Release()
-			if spmHandler.FoundFirstMatch() {
+			if spmHandler.FoundFirstMatch() || request.isUnresponsiveHost(input) || spmHandler.Cancelled() {
+				return
+			}
+			// putting ratelimiter here prevents any unnecessary waiting if any
+			request.options.RateLimitTake()
+
+			// after ratelimit take, check if we need to stop
+			if spmHandler.FoundFirstMatch() || request.isUnresponsiveHost(input) || spmHandler.Cancelled() {
 				return
 			}
 
 			select {
 			case <-spmHandler.Done():
 				return
-			case spmHandler.ResultChan <- func() error {
-				// putting ratelimiter here prevents any unnecessary waiting if any
-				request.options.RateLimitTake()
-				previous := make(map[string]interface{})
-				return request.executeRequest(input, httpRequest, previous, false, wrappedCallback, 0)
-			}():
+			case spmHandler.ResultChan <- request.executeRequest(input, httpRequest, make(map[string]interface{}), false, wrappedCallback, 0):
 				return
 			}
 		}(generatedHttpRequest)
@@ -276,12 +316,10 @@ func (request *Request) executeTurboHTTP(input *contextargs.Context, dynamicValu
 		maxWorkers = pipeOptions.MaxPendingRequests
 	}
 
-	// Stop-at-first-match logic while executing requests
-	// parallely using threads
 	// Stop-at-first-match logic while executing requests
 	// parallely using threads
 	shouldStop := (request.options.Options.StopAtFirstMatch || request.StopAtFirstMatch || request.options.StopAtFirstMatch)
-	spmHandler := httputils.NewBlockingSPMHandler[error](context.Background(), maxWorkers, shouldStop)
+	spmHandler := httputils.NewBlockingSPMHandler[error](context.Background(), maxWorkers, maxErrorsWhenParallel, shouldStop)
 	// wrappedCallback is a callback that wraps the original callback
 	// to implement stop at first match logic
 	wrappedCallback := func(event *output.InternalWrappedEvent) {
@@ -300,11 +338,29 @@ func (request *Request) executeTurboHTTP(input *contextargs.Context, dynamicValu
 		}
 	}
 
+	// look for unresponsive hosts and cancel inflight requests as well
+	spmHandler.SetOnResultCallback(func(err error) {
+		if err == nil {
+			return
+		}
+		// marks this host as unresponsive if applicable
+		request.markUnresponsiveHost(input, err)
+		if request.isUnresponsiveHost(input) {
+			// stop all inflight requests
+			spmHandler.Cancel()
+		}
+	})
+
 	for {
 		inputData, payloads, ok := generator.nextValue()
 		if !ok {
 			break
 		}
+		if spmHandler.FoundFirstMatch() || request.isUnresponsiveHost(input) || spmHandler.Cancelled() {
+			// skip if first match is found or host is unresponsive
+			break
+		}
+
 		ctx := request.newContext(input)
 		generatedHttpRequest, err := generator.Make(ctx, input, inputData, payloads, dynamicValues)
 		if err != nil {
@@ -318,11 +374,10 @@ func (request *Request) executeTurboHTTP(input *contextargs.Context, dynamicValu
 		spmHandler.Acquire()
 		go func(httpRequest *generatedRequest) {
 			defer spmHandler.Release()
-			if spmHandler.FoundFirstMatch() {
+			if spmHandler.FoundFirstMatch() || request.isUnresponsiveHost(input) {
 				// skip if first match is found
 				return
 			}
-
 			select {
 			case <-spmHandler.Done():
 				return
@@ -398,7 +453,7 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicVa
 			input.MetaInput.Input = generatedHttpRequest.URL()
 		}
 		// Check if hosts keep erroring
-		if request.options.HostErrorsCache != nil && request.options.HostErrorsCache.Check(input.MetaInput.ID()) {
+		if request.isUnresponsiveHost(input) {
 			return true, nil
 		}
 		var gotMatches bool
@@ -437,12 +492,13 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicVa
 			return true, nil
 		}
 		if execReqErr != nil {
-			if request.options.HostErrorsCache != nil {
-				request.options.HostErrorsCache.MarkFailed(input.MetaInput.ID(), err)
-			}
+			// if applicable mark the host as unresponsive
+			request.markUnresponsiveHost(input, execReqErr)
 			requestErr = errorutil.NewWithErr(execReqErr).Msgf("got err while executing %v", generatedHttpRequest.URL())
+			request.options.Progress.IncrementFailedRequestsBy(1)
+		} else {
+			request.options.Progress.IncrementRequests()
 		}
-		request.options.Progress.IncrementRequests()
 
 		// If this was a match, and we want to stop at first match, skip all further requests.
 		shouldStopAtFirstMatch := generatedHttpRequest.original.options.Options.StopAtFirstMatch || generatedHttpRequest.original.options.StopAtFirstMatch || request.StopAtFirstMatch
@@ -482,6 +538,10 @@ const drainReqSize = int64(8 * 1024)
 
 // executeRequest executes the actual generated request and returns error if occurred
 func (request *Request) executeRequest(input *contextargs.Context, generatedRequest *generatedRequest, previousEvent output.InternalEvent, hasInteractMatchers bool, processEvent protocols.OutputEventCallback, requestCount int) (err error) {
+	// Check if hosts keep erroring
+	if request.isUnresponsiveHost(input) {
+		return fmt.Errorf("hostErrorsCache : host %s is unresponsive", input.MetaInput.Input)
+	}
 
 	// wrap one more callback for validation and fixing event
 	callback := func(event *output.InternalWrappedEvent) {
@@ -985,3 +1045,21 @@ func (request *Request) newContext(input *contextargs.Context) context.Context {
 	}
 	return context.Background()
 }
+
+// markUnresponsiveHost checks if the error indicates an unresponsive host and marks it
+func (request *Request) markUnresponsiveHost(input *contextargs.Context, err error) {
+	if err == nil {
+		return
+	}
+	if request.options.HostErrorsCache != nil {
+		request.options.HostErrorsCache.MarkFailed(input.MetaInput.ID(), err)
+	}
+}
+
+// isUnresponsiveHost checks if the host is unresponsive based on its execution history
+func (request *Request) isUnresponsiveHost(input *contextargs.Context) bool {
+	if request.options.HostErrorsCache != nil {
+		return request.options.HostErrorsCache.Check(input.MetaInput.ID())
+	}
+	return false
+}
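To make the new markUnresponsiveHost/isUnresponsiveHost flow concrete, here is a toy stand-in for the HostErrorsCache contract the helpers rely on (Check before sending, MarkFailed after a failure). The type, matching rule, and threshold below are illustrative only and are not nuclei's actual implementation:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// toyHostErrorsCache is a hypothetical stand-in for the cache used by
// markUnresponsiveHost/isUnresponsiveHost; nuclei's real cache lives in
// pkg/protocols/common/hosterrorscache and is more involved.
type toyHostErrorsCache struct {
	failures  map[string]int
	threshold int
}

// MarkFailed records a failure when the error looks like an unresponsive host.
func (c *toyHostErrorsCache) MarkFailed(id string, err error) {
	if err == nil || !strings.Contains(err.Error(), "i/o timeout") {
		return
	}
	c.failures[id]++
}

// Check reports whether the host has crossed the failure threshold.
func (c *toyHostErrorsCache) Check(id string) bool {
	return c.failures[id] >= c.threshold
}

func main() {
	cache := &toyHostErrorsCache{failures: map[string]int{}, threshold: 3}
	id := "https:example.local:443" // hypothetical MetaInput.ID()-style key

	for i := 0; i < 5; i++ {
		if cache.Check(id) {
			fmt.Println("request", i, "skipped: host marked unresponsive")
			continue
		}
		// pretend every request times out
		cache.MarkFailed(id, errors.New("read tcp: i/o timeout"))
		fmt.Println("request", i, "failed and was recorded")
	}
}
```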