nuclei v3 : misc updates (#4247)

* use parsed options while signing

* update project layout to v3

* fix .gitignore

* remove example template

* misc updates

* bump tlsx version

* hide template sig warning with env

* js: retain value while using log

* fix nil pointer dereference

* misc doc update

---------

Co-authored-by: sandeep <8293321+ehsandeep@users.noreply.github.com>
Author: Tarun Koyalwar
Date: 2023-10-17 17:44:13 +05:30
Committed by: GitHub
parent 3276703244
commit dc44105baf
542 changed files with 1252 additions and 1338 deletions


@@ -0,0 +1,142 @@
package hosterrorscache
import (
"net"
"net/url"
"regexp"
"strings"
"sync"
"sync/atomic"
"github.com/bluele/gcache"
"github.com/projectdiscovery/gologger"
)
// CacheInterface defines the signature of the hosterrorscache so that
// users embedding Nuclei as a library may implement their own cache
type CacheInterface interface {
SetVerbose(verbose bool) // log verbosely
Close() // close the cache
Check(value string) bool // return true if the host should be skipped
MarkFailed(value string, err error) // record a failure (and cause) for the host
}
// Cache is a cache for host based errors. It allows skipping
// certain hosts based on an error threshold.
//
// It uses an LRU cache internally for skipping unresponsive hosts
// that remain so for a duration.
type Cache struct {
MaxHostError int
verbose bool
failedTargets gcache.Cache
TrackError []string
}
type cacheItem struct {
errors atomic.Int32
sync.Once
}
const DefaultMaxHostsCount = 10000
// New returns a new host max errors cache
func New(maxHostError, maxHostsCount int, trackError []string) *Cache {
gc := gcache.New(maxHostsCount).
ARC().
Build()
return &Cache{failedTargets: gc, MaxHostError: maxHostError, TrackError: trackError}
}
// SetVerbose sets the cache to log at verbose level
func (c *Cache) SetVerbose(verbose bool) {
c.verbose = verbose
}
// Close closes the host errors cache
func (c *Cache) Close() {
c.failedTargets.Purge()
}
func (c *Cache) normalizeCacheValue(value string) string {
finalValue := value
if strings.HasPrefix(value, "http") {
if parsed, err := url.Parse(value); err == nil {
hostname := parsed.Host
finalPort := parsed.Port()
if finalPort == "" {
if parsed.Scheme == "https" {
finalPort = "443"
} else {
finalPort = "80"
}
hostname = net.JoinHostPort(parsed.Host, finalPort)
}
finalValue = hostname
}
}
return finalValue
}
// ErrUnresponsiveHost is returned when a host is unresponsive
// var ErrUnresponsiveHost = errors.New("skipping as host is unresponsive")
// Check returns true if a host should be skipped because it has been
// unresponsive a certain number of times.
//
// The value can be in many formats -
// - URL: https?:// type
// - Host:port type
// - host type
func (c *Cache) Check(value string) bool {
finalValue := c.normalizeCacheValue(value)
existingCacheItem, err := c.failedTargets.GetIFPresent(finalValue)
if err != nil {
return false
}
existingCacheItemValue := existingCacheItem.(*cacheItem)
if existingCacheItemValue.errors.Load() >= int32(c.MaxHostError) {
existingCacheItemValue.Do(func() {
gologger.Info().Msgf("Skipped %s from target list as found unresponsive %d times", finalValue, existingCacheItemValue.errors.Load())
})
return true
}
return false
}
// MarkFailed records a failure (and its cause) for the host
func (c *Cache) MarkFailed(value string, err error) {
if !c.checkError(err) {
return
}
finalValue := c.normalizeCacheValue(value)
existingCacheItem, err := c.failedTargets.GetIFPresent(finalValue)
if err != nil || existingCacheItem == nil {
newItem := &cacheItem{errors: atomic.Int32{}}
newItem.errors.Store(1)
_ = c.failedTargets.Set(finalValue, newItem)
return
}
existingCacheItemValue := existingCacheItem.(*cacheItem)
existingCacheItemValue.errors.Add(1)
_ = c.failedTargets.Set(finalValue, existingCacheItemValue)
}
var reCheckError = regexp.MustCompile(`(no address found for host|Client\.Timeout exceeded while awaiting headers|could not resolve host|connection refused)`)
// checkError checks if an error represents a type that should be
// added to the host skipping table.
func (c *Cache) checkError(err error) bool {
if err == nil {
return false
}
errString := err.Error()
for _, msg := range c.TrackError {
if strings.Contains(errString, msg) {
return true
}
}
return reCheckError.MatchString(errString)
}


@@ -0,0 +1,141 @@
package hosterrorscache
import (
"fmt"
"sync"
"sync/atomic"
"testing"
"github.com/stretchr/testify/require"
)
func TestCacheCheck(t *testing.T) {
cache := New(3, DefaultMaxHostsCount, nil)
for i := 0; i < 100; i++ {
cache.MarkFailed("test", fmt.Errorf("could not resolve host"))
got := cache.Check("test")
if i < 2 {
// before the 3rd failure the host is not flagged to skip
require.False(t, got)
} else {
// from the 3rd failure onward it must remain flagged to skip
require.True(t, got)
}
}
value := cache.Check("test")
require.Equal(t, true, value, "could not get checked value")
}
func TestTrackErrors(t *testing.T) {
cache := New(3, DefaultMaxHostsCount, []string{"custom error"})
for i := 0; i < 100; i++ {
cache.MarkFailed("custom", fmt.Errorf("got: nested: custom error"))
got := cache.Check("custom")
if i < 2 {
// before the 3rd failure the host is not flagged to skip
require.False(t, got)
} else {
// from the 3rd failure onward it must remain flagged to skip
require.True(t, got)
}
}
value := cache.Check("custom")
require.Equal(t, true, value, "could not get checked value")
}
func TestCacheItemDo(t *testing.T) {
var (
count int
item cacheItem
)
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
defer wg.Done()
item.Do(func() {
count++
})
}()
}
wg.Wait()
// ensures the increment happened only once regardless of multiple calls
require.Equal(t, count, 1)
}
func TestCacheMarkFailed(t *testing.T) {
cache := New(3, DefaultMaxHostsCount, nil)
tests := []struct {
host string
expected int
}{
{"http://example.com:80", 1},
{"example.com:80", 2},
{"example.com", 1},
}
for _, test := range tests {
normalizedCacheValue := cache.normalizeCacheValue(test.host)
cache.MarkFailed(test.host, fmt.Errorf("no address found for host"))
failedTarget, err := cache.failedTargets.Get(normalizedCacheValue)
require.Nil(t, err)
require.NotNil(t, failedTarget)
value, ok := failedTarget.(*cacheItem)
require.True(t, ok)
require.EqualValues(t, test.expected, value.errors.Load())
}
}
func TestCacheMarkFailedConcurrent(t *testing.T) {
cache := New(3, DefaultMaxHostsCount, nil)
tests := []struct {
host string
expected int32
}{
{"http://example.com:80", 200},
{"example.com:80", 200},
{"example.com", 100},
}
// item creation in the cache is not atomic, so we pre-create the items with their counters set to zero
for _, test := range tests {
normalizedValue := cache.normalizeCacheValue(test.host)
newItem := &cacheItem{errors: atomic.Int32{}}
newItem.errors.Store(0)
_ = cache.failedTargets.Set(normalizedValue, newItem)
}
wg := sync.WaitGroup{}
for _, test := range tests {
currentTest := test
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
defer wg.Done()
cache.MarkFailed(currentTest.host, fmt.Errorf("could not resolve host"))
}()
}
}
wg.Wait()
for _, test := range tests {
require.True(t, cache.Check(test.host))
normalizedCacheValue := cache.normalizeCacheValue(test.host)
failedTarget, err := cache.failedTargets.Get(normalizedCacheValue)
require.Nil(t, err)
require.NotNil(t, failedTarget)
value, ok := failedTarget.(*cacheItem)
require.True(t, ok)
require.EqualValues(t, test.expected, value.errors.Load())
}
}