@@ -350,7 +350,7 @@ func (c *Config) IsDebugArgEnabled(arg string) bool {
 
 // parseDebugArgs from string
 func (c *Config) parseDebugArgs(data string) {
-    // use space as seperator instead of commas
+    // use space as separator instead of commas
     tmp := strings.Fields(data)
     for _, v := range tmp {
         key := v
@@ -97,7 +97,7 @@ func getRemoteContent(URL string, remoteTemplateDomainList []string, contentType
        _ = response.Body.Close()
    }()
    if response.StatusCode < 200 || response.StatusCode > 299 {
-       return RemoteContent{Error: fmt.Errorf("get \"%s\": unexpect status %d", URL, response.StatusCode)}
+       return RemoteContent{Error: fmt.Errorf("get \"%s\": unexpected status %d", URL, response.StatusCode)}
    }
 
    scanner := bufio.NewScanner(response.Body)
@@ -61,7 +61,7 @@ func checkTimingDependency(
 
    var requestsSent []requestsSentMetadata
    for requestsLeft > 0 {
-       isCorrelationPossible, delayRecieved, err := sendRequestAndTestConfidence(regression, highSleepTimeSeconds, requestSender, baselineDelay)
+       isCorrelationPossible, delayReceived, err := sendRequestAndTestConfidence(regression, highSleepTimeSeconds, requestSender, baselineDelay)
        if err != nil {
            return false, "", err
        }
@@ -69,29 +69,29 @@ func checkTimingDependency(
            return false, "", nil
        }
        // Check the delay is greater than baseline by seconds requested
-       if delayRecieved < baselineDelay+float64(highSleepTimeSeconds)*0.8 {
+       if delayReceived < baselineDelay+float64(highSleepTimeSeconds)*0.8 {
            return false, "", nil
        }
        requestsSent = append(requestsSent, requestsSentMetadata{
            delay: highSleepTimeSeconds,
-           delayReceived: delayRecieved,
+           delayReceived: delayReceived,
        })
 
-       isCorrelationPossibleSecond, delayRecievedSecond, err := sendRequestAndTestConfidence(regression, int(DefaultLowSleepTimeSeconds), requestSender, baselineDelay)
+       isCorrelationPossibleSecond, delayReceivedSecond, err := sendRequestAndTestConfidence(regression, int(DefaultLowSleepTimeSeconds), requestSender, baselineDelay)
        if err != nil {
            return false, "", err
        }
        if !isCorrelationPossibleSecond {
            return false, "", nil
        }
-       if delayRecievedSecond < baselineDelay+float64(DefaultLowSleepTimeSeconds)*0.8 {
+       if delayReceivedSecond < baselineDelay+float64(DefaultLowSleepTimeSeconds)*0.8 {
            return false, "", nil
        }
        requestsLeft = requestsLeft - 2
 
        requestsSent = append(requestsSent, requestsSentMetadata{
            delay: int(DefaultLowSleepTimeSeconds),
-           delayReceived: delayRecievedSecond,
+           delayReceived: delayReceivedSecond,
        })
    }
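As a worked example of the 0.8 threshold used in both checks above: with a measured baselineDelay of 1s and highSleepTimeSeconds of 5, the first probe only counts as correlated if the observed delay is at least 1 + 0.8 × 5 = 5s; the follow-up probe must then clear the same bar with DefaultLowSleepTimeSeconds before the pair of requests is subtracted from requestsLeft.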
@@ -12,7 +12,7 @@ import (
 // and not arrays
 //
 // TODO: Support arrays + other JSON oddities by
-// adding more attirbutes to the map[string]interface{}
+// adding more attributes to the map[string]interface{}
 type JSON struct{}
 
 var (
@@ -21,14 +21,14 @@ import (
 // for parameters that are less likely to give results for a rule.
 type Tracker struct {
    frequencies gcache.Cache
-   paramOccurenceThreshold int
+   paramOccurrenceThreshold int
 
    isDebug bool
 }
 
 const (
    DefaultMaxTrackCount = 10000
-   DefaultParamOccurenceThreshold = 10
+   DefaultParamOccurrenceThreshold = 10
 )
 
 type cacheItem struct {
@@ -38,7 +38,7 @@ type cacheItem struct {
 
 // New creates a new frequency tracker with a given maximum
 // number of params to track in LRU fashion with a max error threshold
-func New(maxTrackCount, paramOccurenceThreshold int) *Tracker {
+func New(maxTrackCount, paramOccurrenceThreshold int) *Tracker {
    gc := gcache.New(maxTrackCount).ARC().Build()
 
    var isDebug bool
@@ -48,7 +48,7 @@ func New(maxTrackCount, paramOccurenceThreshold int) *Tracker {
    return &Tracker{
        isDebug: isDebug,
        frequencies: gc,
-       paramOccurenceThreshold: paramOccurenceThreshold,
+       paramOccurrenceThreshold: paramOccurrenceThreshold,
    }
 }
@@ -56,10 +56,10 @@ func (t *Tracker) Close() {
    t.frequencies.Purge()
 }
 
-// MarkParameter marks a parameter as frequently occuring once.
+// MarkParameter marks a parameter as frequently occurring once.
 //
-// The logic requires a parameter to be marked as frequently occuring
-// multiple times before it's considered as frequently occuring.
+// The logic requires a parameter to be marked as frequently occurring
+// multiple times before it's considered as frequently occurring.
 func (t *Tracker) MarkParameter(parameter, target, template string) {
    normalizedTarget := normalizeTarget(target)
    key := getFrequencyKey(parameter, normalizedTarget, template)
@@ -81,7 +81,7 @@ func (t *Tracker) MarkParameter(parameter, target, template string) {
    _ = t.frequencies.Set(key, existingCacheItemValue)
 }
 
-// IsParameterFrequent checks if a parameter is frequently occuring
+// IsParameterFrequent checks if a parameter is frequently occurring
 // in the input with no much results.
 func (t *Tracker) IsParameterFrequent(parameter, target, template string) bool {
    normalizedTarget := normalizeTarget(target)
@@ -97,7 +97,7 @@ func (t *Tracker) IsParameterFrequent(parameter, target, template string) bool {
    }
    existingCacheItemValue := existingCacheItem.(*cacheItem)
 
-   if existingCacheItemValue.errors.Load() >= int32(t.paramOccurenceThreshold) {
+   if existingCacheItemValue.errors.Load() >= int32(t.paramOccurrenceThreshold) {
        existingCacheItemValue.Do(func() {
            gologger.Verbose().Msgf("[%s] Skipped %s from parameter for %s as found uninteresting %d times", template, parameter, target, existingCacheItemValue.errors.Load())
        })
@@ -106,7 +106,7 @@ func (t *Tracker) IsParameterFrequent(parameter, target, template string) bool {
    return false
 }
 
-// UnmarkParameter unmarks a parameter as frequently occuring. This carries
+// UnmarkParameter unmarks a parameter as frequently occurring. This carries
 // more weight and resets the frequency counter for the parameter causing
 // it to be checked again. This is done when results are found.
 func (t *Tracker) UnmarkParameter(parameter, target, template string) {
@@ -136,7 +136,7 @@ func (rule *Rule) executePartComponentOnKV(input *ExecuteRuleInput, payload Valu
        return qerr
    }
 
-   // after building change back to original value to avoid repeating it in furthur requests
+   // after building change back to original value to avoid repeating it in further requests
    if origKey != "" {
        err = ruleComponent.SetValue(origKey, types.ToString(origValue)) // change back to previous value for temp
        if err != nil {
pkg/input/formats/testdata/openapi.yaml (vendored, 12 changed lines)
@@ -131,7 +131,7 @@ paths:
         required: true
       responses:
         '200':
-          description: Sucessfully created user
+          description: Successfully created user
           content:
             application/json:
               schema:
@@ -170,7 +170,7 @@ paths:
         required: true
       responses:
         '200':
-          description: Sucessfully logged in user
+          description: Successfully logged in user
           content:
             application/json:
               schema:
@@ -262,7 +262,7 @@ paths:
             example: 'name1'
       responses:
         '200':
-          description: Sucessfully deleted user
+          description: Successfully deleted user
           content:
             application/json:
               schema:
@@ -331,7 +331,7 @@ paths:
         required: true
       responses:
         '204':
-          description: Sucessfully updated user email
+          description: Successfully updated user email
           content: {}
         '400':
           description: Invalid request
@@ -389,7 +389,7 @@ paths:
         required: true
       responses:
         '204':
-          description: Sucessfully updated users password
+          description: Successfully updated users password
           content: {}
         '400':
           description: Invalid request
@@ -475,7 +475,7 @@ paths:
         required: true
       responses:
         '200':
-          description: Sucessfully added a book
+          description: Successfully added a book
           content:
             application/json:
               schema:
@@ -83,7 +83,7 @@ func UpdateIgnoreFile() error {
 }
 
 func doVersionCheck(isSDK bool) error {
-   // we use global retryablehttp client so its not immeditely gc'd if any references are held
+   // we use global retryablehttp client so its not immediately gc'd if any references are held
    // and according our config we have idle connections which are shown as leaked by goleak in tests
    // i.e we close all idle connections after our use and it doesn't affect any other part of the code
    defer retryableHttpClient.HTTPClient.CloseIdleConnections()
@@ -9,7 +9,7 @@ The Very First before making any type of contribution to javascript runtime in n
 
 ## Documentation/Typo Contribution
 
-Most of Javascript API Reference documentation is auto-generated with help of code-generation and [jsdocgen](./devtools/jsdocgen/README.md) and hence any type of documentation contribution are always welcome and can be done by editing [javscript jsdoc](./generated/js/) files
+Most of Javascript API Reference documentation is auto-generated with help of code-generation and [jsdocgen](./devtools/jsdocgen/README.md) and hence any type of documentation contribution are always welcome and can be done by editing [javascript jsdoc](./generated/js/) files
 
 
 ## Improving Existing Libraries(aka node_modules)
@@ -33,7 +33,7 @@ Libraries/node_modules represent adding new protocol or something similar and sh
 
 ## Adding Helper Objects/Types/Functions
 
-Helper objects/types/functions can simply be understood as javascript utils to simplify writing javscript and reduce code duplication in javascript templates. Helper functions/objects are divided into two categories
+Helper objects/types/functions can simply be understood as javascript utils to simplify writing javascript and reduce code duplication in javascript templates. Helper functions/objects are divided into two categories
 
 ### javascript based helpers
 
@@ -47,7 +47,7 @@ go based helpers are written in go and can import any go library if required. Mi
 
 ### Updating / Publishing Docs
 
-Javscript Protocol Documentation is auto-generated using [jsdoc] and is hosted at [js-proto-docs](https://projectdiscovery.github.io/js-proto-docs/). To update documentation, please follow steps mentioned at [projectdiscovery/js-proto-docs](https://github.com/projectdiscovery/js-proto-docs)
+Javascript Protocol Documentation is auto-generated using [jsdoc] and is hosted at [js-proto-docs](https://projectdiscovery.github.io/js-proto-docs/). To update documentation, please follow steps mentioned at [projectdiscovery/js-proto-docs](https://github.com/projectdiscovery/js-proto-docs)
 
 
 ### Go Code Guidelines
@@ -60,7 +60,7 @@ Javscript Protocol Documentation is auto-generated using [jsdoc] and is hosted a
        return false, protocolstate.ErrHostDenied.Msgf(host)
    }
 ```
-3. Keep exported package clean. Do not keep unncessary global exports which the consumer of the API doesn't need to know about. Keep only user-exposed API public.
+3. Keep exported package clean. Do not keep unnecessary global exports which the consumer of the API doesn't need to know about. Keep only user-exposed API public.
 4. Use timeouts and context cancellation when calling Network related stuff. Also make sure to close your connections or provide a mechanism to the user of the API to do so.
 5. Always try to return single types from inside javascript with an error like `(IsRDP, error)` instead of returning multiple values `(name, version string, err error)`. The second one will get converted to an array is much harder for consumers to deal with. Instead, try to return `Structures` which will be accessible natively.
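On guideline 5 above: returning one structure plus an error keeps the value usable as a native object inside the JS runtime, whereas multiple scalar returns collapse into an array that callers have to index. A minimal sketch of that shape, with hypothetical names (not part of this change):

```go
// Package rdpexample is a hypothetical illustration of guideline 5:
// bundle everything the caller needs into one struct and return it
// together with an error, instead of (name, version string, err error).
package rdpexample

// CheckRDPResponse is the single value exposed to the JS runtime.
type CheckRDPResponse struct {
	IsRDP   bool   // whether the service fingerprinted as RDP
	Name    string // product name, if detected
	Version string // product version, if detected
}

// CheckRDP returns one struct plus an error; the fields become natively
// accessible properties (resp.IsRDP, resp.Name) on the JS side.
func CheckRDP(host string, port int) (CheckRDPResponse, error) {
	// ... dial with a timeout, fingerprint the service, close the connection ...
	return CheckRDPResponse{IsRDP: false}, nil
}
```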
@@ -16,7 +16,7 @@ import (
 )
 
 var (
-   // ErrJSExecDeadline is the error returned when alloted time for script execution exceeds
+   // ErrJSExecDeadline is the error returned when allotted time for script execution exceeds
    ErrJSExecDeadline = errkit.New("js engine execution deadline exceeded").SetKind(errkit.ErrKindDeadline).Build()
 )
@@ -214,7 +214,7 @@ func createNewRuntime() *goja.Runtime {
    // by default import below modules every time
    _ = runtime.Set("console", require.Require(runtime, console.ModuleName))
 
-   // Register embedded javacript helpers
+   // Register embedded javascript helpers
    if err := global.RegisterNativeScripts(runtime); err != nil {
        gologger.Error().Msgf("Could not register scripts: %s\n", err)
    }
@@ -72,7 +72,7 @@ func (p *EntityParser) Parse() error {
    for _, file := range p.syntax {
        // Traverse the AST and find all relevant declarations
        ast.Inspect(file, func(n ast.Node) bool {
-           // look for funtions and methods
+           // look for functions and methods
            // and generate entities for them
            fn, ok := n.(*ast.FuncDecl)
            if ok {
@@ -64,7 +64,7 @@ export class MySQLClient {
 
 
    /**
-   * returns MySQLInfo when fingerpint is successful
+   * returns MySQLInfo when fingerprint is successful
    * @example
    * ```javascript
    * const mysql = require('nuclei/mysql');
@@ -41,7 +41,7 @@ func registerAdditionalHelpers(runtime *goja.Runtime) {
 
 func init() {
    // these are dummy functions we use trigger documentation generation
-   // actual definations are in exports.js
+   // actual definitions are in exports.js
    _ = gojs.RegisterFuncWithSignature(nil, gojs.FuncOpts{
        Name: "to_json",
        Signatures: []string{
@@ -127,7 +127,7 @@ type (
    }
 )
 
-// returns MySQLInfo when fingerpint is successful
+// returns MySQLInfo when fingerprint is successful
 // @example
 // ```javascript
 // const mysql = require('nuclei/mysql');
@@ -66,7 +66,7 @@ final.append(packet.bytes());
 
 console.log("Netbios", netbios.hex(), netbios.len());
 console.log("Header", header.hex(), header.len());
-console.log("Negotation", negotiation.hex(), negotiation.len());
+console.log("Negotiation", negotiation.hex(), negotiation.len());
 console.log("Packet", final.hex(), final.len());
 
 const c = require("nuclei/libnet");
@@ -439,7 +439,7 @@ func getJSONLogRequestFromError(templatePath, input, requestType string, request
            request.Attrs = slog.GroupValue(errX.Attrs()...)
        }
    }
-   // check if address slog attr is avaiable in error if set use it
+   // check if address slog attr is available in error if set use it
    if val := errkit.GetAttrValue(requestErr, "address"); val.Any() != nil {
        request.Address = val.String()
    }
@@ -41,7 +41,7 @@ const (
 var (
    // pythonEnvRegexCompiled is the compiled regex for python environment variables
    pythonEnvRegexCompiled = regexp.MustCompile(pythonEnvRegex)
-   // ErrCodeExecutionDeadline is the error returned when alloted time for script execution exceeds
+   // ErrCodeExecutionDeadline is the error returned when allotted time for script execution exceeds
    ErrCodeExecutionDeadline = errkit.New("code execution deadline exceeded").SetKind(errkit.ErrKindDeadline).Build()
 )
@@ -279,7 +279,7 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicVa
        fmt.Fprintf(sb, "\n%v\n%v\n%v\n", dashes, "Command Output:", dashes)
        sb.WriteString(gOutput.DebugData.String())
        sb.WriteString("\n")
-       sb.WriteString("[WRN] Command Output here is stdout+sterr, in response variables they are seperate (use -v -svd flags for more details)")
+       sb.WriteString("[WRN] Command Output here is stdout+sterr, in response variables they are separate (use -v -svd flags for more details)")
        return sb.String()
    })
 }
@@ -75,7 +75,7 @@ type aggressionLevelToPayloads struct {
 
 // parsePayloadsWithAggression parses the payloads with the aggression level
 //
-// Three agression are supported -
+// Three aggression are supported -
 //   - low
 //   - medium
 //   - high
@@ -83,7 +83,7 @@ type aggressionLevelToPayloads struct {
 // low is the default level. If medium is specified, all templates from
 // low and medium are executed. Similarly with high, including all templates
 // from low, medium, high.
-func parsePayloadsWithAggression(name string, v map[interface{}]interface{}, agression string) (map[string]interface{}, error) {
+func parsePayloadsWithAggression(name string, v map[interface{}]interface{}, aggression string) (map[string]interface{}, error) {
    payloadsLevels := &aggressionLevelToPayloads{}
 
    for k, v := range v {
@@ -107,7 +107,7 @@ func parsePayloadsWithAggression(name string, v map[interface{}]interface{}, agr
    }
 
    payloads := make(map[string]interface{})
-   switch agression {
+   switch aggression {
    case "low":
        payloads[name] = payloadsLevels.Low
    case "medium":
@@ -116,7 +116,7 @@ func parsePayloadsWithAggression(name string, v map[interface{}]interface{}, agr
        payloads[name] = append(payloadsLevels.Low, payloadsLevels.Medium...)
        payloads[name] = append(payloads[name].([]interface{}), payloadsLevels.High...)
    default:
-       return nil, errors.Errorf("invalid aggression level %s specified for %s", agression, name)
+       return nil, errors.Errorf("invalid aggression level %s specified for %s", aggression, name)
    }
    return payloads, nil
 }
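To make the level-merge behaviour documented above concrete: low selects only the low payloads, medium selects low + medium, and high selects all three. A small standalone sketch with illustrative values (hypothetical data, not part of this change):

```go
package main

import "fmt"

func main() {
	// Hypothetical payload sets per aggression level.
	low := []interface{}{"basic-1", "basic-2"}
	medium := []interface{}{"extended-1"}
	high := []interface{}{"aggressive-1"}

	selected := "medium" // e.g. the value passed for the aggression argument

	var payloads []interface{}
	switch selected {
	case "low":
		payloads = low
	case "medium":
		payloads = append(low, medium...) // low + medium
	case "high":
		payloads = append(append(low, medium...), high...) // everything
	}
	fmt.Println(payloads) // [basic-1 basic-2 extended-1]
}
```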
@@ -33,8 +33,8 @@ import (
 var (
    errinvalidArguments = errkit.New("invalid arguments provided")
    ErrLFAccessDenied = errkit.New("Use -allow-local-file-access flag to enable local file access")
-   // ErrActionExecDealine is the error returned when alloted time for action execution exceeds
-   ErrActionExecDealine = errkit.New("headless action execution deadline exceeded").SetKind(errkit.ErrKindDeadline).Build()
+   // ErrActionExecDeadline is the error returned when allotted time for action execution exceeds
+   ErrActionExecDeadline = errkit.New("headless action execution deadline exceeded").SetKind(errkit.ErrKindDeadline).Build()
 )
 
 const (
@@ -824,7 +824,7 @@ func (p *Page) WaitEvent(act *Action, out ActionData) (func() error, error) {
    // Just wait the event to happen
    waitFunc := func() (err error) {
        // execute actual wait event
-       ctx, cancel := context.WithTimeoutCause(context.Background(), maxDuration, ErrActionExecDealine)
+       ctx, cancel := context.WithTimeoutCause(context.Background(), maxDuration, ErrActionExecDeadline)
        defer cancel()
 
        err = contextutil.ExecFunc(ctx, p.page.WaitEvent(waitEvent))
@@ -645,7 +645,7 @@ func testHeadless(t *testing.T, actions []*Action, timeout time.Duration, handle
    t.Helper()
 
    lfa := envutil.GetEnvOrDefault("LOCAL_FILE_ACCESS", true)
-   rna := envutil.GetEnvOrDefault("RESTRICED_LOCAL_NETWORK_ACCESS", false)
+   rna := envutil.GetEnvOrDefault("RESTRICTED_LOCAL_NETWORK_ACCESS", false)
 
    opts := &types.Options{AllowLocalFileAccess: lfa, RestrictLocalNetworkAccess: rna}
 
@@ -736,9 +736,9 @@ func TestBlockedHeadlessURLS(t *testing.T) {
        "fTP://example.com:21\r\n",
        "ftp://example.com:21",
        "chrome://settings",
-       " chROme://version",
+       " chRSome://version",
        "chrome-extension://version\r",
-       " chrOme-EXTension://settings",
+       " chrSome-EXTension://settings",
        "view-source:file:/etc/hosts",
    }
@@ -59,9 +59,9 @@ const (
 
 var (
    MaxBodyRead = 10 * unitutils.Mega
-   // ErrMissingVars is error occured when variables are missing
+   // ErrMissingVars is error occurred when variables are missing
    ErrMissingVars = errkit.New("stop execution due to unresolved variables").SetKind(nucleierr.ErrTemplateLogic).Build()
-   // ErrHttpEngineRequestDeadline is error occured when request deadline set by http request engine is exceeded
+   // ErrHttpEngineRequestDeadline is error occurred when request deadline set by http request engine is exceeded
    ErrHttpEngineRequestDeadline = errkit.New("http request engine deadline exceeded").SetKind(errkit.ErrKindDeadline).Build()
 )
@@ -150,7 +150,7 @@ func (request *Request) executeRaceRequest(input *contextargs.Context, previous
 
    // look for unresponsive hosts and cancel inflight requests as well
    spmHandler.SetOnResultCallback(func(err error) {
-       // marks thsi host as unresponsive if applicable
+       // marks this host as unresponsive if applicable
        request.markHostError(input, err)
        if request.isUnresponsiveAddress(input) {
            // stop all inflight requests
@@ -203,7 +203,7 @@ func (request *Request) executeParallelHTTP(input *contextargs.Context, dynamicV
    }
 
    // Stop-at-first-match logic while executing requests
-   // parallely using threads
+   // parallelly using threads
    shouldStop := (request.options.Options.StopAtFirstMatch || request.StopAtFirstMatch || request.options.StopAtFirstMatch)
 
    ctx, cancel := context.WithCancel(context.Background())
@@ -232,7 +232,7 @@ func (request *Request) executeParallelHTTP(input *contextargs.Context, dynamicV
 
    // look for unresponsive hosts and cancel inflight requests as well
    spmHandler.SetOnResultCallback(func(err error) {
-       // marks thsi host as unresponsive if applicable
+       // marks this host as unresponsive if applicable
        request.markHostError(input, err)
        if request.isUnresponsiveAddress(input) {
            // stop all inflight requests
@@ -390,7 +390,7 @@ func (request *Request) executeTurboHTTP(input *contextargs.Context, dynamicValu
    maxWorkers := max(pipeOptions.MaxPendingRequests, defaultMaxWorkers)
 
    // Stop-at-first-match logic while executing requests
-   // parallely using threads
+   // parallelly using threads
    shouldStop := (request.options.Options.StopAtFirstMatch || request.StopAtFirstMatch || request.options.StopAtFirstMatch)
 
    ctx, cancel := context.WithCancel(context.Background())
@@ -419,7 +419,7 @@ func (request *Request) executeTurboHTTP(input *contextargs.Context, dynamicValu
 
    // look for unresponsive hosts and cancel inflight requests as well
    spmHandler.SetOnResultCallback(func(err error) {
-       // marks thsi host as unresponsive if applicable
+       // marks this host as unresponsive if applicable
        request.markHostError(input, err)
        if request.isUnresponsiveAddress(input) {
            // stop all inflight requests
@@ -953,7 +953,7 @@ func (request *Request) executeRequest(input *contextargs.Context, generatedRequ
        }
    })
 
-   // evaluate responses continiously until first redirect request in reverse order
+   // evaluate responses continuously until first redirect request in reverse order
    for respChain.Has() {
        // fill buffers, read response body and reuse connection
        if err := respChain.Fill(); err != nil {
@@ -31,12 +31,12 @@ var (
    reOnceAnnotation = regexp.MustCompile(`(?m)^@once\s*$`)
 
    // ErrTimeoutAnnotationDeadline is the error returned when a specific amount of time was exceeded for a request
-   // which was alloted using @timeout annotation this usually means that vulnerability was not found
+   // which was allotted using @timeout annotation this usually means that vulnerability was not found
    // in rare case it could also happen due to network congestion
    // the assigned class is TemplateLogic since this in almost every case means that server is not vulnerable
    ErrTimeoutAnnotationDeadline = errkit.New("timeout annotation deadline exceeded").SetKind(nucleierr.ErrTemplateLogic).Build()
    // ErrRequestTimeoutDeadline is the error returned when a specific amount of time was exceeded for a request
-   // this happens when the request execution exceeds alloted time
+   // this happens when the request execution exceeds allotted time
    ErrRequestTimeoutDeadline = errkit.New("request timeout deadline exceeded when notimeout is set").SetKind(errkit.ErrKindDeadline).Build()
 )
@@ -59,7 +59,7 @@ type Request struct {
    // Inputs contains inputs for the network socket
    Inputs []*Input `yaml:"inputs,omitempty" json:"inputs,omitempty" jsonschema:"title=inputs for the network request,description=Inputs contains any input/output for the current request"`
    // description: |
-   //   Port is the port to send network requests to. this acts as default port but is overriden if target/input contains
+   //   Port is the port to send network requests to. this acts as default port but is overridden if target/input contains
    //   non-http(s) ports like 80,8080,8081 etc
    Port string `yaml:"port,omitempty" json:"port,omitempty" jsonschema:"title=port to send requests to,description=Port to send network requests to,oneof_type=string;integer"`
@@ -336,7 +336,7 @@ type Request interface {
 
 type OutputEventCallback func(result *output.InternalWrappedEvent)
 
 func MakeDefaultResultEvent(request Request, wrapped *output.InternalWrappedEvent) []*output.ResultEvent {
-   // Note: operator result is generated if something was succesfull match/extract/dynamic-extract
+   // Note: operator result is generated if something was successful match/extract/dynamic-extract
    // but results should not be generated if
    // 1. no match was found and some dynamic values were extracted
    // 2. if something was extracted (matchers exist but no match was found)
@@ -41,7 +41,7 @@ func UnmarshalGraphQL(data []byte, v any) error {
        }
    }
 
-// decoder is a JSON decoder that performs custom unmarshaling behavior
+// decoder is a JSON decoder that performs custom unmarshalling behavior
 // for GraphQL query data structures. It's implemented on top of a JSON tokenizer.
 type decoder struct {
    tokenizer interface {
@@ -54,7 +54,7 @@ type decoder struct {
    // Stacks of values where to unmarshal.
    // The top of each stack is the reflect.Value where to unmarshal next JSON value.
    //
-   // The reason there's more than one stack is because we might be unmarshaling
+   // The reason there's more than one stack is because we might be unmarshalling
    // a single JSON value into multiple GraphQL fragments or embedded structs, so
    // we keep track of them all.
    vs [][]reflect.Value
@@ -65,7 +65,7 @@ func (s *ScanContext) GenerateResult() []*output.ResultEvent {
    return s.results
 }
 
-// LogEvent logs events to all events and triggeres any callbacks
+// LogEvent logs events to all events and triggers any callbacks
 func (s *ScanContext) LogEvent(e *output.InternalWrappedEvent) {
    s.m.Lock()
    defer s.m.Unlock()
@@ -88,7 +88,7 @@ func (s *ScanContext) LogEvent(e *output.InternalWrappedEvent) {
    s.results = append(s.results, e.Results...)
 }
 
-// LogError logs error to all events and triggeres any callbacks
+// LogError logs error to all events and triggers any callbacks
 func (s *ScanContext) LogError(err error) {
    s.m.Lock()
    defer s.m.Unlock()
@@ -32,7 +32,7 @@ import (
 var (
    ErrCreateTemplateExecutor = errors.New("cannot create template executer")
    ErrIncompatibleWithOfflineMatching = errors.New("template can't be used for offline matching")
-   // track how many templates are verfied and by which signer
+   // track how many templates are verified and by which signer
    SignatureStats = map[string]*atomic.Uint64{}
 )
@@ -40,7 +40,7 @@ func UseOptionsForSigner(opts *types.Options) {
 // New Signer/Verification logic requires it to load content of file references
 // and this is done respecting sandbox restrictions to avoid any security issues
 // AllowLocalFileAccess is a function that allows local file access by disabling sandbox restrictions
-// and **MUST** be called before signing / verifying any templates for intialization
+// and **MUST** be called before signing / verifying any templates for initialization
 func TemplateSignerLFA() {
    defaultOpts.AllowLocalFileAccess = true
 }
@@ -1402,7 +1402,7 @@ func init() {
    NETWORKRequestDoc.Fields[6].Name = "port"
    NETWORKRequestDoc.Fields[6].Type = "string"
    NETWORKRequestDoc.Fields[6].Note = ""
-   NETWORKRequestDoc.Fields[6].Description = "description: |\n Port is the port to send network requests to. this acts as default port but is overriden if target/input contains\n non-http(s) ports like 80,8080,8081 etc"
+   NETWORKRequestDoc.Fields[6].Description = "description: |\n Port is the port to send network requests to. this acts as default port but is overridden if target/input contains\n non-http(s) ports like 80,8080,8081 etc"
    NETWORKRequestDoc.Fields[6].Comments[encoder.LineComment] = " description: |"
    NETWORKRequestDoc.Fields[7].Name = "exclude-ports"
    NETWORKRequestDoc.Fields[7].Type = "string"
@@ -2107,8 +2107,8 @@ func init() {
    JAVASCRIPTRequestDoc.Fields[7].Name = "threads"
    JAVASCRIPTRequestDoc.Fields[7].Type = "int"
    JAVASCRIPTRequestDoc.Fields[7].Note = ""
-   JAVASCRIPTRequestDoc.Fields[7].Description = "Payload concurreny i.e threads for sending requests."
-   JAVASCRIPTRequestDoc.Fields[7].Comments[encoder.LineComment] = "Payload concurreny i.e threads for sending requests."
+   JAVASCRIPTRequestDoc.Fields[7].Description = "Payload concurrency i.e threads for sending requests."
+   JAVASCRIPTRequestDoc.Fields[7].Comments[encoder.LineComment] = "Payload concurrency i.e threads for sending requests."
 
    JAVASCRIPTRequestDoc.Fields[7].AddExample("Send requests using 10 concurrent threads", 10)
    JAVASCRIPTRequestDoc.Fields[8].Name = "payloads"
@@ -104,7 +104,7 @@ func parseWorkflowTemplate(workflow *workflows.WorkflowTemplate, preprocessor Pr
            }
            continue
        } else if !template.Verified {
-           // unverfied code templates are not allowed in workflows
+           // unverified code templates are not allowed in workflows
            gologger.Warning().Msgf("skipping unverified code template from workflow: %v\n", path)
            continue
        }
@@ -1,6 +1,6 @@
 # tmplexec
 
-tmplexec also known as template executer executes template it is different from `protocols` package which only contains logic within the scope of one protocol. tmplexec is resposible for executing `Template` with defined logic. with introduction of `multi protocol` and `flow` templates (deprecated package protocols/common/executer) did not seem appropriate/helpful anymore as it is outside of protocol scope and deals with execution of template which can contain 1 requests , or multiple requests of same protocol or multiple requests of different protocols. tmplexec is responsible for executing template and handling all logic related to it.
+tmplexec also known as template executer executes template it is different from `protocols` package which only contains logic within the scope of one protocol. tmplexec is responsible for executing `Template` with defined logic. with introduction of `multi protocol` and `flow` templates (deprecated package protocols/common/executer) did not seem appropriate/helpful anymore as it is outside of protocol scope and deals with execution of template which can contain 1 requests , or multiple requests of same protocol or multiple requests of different protocols. tmplexec is responsible for executing template and handling all logic related to it.
 
 ## Engine/Backends
 
@@ -114,7 +114,7 @@ To better understand orchestration we can try to build a template for vhost enum
 
 **for basic vhost enumeration a template should**
 - do a PTR lookup for given ip
-- get SSL ceritificate for given ip (i.e tls-grab)
+- get SSL certificate for given ip (i.e tls-grab)
 - extract subject_cn from certificate
 - extract subject_alt_names(SAN) from certificate
 - filter out wildcard prefix from above values
@@ -316,4 +316,4 @@ This section contains a brief description of all nuclei JS bindings and their us
 And that's it , this automatically converts any slice/array to map and removes duplicates from it and returns a slice/array of unique values
 
 ------
-> Similar to DSL helper functions . we can either use built in functions available with `Javscript (ECMAScript 5.1)` or use DSL helper functions and its upto user to decide which one to uses
+> Similar to DSL helper functions . we can either use built in functions available with `Javascript (ECMAScript 5.1)` or use DSL helper functions and its upto user to decide which one to uses
@@ -66,7 +66,7 @@ func (m *MultiProtocol) ExecuteWithResults(ctx *scan.ScanContext) error {
 
    // template context: contains values extracted using `internal` extractor from previous protocols
    // these values are extracted from each protocol in queue and are passed to next protocol in queue
-   // instead of adding seperator field to handle such cases these values are appended to `dynamicValues` (which are meant to be used in workflows)
+   // instead of adding separator field to handle such cases these values are appended to `dynamicValues` (which are meant to be used in workflows)
    // this makes it possible to use multi protocol templates in workflows
    // Note: internal extractor values take precedence over dynamicValues from workflows (i.e other templates in workflow)
@@ -7,7 +7,7 @@ import (
 )
 
 var (
-   // ErrTemplateLogic are errors that occured due to missing variable or something similar in template logic
+   // ErrTemplateLogic are errors that occurred due to missing variable or something similar in template logic
    // so this is more of a virtual error that is expected due to template logic
    ErrTemplateLogic = errkit.NewPrimitiveErrKind("TemplateLogic", "Error expected due to template logic", isTemplateLogicKind)
 )
@@ -6,7 +6,7 @@
 // implementation.
 //
 // This package acts as a wrapper around the underlying JSON APIs, offering
-// standard operations such as marshaling, unmarshaling, and working with JSON
+// standard operations such as marshaling, unmarshalling, and working with JSON
 // encoders/decoders. It maintains compatibility with the standard encoding/json
 // interfaces while delivering improved performance when possible.
 //