
Merge pull request #371 from projectdiscovery/feature-race-conditions
Synced Race Condition Attack
Mzack9999 committed Oct 23, 2020
2 parents 7728335 + cb5ceed commit bf54f9f
Showing 3 changed files with 165 additions and 7 deletions.
57 changes: 51 additions & 6 deletions v2/pkg/executer/executer_http.go
@@ -146,6 +146,48 @@ func NewHTTPExecuter(options *HTTPOptions) (*HTTPExecuter, error) {
return executer, nil
}

func (e *HTTPExecuter) ExecuteRaceRequest(reqURL string) *Result {
result := &Result{
Matches: make(map[string]interface{}),
Extractions: make(map[string]interface{}),
}

dynamicvalues := make(map[string]interface{})

// verify if the URL is already being processed
if e.bulkHTTPRequest.HasGenerator(reqURL) {
return result
}

e.bulkHTTPRequest.CreateGenerator(reqURL)

// Workers that keep enqueuing new requests
maxWorkers := e.bulkHTTPRequest.RaceNumberRequests
swg := sizedwaitgroup.New(maxWorkers)
for i := 0; i < e.bulkHTTPRequest.RaceNumberRequests; i++ {
swg.Add()
// base request
request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
if err != nil {
result.Error = err
return result
}
go func(httpRequest *requests.HTTPRequest) {
defer swg.Done()

// If the request was built correctly then execute it
err = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, "")
if err != nil {
result.Error = errors.Wrap(err, "could not handle http request")
}
}(request)
}

swg.Wait()

return result
}
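
Note: the loop above only dispatches the raced requests. The tight time alignment comes from the request body itself: for race templates, handleRawWithPaylods (below) wraps the body with the new syncedreadcloser package, which withholds the final bytes of every request until a shared timer opens the gate.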

func (e *HTTPExecuter) ExecuteParallelHTTP(p progress.IProgress, reqURL string) *Result {
result := &Result{
Matches: make(map[string]interface{}),
@@ -196,7 +238,7 @@ func (e *HTTPExecuter) ExecuteParallelHTTP(p progress.IProgress, reqURL string)
return result
}

-func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
+func (e *HTTPExecuter) ExecuteTurboHTTP(reqURL string) *Result {
result := &Result{
Matches: make(map[string]interface{}),
Extractions: make(map[string]interface{}),
@@ -209,7 +251,6 @@ func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
return result
}

-remaining := e.bulkHTTPRequest.GetRequestCount()
e.bulkHTTPRequest.CreateGenerator(reqURL)

// need to extract the target from the url
@@ -240,7 +281,6 @@ func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
request, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))
if err != nil {
result.Error = err
-p.Drop(remaining)
} else {
swg.Add()
go func(httpRequest *requests.HTTPRequest) {
@@ -254,7 +294,6 @@ func (e *HTTPExecuter) ExecuteTurboHTTP(p progress.IProgress, reqURL string) *Result {
if err != nil {
e.traceLog.Request(e.template.ID, reqURL, "http", err)
result.Error = errors.Wrap(err, "could not handle http request")
-p.Drop(remaining)
} else {
e.traceLog.Request(e.template.ID, reqURL, "http", nil)
}
@@ -274,9 +313,15 @@ func (e *HTTPExecuter) ExecuteHTTP(p progress.IProgress, reqURL string) *Result {
func (e *HTTPExecuter) ExecuteHTTP(p progress.IProgress, reqURL string) *Result {
// verify if pipeline was requested
if e.bulkHTTPRequest.Pipeline {
-return e.ExecuteTurboHTTP(p, reqURL)
+return e.ExecuteTurboHTTP(reqURL)
}

// verify if a basic race condition was requested
if e.bulkHTTPRequest.Race && e.bulkHTTPRequest.RaceNumberRequests > 0 {
return e.ExecuteRaceRequest(reqURL)
}

// verify if parallel processing was requested
if e.bulkHTTPRequest.Threads > 0 {
return e.ExecuteParallelHTTP(p, reqURL)
}
@@ -602,7 +647,7 @@ func makeCheckRedirectFunc(followRedirects bool, maxRedirects int) checkRedirect
func (e *HTTPExecuter) setCustomHeaders(r *requests.HTTPRequest) {
for _, customHeader := range e.customHeaders {
// This should be pre-computed somewhere and done only once
-tokens := strings.SplitN(customHeader, ":", 2)
+tokens := strings.SplitN(customHeader, ":", two)
// if it's an invalid header skip it
if len(tokens) < two {
continue
18 changes: 17 additions & 1 deletion v2/pkg/requests/bulk-http-request.go
@@ -4,17 +4,20 @@ import (
"bufio"
"context"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"time"

"github.com/Knetic/govaluate"
"github.com/projectdiscovery/nuclei/v2/pkg/extractors"
"github.com/projectdiscovery/nuclei/v2/pkg/generators"
"github.com/projectdiscovery/nuclei/v2/pkg/matchers"
"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
"github.com/projectdiscovery/rawhttp"
retryablehttp "github.com/projectdiscovery/retryablehttp-go"
)
@@ -77,6 +80,11 @@ type BulkHTTPRequest struct {
DisableAutoHostname bool `yaml:"disable-automatic-host-header,omitempty"`
// DisableAutoContentLength Enable/Disable Content-Length header for unsafe raw requests
DisableAutoContentLength bool `yaml:"disable-automatic-content-length-header,omitempty"`
// Race determines if all the requests have to be attempted at the same time
// The minimum number of requests is determined by threads
Race bool `yaml:"race,omitempty"`
// Number of identical requests to send in the race condition attack
RaceNumberRequests int `yaml:"race_count,omitempty"`
}

// GetMatchersCondition returns the condition for the matcher
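
A minimal sketch (not part of the commit) of how the two new options are expected to deserialize; the cut-down struct and the template fragment below are illustrative stand-ins for the real BulkHTTPRequest:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// raceOptions is a hypothetical cut-down stand-in for BulkHTTPRequest,
// carrying only the two new fields with the same YAML tags.
type raceOptions struct {
	Race               bool `yaml:"race,omitempty"`
	RaceNumberRequests int  `yaml:"race_count,omitempty"`
}

func main() {
	// Illustrative template fragment: send ten identical requests at once.
	fragment := "race: true\nrace_count: 10\n"

	var opts raceOptions
	if err := yaml.Unmarshal([]byte(fragment), &opts); err != nil {
		panic(err)
	}
	fmt.Printf("race=%v count=%d\n", opts.Race, opts.RaceNumberRequests)
}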
@@ -235,7 +243,15 @@ func (r *BulkHTTPRequest) handleRawWithPaylods(ctx context.Context, raw, baseURL
}

// retryablehttp
-req, err := http.NewRequestWithContext(ctx, rawRequest.Method, rawRequest.FullURL, strings.NewReader(rawRequest.Data))
var body io.ReadCloser
body = ioutil.NopCloser(strings.NewReader(rawRequest.Data))
if r.Race {
// This more or less ensures that all requests hit the endpoint at approximately the same time
// TODO: sync internally upon writing the last request byte
body = syncedreadcloser.NewOpenGateWithTimeout(body, time.Duration(two)*time.Second)
}

req, err := http.NewRequestWithContext(ctx, rawRequest.Method, rawRequest.FullURL, body)
if err != nil {
return nil, err
}
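
With this wrapper in place a raced request streams its body normally but blocks just before the final bytes; once the gate opens after the timeout (time.Duration(two)*time.Second, presumably two seconds), all in-flight requests complete within a very small window.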
97 changes: 97 additions & 0 deletions v2/pkg/syncedreadcloser/syncedreadcloser.go
@@ -0,0 +1,97 @@
package syncedreadcloser

import (
"fmt"
"io"
"io/ioutil"
"time"
)

// SyncedReadCloser is an io.ReadCloser that is also compatible with io.ReadSeeker
type SyncedReadCloser struct {
data []byte
p int64
length int64
opengate chan struct{}
enableBlocking bool
}

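// New reads r fully into memory, closes it, and returns a gated reader
// (or nil if the body cannot be read).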
func New(r io.ReadCloser) *SyncedReadCloser {
var (
s SyncedReadCloser
err error
)
s.data, err = ioutil.ReadAll(r)
if err != nil {
return nil
}
r.Close()
s.length = int64(len(s.data))
s.opengate = make(chan struct{})
s.enableBlocking = true

return &s
}

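// NewOpenGateWithTimeout wraps r and schedules the gate to open after d.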
func NewOpenGateWithTimeout(r io.ReadCloser, d time.Duration) *SyncedReadCloser {
s := New(r)
s.OpenGateAfter(d)

return s
}

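// SetOpenGate enables or disables blocking on the gate.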
func (s *SyncedReadCloser) SetOpenGate(status bool) {
s.enableBlocking = status
}

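// OpenGate releases a blocked Read (it blocks until a reader is waiting on the gate).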
func (s *SyncedReadCloser) OpenGate() {
s.opengate <- struct{}{}
}

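// OpenGateAfter releases a blocked Read once the given duration has elapsed.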
func (s *SyncedReadCloser) OpenGateAfter(d time.Duration) {
time.AfterFunc(d, func() {
s.opengate <- struct{}{}
})
}

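// Seek repositions the read pointer; note that io.SeekStart rewinds to the
// beginning regardless of offset.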
func (s *SyncedReadCloser) Seek(offset int64, whence int) (int64, error) {
var err error
switch whence {
case io.SeekStart:
s.p = 0
case io.SeekCurrent:
if s.p+offset < s.length {
s.p += offset
break
}
err = fmt.Errorf("offset is too big")
case io.SeekEnd:
if s.length-offset >= 0 {
s.p = s.length - offset
break
}
err = fmt.Errorf("offset is too big")
}
return s.p, err
}

func (s *SyncedReadCloser) Read(p []byte) (n int, err error) {
// If the remaining data fits in the buffer, block awaiting the sync instruction
if s.p+int64(len(p)) >= s.length && s.enableBlocking {
<-s.opengate
}
n = copy(p, s.data[s.p:])
s.p += int64(n)
if s.p == s.length {
err = io.EOF
}
return n, err
}

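// Close is a no-op: the underlying body was already consumed and closed by New.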
func (s *SyncedReadCloser) Close() error {
return nil
}

func (s *SyncedReadCloser) Len() int {
return int(s.length)
}
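
A minimal usage sketch for the package above (illustrative, not part of the commit): ReadAll consumes the gated body, and the final chunk is held back until the gate opens after one second.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
	"time"

	"github.com/projectdiscovery/nuclei/v2/pkg/syncedreadcloser"
)

func main() {
	body := ioutil.NopCloser(strings.NewReader("param=value"))
	// Hold back the tail of the body for one second.
	gated := syncedreadcloser.NewOpenGateWithTimeout(body, time.Second)

	start := time.Now()
	// ReadAll blocks on the final chunk until the timer opens the gate.
	data, err := ioutil.ReadAll(gated)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes after %s\n", len(data), time.Since(start).Round(time.Millisecond))
}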
