GOCBC-428: Make search parse errors as object or array
Motivation
----------
Errors in the search response can be an object or an array, not just an array.

Changes
-------
Make search parse the errors as an interface{} and then type-assert
them into the correct format, still returning a []string.
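
As a standalone sketch of the decode-then-assert approach (illustrative only, not the gocb code; note that encoding/json decodes a bare JSON array into []interface{} when the target is interface{}, so the sketch switches on []interface{} and map[string]interface{}):

package main

import (
	"encoding/json"
	"fmt"
)

// errorsToStrings normalizes an "errors" field that the server may send
// either as an array of strings or as an object of location -> message.
func errorsToStrings(raw interface{}) ([]string, error) {
	switch errs := raw.(type) {
	case []interface{}: // JSON array
		out := make([]string, 0, len(errs))
		for _, e := range errs {
			msg, ok := e.(string)
			if !ok {
				return nil, fmt.Errorf("could not parse errors")
			}
			out = append(out, msg)
		}
		return out, nil
	case map[string]interface{}: // JSON object
		out := make([]string, 0, len(errs))
		for k, v := range errs {
			msg, ok := v.(string)
			if !ok {
				return nil, fmt.Errorf("could not parse errors")
			}
			out = append(out, fmt.Sprintf("%s-%s", k, msg))
		}
		return out, nil
	default:
		return nil, fmt.Errorf("could not parse errors")
	}
}

func main() {
	for _, payload := range []string{
		`{"errors": ["context deadline exceeded"]}`,
		`{"errors": {"pindex_a": "context deadline exceeded"}}`,
	} {
		var status struct {
			Errors interface{} `json:"errors,omitempty"`
		}
		if err := json.Unmarshal([]byte(payload), &status); err != nil {
			panic(err)
		}
		fmt.Println(errorsToStrings(status.Errors))
	}
}

Since Go randomizes map iteration order, the object form yields its messages in no particular order.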

Change-Id: Iecfd553eeeddc7f525001688b18ca97e9eb1d19b
Reviewed-on: http://review.couchbase.org/107797
Reviewed-by: Brett Lawson <brett19@gmail.com>
Tested-by: Charles Dixon <chvckd@gmail.com>
chvck committed Apr 16, 2019
1 parent 4a9cd52 commit 51c89c2
Showing 3 changed files with 117 additions and 40 deletions.
77 changes: 37 additions & 40 deletions cluster_searchquery.go
@@ -79,6 +79,15 @@ type searchResultStatus struct {
 	Errors []string `json:"errors,omitempty"`
 }
 
+// The response from the server can contain errors as either array or object so we use this as an intermediary
+// between response and result.
+type searchResponseStatus struct {
+	Total      int         `json:"total,omitempty"`
+	Failed     int         `json:"failed,omitempty"`
+	Successful int         `json:"successful,omitempty"`
+	Errors     interface{} `json:"errors,omitempty"`
+}
+
 // SearchResultsMetadata provides access to the metadata properties of a search query result.
 type SearchResultsMetadata struct {
 	status SearchResultStatus
@@ -240,7 +249,7 @@ func (r *SearchResults) readAttribute(decoder *json.Decoder, t json.Token) (bool
 			return false, nil
 		}
 
-		var status searchResultStatus
+		var status searchResponseStatus
 		err := decoder.Decode(&status)
 		if err != nil {
 			return false, err
@@ -250,12 +259,27 @@ func (r *SearchResults) readAttribute(decoder *json.Decoder, t json.Token) (bool
 		r.metadata.status.Successful = status.Successful
 		r.metadata.status.Failed = status.Failed
 
-		if len(status.Errors) > 0 {
-			errs := make([]SearchError, len(status.Errors))
-			for _, err := range status.Errors {
-				errs = append(errs, searchError{
+		var statusErrors []string
+		if statusError, ok := status.Errors.([]string); ok {
+			statusErrors = statusError
+		} else if statusError, ok := status.Errors.(map[string]interface{}); ok {
+			for k, v := range statusError {
+				msg, ok := v.(string)
+				if !ok {
+					return false, errors.New("could not parse errors")
+				}
+				statusErrors = append(statusErrors, fmt.Sprintf("%s-%s", k, msg))
+			}
+		} else {
+			return false, errors.New("could not parse errors")
+		}
+
+		if len(statusErrors) > 0 {
+			errs := make([]SearchError, len(statusErrors))
+			for i, err := range statusErrors {
+				errs[i] = searchError{
 					message: err,
-				})
+				}
 			}
 			r.err = searchMultiError{
 				errors: errs,
@@ -283,46 +307,19 @@ func (r *SearchResults) readAttribute(decoder *json.Decoder, t json.Token) (bool
 		if err != nil {
 			return false, err
 		}
-	case "errors":
-		var respErrs []searchError
-		err := decoder.Decode(&respErrs)
-		if err != nil {
-			return false, err
-		}
-		if len(respErrs) > 0 {
-			errs := make([]SearchError, len(respErrs))
-			for i, e := range respErrs {
-				errs[i] = e
-			}
-			// this isn't an error that we want to bail on so store it and keep going
-			r.err = searchMultiError{
-				errors: errs,
-				endpoint: r.metadata.sourceAddr,
-				httpStatus: r.httpStatus,
-			}
-		}
-	case "error":
-		var sErr string
-		err := decoder.Decode(&sErr)
-		if err != nil {
-			return false, err
-		}
-		r.err = searchMultiError{
-			errors: []SearchError{
-				searchError{
-					message: sErr,
-				},
-			},
-			endpoint: r.metadata.sourceAddr,
-			httpStatus: r.httpStatus,
-		}
 	case "hits":
 		// read the opening [, this prevents the decoder from loading the entire results array into memory
 		t, err := decoder.Token()
 		if err != nil {
 			return false, err
 		}
-		if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		delim, ok := t.(json.Delim)
+		if !ok {
+			// hits can be null
+			return false, nil
+		}
+
+		if delim != '[' {
 			return false, errors.New("expected results opening token to be [ but was " + string(delim))
 		}
 
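The comment on the "hits" case refers to token-level streaming: consuming the opening [ with decoder.Token() and then decoding one element per decoder.More() iteration keeps the full results array out of memory. A minimal sketch of that pattern (not the library code; the hit type here is invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type hit struct {
	ID string `json:"id"`
}

func main() {
	body := `[{"id":"a"},{"id":"b"},{"id":"c"}]`
	decoder := json.NewDecoder(strings.NewReader(body))

	// Read the opening '[' so Decode below sees one element at a time
	// instead of buffering the whole array.
	t, err := decoder.Token()
	if err != nil {
		panic(err)
	}
	if delim, ok := t.(json.Delim); !ok || delim != '[' {
		panic("expected results opening token to be [")
	}

	// Memory use is bounded by a single hit, however long the array is.
	for decoder.More() {
		var h hit
		if err := decoder.Decode(&h); err != nil {
			panic(err)
		}
		fmt.Println(h.ID)
	}
}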
42 changes: 42 additions & 0 deletions cluster_searchquery_test.go
@@ -1,6 +1,7 @@
 package gocb
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"testing"
@@ -246,3 +247,44 @@ func TestSearchQueryRetries(t *testing.T) {
 		t.Fatalf("Expected query to be retried 3 times but was retried %d times", retries)
 	}
 }
+
+func TestSearchQueryObjectError(t *testing.T) {
+	q := SearchQuery{
+		Name:  "test",
+		Query: NewMatchQuery("test"),
+	}
+	timeout := 60 * time.Second
+
+	dataBytes, err := loadRawTestDataset("searchquery_timeout")
+	if err != nil {
+		t.Fatalf("Could not read test dataset: %v", err)
+	}
+
+	doHTTP := func(req *gocbcore.HttpRequest) (*gocbcore.HttpResponse, error) {
+		return &gocbcore.HttpResponse{
+			Endpoint:   "http://localhost:8093",
+			StatusCode: 200,
+			Body:       &testReadCloser{bytes.NewBuffer(dataBytes), nil},
+		}, nil
+	}
+
+	provider := &mockHTTPProvider{
+		doFn: doHTTP,
+	}
+
+	cluster := testGetClusterForHTTP(provider, timeout, 0, 0)
+	cluster.sb.SearchRetryBehavior = StandardDelayRetryBehavior(3, 1, 100*time.Millisecond, LinearDelayFunction)
+
+	_, err = cluster.SearchQuery(q, nil)
+	if err == nil {
+		t.Fatal("Expected query execution to error")
+	}
+
+	if searchErr, ok := err.(SearchErrors); ok {
+		if len(searchErr.Errors()) != 6 {
+			t.Fatalf("Expected length of search errors to be 6 but was %d", len(searchErr.Errors()))
+		}
+	} else {
+		t.Fatalf("Expected error to be SearchErrors but was %v", err)
+	}
+}
38 changes: 38 additions & 0 deletions testdata/searchquery_timeout.json
@@ -0,0 +1,38 @@
+{
+  "status": {
+    "total": 6,
+    "failed": 6,
+    "successful": 0,
+    "errors": {
+      "travel_a464a32f957f35f1_13aa53f3": "context deadline exceeded",
+      "travel_a464a32f957f35f1_18572d87": "context deadline exceeded",
+      "travel_a464a32f957f35f1_54820232": "context deadline exceeded",
+      "travel_a464a32f957f35f1_6ddbfb54": "context deadline exceeded",
+      "travel_a464a32f957f35f1_aa574717": "context deadline exceeded",
+      "travel_a464a32f957f35f1_f4e0a48a": "context deadline exceeded"
+    }
+  },
+  "request": {
+    "query": {
+      "boost": null,
+      "match_all": {}
+    },
+    "size": 1000,
+    "from": 0,
+    "highlight": null,
+    "fields": [
+      "*"
+    ],
+    "facets": null,
+    "explain": false,
+    "sort": [
+      "-_score"
+    ],
+    "includeLocations": false
+  },
+  "hits": null,
+  "total_hits": 0,
+  "max_score": 0,
+  "took": 32464205,
+  "facets": null
+}
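
For contrast, the array shape of the same status block — the only form the old []string field could decode — would look like this (a constructed example, not a file in the commit):

{
  "status": {
    "total": 6,
    "failed": 1,
    "successful": 5,
    "errors": [
      "context deadline exceeded"
    ]
  }
}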
