json: properly report capacity of encoded jsons
Previously, jsonEncoded structs reported their memory size as including
the underlying capacity of their encoded byte slices. This was
incorrect, as usually that underlying slice is an entire kv batch. This
problem led to significant overcounting of the memory used by queries
that read JSON data from disk, which resulted in spurious
BudgetExceededErrors.

Release note (bug fix): prevent spurious BudgetExceededErrors for some
queries that read a lot of JSON data from disk.
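
For context on the one-line fix below: a Go full slice expression, v[:len(v):len(v)], yields a slice header whose capacity equals its length, so it no longer retains the much larger capacity of the buffer it was carved from. A minimal standalone sketch of that semantics (the 1 MiB buffer and the toy JSON value are illustrative, not CockroachDB code):

package main

import "fmt"

func main() {
    // Pretend this is the backing byte slice of a kv batch.
    batch := make([]byte, 0, 1<<20)
    encoded := append(batch, `{"a":"b"}`...)

    // A plain reslice still points at the huge backing array.
    plain := encoded[:len(encoded)]
    fmt.Println(len(plain), cap(plain)) // 9 1048576

    // A full slice expression clamps the capacity to the length, so
    // accounting based on capacity charges only for the bytes held.
    capped := encoded[:len(encoded):len(encoded)]
    fmt.Println(len(capped), cap(capped)) // 9 9
}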

Co-authored-by: Alfonso Subiotto Marqués <alfonso@cockroachlabs.com>
jordanlewis and asubiotto committed May 18, 2018
1 parent 793f4a5; commit 23280f4
Showing 2 changed files with 31 additions and 1 deletion.
pkg/util/json/encoded.go (6 changes: 5 additions & 1 deletion)
@@ -80,7 +80,11 @@ func newEncodedFromRoot(v []byte) (*jsonEncoded, error) {
     return &jsonEncoded{
         typ:          typ,
         containerLen: containerLen,
-        value:        v,
+        // Manually set the capacity of the new slice to its length, so we properly
+        // report the memory size of this encoded json object. The original slice
+        // capacity is very large, since it probably points to the backing byte
+        // slice of a kv batch.
+        value: v[:len(v):len(v)],
     }, nil
 }
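
To make the accounting consequence concrete, here is a hedged sketch: encodedSketch and sizeOfEncoded below are hypothetical stand-ins for jsonEncoded and its Size method (the real method body is not part of this diff), and the 64 KiB batch is illustrative. Because the size report charges for the capacity of the retained slice, an uncapped value bills an entire batch to a single datum, while the re-capped value bills only the encoded bytes.

package main

import (
    "fmt"
    "unsafe"
)

// encodedSketch mimics the relevant part of jsonEncoded: it just holds the
// encoded bytes.
type encodedSketch struct {
    value []byte
}

// sizeOfEncoded is a hypothetical stand-in for Size(): it charges for the
// struct header plus the capacity of the byte slice it retains.
func sizeOfEncoded(e encodedSketch) uintptr {
    return unsafe.Sizeof(e) + uintptr(cap(e.value))
}

func main() {
    batch := make([]byte, 0, 64<<10) // stand-in for a kv batch
    v := append(batch, `{"a":"b"}`...)

    before := encodedSketch{value: v}                // pre-fix: keeps batch capacity
    after := encodedSketch{value: v[:len(v):len(v)]} // post-fix: capacity == length

    fmt.Println(sizeOfEncoded(before)) // ~65560 on 64-bit: the whole batch is billed
    fmt.Println(sizeOfEncoded(after))  // ~33 on 64-bit: just the encoded JSON
}

The new test below exercises exactly this scenario: each case is encoded both into an exact-size buffer and into a buffer pre-allocated with 10240 bytes of capacity, and the two resulting jsonEncoded values must report the same Size().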

pkg/util/json/json_test.go (26 changes: 26 additions & 0 deletions)
@@ -304,6 +304,7 @@ func TestJSONSize(t *testing.T) {
         {`{"":{}}`, sliceHeaderSize + keyValuePairSize + sliceHeaderSize},
         {`{"a":"b"}`, sliceHeaderSize + keyValuePairSize + 1 + stringHeaderSize + 1},
     }
+    largeBuf := make([]byte, 0, 10240)
     for _, tc := range testCases {
         t.Run(tc.input, func(t *testing.T) {
             j, err := ParseJSON(tc.input)
@@ -313,6 +314,31 @@ func TestJSONSize(t *testing.T) {
             if j.Size() != tc.size {
                 t.Fatalf("expected %v to have size %d, but had size %d", j, tc.size, j.Size())
             }
+
+            t.Run("jsonEncodedSize", func(t *testing.T) {
+                largeBuf = largeBuf[:0]
+                var buf []byte
+                var err error
+
+                if buf, err = EncodeJSON(buf, j); err != nil {
+                    t.Fatal(err)
+                }
+                if largeBuf, err = EncodeJSON(largeBuf, j); err != nil {
+                    t.Fatal(err)
+                }
+
+                encoded, err := newEncodedFromRoot(buf)
+                if err != nil {
+                    t.Fatal(err)
+                }
+                encodedFromLargeBuf, err := newEncodedFromRoot(largeBuf)
+                if err != nil {
+                    t.Fatal(err)
+                }
+                if encodedFromLargeBuf.Size() != encoded.Size() {
+                    t.Errorf("expected jsonEncoded on a large buf for %v to have size %d, found %d", j, encoded.Size(), encodedFromLargeBuf.Size())
+                }
+            })
         })
     }
 }
