Skip to content

Commit

Permalink
fix(orm): fix edge case exporting json (#11134)
Browse files — browse the repository at this point in the history
* fix(orm): fix edge case exporting json

* add import json test
  • Loading branch information
aaronc committed Feb 7, 2022
1 parent 1944a08 commit ebddeee
Show file tree
Hide file tree
Showing 5 changed files with 37 additions and 9 deletions.
9 changes: 3 additions & 6 deletions orm/model/ormtable/auto_increment.go
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,9 @@ func (t autoIncrementTable) ExportJSON(ctx context.Context, writer io.Writer) er
return err
}

start := true
if seq != 0 {
start = false
bz, err := json.Marshal(seq)
if err != nil {
return err
Expand All @@ -222,14 +224,9 @@ func (t autoIncrementTable) ExportJSON(ctx context.Context, writer io.Writer) er
if err != nil {
return err
}

_, err = writer.Write([]byte(",\n"))
if err != nil {
return err
}
}

return t.doExportJSON(ctx, writer)
return t.doExportJSON(ctx, writer, start)
}

func (t *autoIncrementTable) GetTable(message proto.Message) Table {
Expand Down
13 changes: 13 additions & 0 deletions orm/model/ormtable/auto_increment_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,19 @@ func runAutoIncrementScenario(t *testing.T, table ormtable.AutoIncrementTable, c
store2 := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())
assert.NilError(t, table.ImportJSON(store2, bytes.NewReader(buf.Bytes())))
assertTablesEqual(t, table, ctx, store2)

// test edge case where we have deleted all entities but we're still exporting the sequence number
assert.NilError(t, table.Delete(ctx, ex1))
assert.NilError(t, table.Delete(ctx, ex2))
buf = &bytes.Buffer{}
assert.NilError(t, table.ExportJSON(ctx, buf))
assert.NilError(t, table.ValidateJSON(bytes.NewReader(buf.Bytes())))
golden.Assert(t, buf.String(), "trivial_auto_inc_export.golden")
store3 := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())
assert.NilError(t, table.ImportJSON(store3, bytes.NewReader(buf.Bytes())))
ex1.Id = 0
assert.NilError(t, table.Insert(store3, ex1))
assert.Equal(t, uint64(3), ex1.Id) // should equal 3 because the sequence number 2 should have been imported from JSON
}

func TestBadJSON(t *testing.T) {
Expand Down
5 changes: 2 additions & 3 deletions orm/model/ormtable/table_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -293,18 +293,17 @@ func (t tableImpl) ExportJSON(context context.Context, writer io.Writer) error {
return err
}

return t.doExportJSON(context, writer)
return t.doExportJSON(context, writer, true)
}

func (t tableImpl) doExportJSON(ctx context.Context, writer io.Writer) error {
func (t tableImpl) doExportJSON(ctx context.Context, writer io.Writer, start bool) error {
marshalOptions := protojson.MarshalOptions{
UseProtoNames: true,
Resolver: t.typeResolver,
}

var err error
it, _ := t.List(ctx, nil)
start := true
for {
found := it.Next()

Expand Down
18 changes: 18 additions & 0 deletions orm/model/ormtable/testdata/test_auto_inc.golden
Original file line number Diff line number Diff line change
Expand Up @@ -52,3 +52,21 @@ ITERATOR 0300 -> 0301
PK testpb.ExampleAutoIncrementTable 2 -> {"id":2,"x":"bar","y":10}
NEXT
VALID false
GET 03000001 1203666f6f1805
PK testpb.ExampleAutoIncrementTable 1 -> {"id":1,"x":"foo","y":5}
ORM DELETE testpb.ExampleAutoIncrementTable {"id":1,"x":"foo","y":5}
DEL 03000001
DEL PK testpb.ExampleAutoIncrementTable 1 -> {"id":1}
DEL 0301666f6f
DEL ERR:EOF
GET 03000002 1203626172180a
PK testpb.ExampleAutoIncrementTable 2 -> {"id":2,"x":"bar","y":10}
ORM DELETE testpb.ExampleAutoIncrementTable {"id":2,"x":"bar","y":10}
DEL 03000002
DEL PK testpb.ExampleAutoIncrementTable 2 -> {"id":2}
DEL 0301626172
DEL ERR:EOF
GET 03808002 02
SEQ testpb.ExampleAutoIncrementTable 2
ITERATOR 0300 -> 0301
VALID false
1 change: 1 addition & 0 deletions orm/model/ormtable/testdata/trivial_auto_inc_export.golden
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
[2]

0 comments on commit ebddeee

Please sign in to comment.