From 8c77392a7dc93096c3a0cfde916aad2e08bd5500 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 6 Jan 2022 08:59:01 -0500 Subject: [PATCH 01/21] Update query syntax for starts/ends with and tweak for consistency Signed-off-by: Peter Broadhurst --- docs/reference/api_query_syntax.md | 62 ++++++-- internal/apiserver/restfilter.go | 136 ++++++++++++++---- internal/apiserver/restfilter_test.go | 70 ++++++++- internal/database/sqlcommon/filter_sql.go | 26 +++- .../database/sqlcommon/filter_sql_test.go | 28 ++++ internal/i18n/en_translations.go | 3 +- pkg/database/filter.go | 105 ++++++++++++-- pkg/database/filter_test.go | 30 +++- 8 files changed, 401 insertions(+), 59 deletions(-) diff --git a/docs/reference/api_query_syntax.md b/docs/reference/api_query_syntax.md index d64f41fd4e..100fdb0025 100644 --- a/docs/reference/api_query_syntax.md +++ b/docs/reference/api_query_syntax.md @@ -20,6 +20,7 @@ nav_order: 1 REST collections provide filter, `skip`, `limit` and `sort` support. - The field in the message is used as the query parameter + - Syntax: `field=[modifiers][operator]match-string` - When multiple query parameters are supplied these are combined with AND - When the same query parameter is supplied multiple times, these are combined with OR @@ -38,15 +39,52 @@ This states: Table of filter operations, which must be the first character of the query string (after the `=` in the above URL path example) -| Operator | Description | -|----------|-----------------------------------| -| (none) | Equal | -| `!` | Not equal | -| `<` | Less than | -| `<=` | Less than or equal | -| `>` | Greater than | -| `>=` | Greater than or equal | -| `@` | Containing - case sensitive | -| `!@` | Not containing - case sensitive | -| `^` | Containing - case insensitive | -| `!^` | Not containing - case insensitive | +### Operators + +Operators are a type of comparison operation to +perform against the match string. + +| Operator | Description | +|----------|------------------------------------| +| `=` | Equal | +| (none) | Equal (shortcut) | +| `@` | Containing | +| `^` | Starts with | +| `$` | Ends with | +| `<<` | Less than | +| `<` | Less than (shortcut) | +| `<=` | Less than or equal | +| `>>` | Greater than | +| `>` | Greater than (shortcut) | +| `>=` | Greater than or equal | + +> Shortcuts are only safe to use when your match +> string starts with `a-z`, `A-Z`, `0-9`, `-` or `_`. + +### Modifiers + +Modifiers can appear before the operator, to change its +behavior. + +| Modifier | Description | +|----------|------------------------------------| +| `!` | Not - negates the match | +| `:` | Case insensitive | + +> Characters `=`,`@`,`$`,`!` and `:` should technically be encoded +> in URLs, but in practice should function fine without encoding. + +## Detailed examples + +| Example | Description | +|--------------|--------------------------------------------| +| `cat` | Equals "cat" | +| `=cat` | Equals "cat" (same) | +| `!=cat` | Not equal to "cat" | +| `:=cat` | Equal to "CAT", "cat", "CaT etc. | +| `!:cat` | Not equal to "CAT", "cat", "CaT etc. | +| `=!cat` | Equal to "!cat" (! is after operator) | +| `^cats/` | Starts with "cats/" | +| `$_cat` | Ends with with "_cat" | +| `!:^cats/` | Does not start with "cats/", "CATs/" etc. 
| +| `!$-cat` | Does not end with "-cat" | diff --git a/internal/apiserver/restfilter.go b/internal/apiserver/restfilter.go index 1e52e03745..8f5d938f33 100644 --- a/internal/apiserver/restfilter.go +++ b/internal/apiserver/restfilter.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -17,6 +17,7 @@ package apiserver import ( + "context" "net/http" "net/url" "reflect" @@ -75,12 +76,20 @@ func (as *apiServer) buildFilter(req *http.Request, ff database.QueryFactory) (d for _, field := range possibleFields { values := as.getValues(req.Form, field) if len(values) == 1 { - filter.Condition(as.getCondition(fb, field, values[0])) + cond, err := as.getCondition(ctx, fb, field, values[0]) + if err != nil { + return nil, err + } + filter.Condition(cond) } else if len(values) > 0 { sort.Strings(values) fs := make([]database.Filter, len(values)) for i, value := range values { - fs[i] = as.getCondition(fb, field, value) + cond, err := as.getCondition(ctx, fb, field, value) + if err != nil { + return nil, err + } + fs[i] = cond } filter.Condition(fb.Or(fs...)) } @@ -123,27 +132,106 @@ func (as *apiServer) buildFilter(req *http.Request, ff database.QueryFactory) (d return filter, nil } -func (as *apiServer) getCondition(fb database.FilterBuilder, field, value string) database.Filter { - switch { - case strings.HasPrefix(value, ">="): - return fb.Gte(field, value[2:]) - case strings.HasPrefix(value, "<="): - return fb.Lte(field, value[2:]) - case strings.HasPrefix(value, ">"): - return fb.Gt(field, value[1:]) - case strings.HasPrefix(value, "<"): - return fb.Lt(field, value[1:]) - case strings.HasPrefix(value, "@"): - return fb.Contains(field, value[1:]) - case strings.HasPrefix(value, "^"): - return fb.IContains(field, value[1:]) - case strings.HasPrefix(value, "!@"): - return fb.NotContains(field, value[2:]) - case strings.HasPrefix(value, "!^"): - return fb.NotIContains(field, value[2:]) - case strings.HasPrefix(value, "!"): - return fb.Neq(field, value[1:]) +func (as *apiServer) checkNoMods(ctx context.Context, negate, caseInsensitive bool, field, op string, filter database.Filter) (database.Filter, error) { + if negate || caseInsensitive { + return nil, i18n.NewError(ctx, i18n.MsgQueryOpUnsupportedMod, op, field) + } + return filter, nil +} + +func (as *apiServer) getCondition(ctx context.Context, fb database.FilterBuilder, field, value string) (filter database.Filter, err error) { + + negate := false + caseInsensitive := false + operator := make([]rune, 0, 2) + prefixLength := 0 +opFinder: + for _, r := range value { + switch r { + case '!': + negate = true + prefixLength++ + case ':': + caseInsensitive = true + prefixLength++ + case '>', '<': + // Terminates the opFinder if it's the second character + if len(operator) == 1 && operator[0] != r { + // Detected "><" or "<>" - which is a single char operator, followed by beginning of match string + break opFinder + } + operator = append(operator, r) + prefixLength++ + if len(operator) > 1 { + // Detected ">>" or "<<" full operators + break opFinder + } + case '=', '@', '^', '$': + // Always terminates the opFinder + // Could be ">=" or "<=" (due to above logic continuing on '>' or '<' first char) + operator = append(operator, r) + prefixLength++ + break opFinder + default: + // Found a normal character + break opFinder + } + } + + op := string(operator) + matchString := value[prefixLength:] + switch op { + case ">=": + return as.checkNoMods(ctx, negate, 
caseInsensitive, field, op, fb.Gte(field, matchString)) + case "<=": + return as.checkNoMods(ctx, negate, caseInsensitive, field, op, fb.Lte(field, matchString)) + case ">", ">>": + return as.checkNoMods(ctx, negate, caseInsensitive, field, op, fb.Gt(field, matchString)) + case "<", "<<": + return as.checkNoMods(ctx, negate, caseInsensitive, field, op, fb.Lt(field, matchString)) + case "@": + if caseInsensitive { + if negate { + return fb.NotIContains(field, matchString), nil + } + return fb.IContains(field, matchString), nil + } + if negate { + return fb.NotContains(field, matchString), nil + } + return fb.Contains(field, matchString), nil + case "^": + if caseInsensitive { + if negate { + return fb.NotIStartsWith(field, matchString), nil + } + return fb.IStartsWith(field, matchString), nil + } + if negate { + return fb.NotStartsWith(field, matchString), nil + } + return fb.StartsWith(field, matchString), nil + case "$": + if caseInsensitive { + if negate { + return fb.NotIEndsWith(field, matchString), nil + } + return fb.IEndsWith(field, matchString), nil + } + if negate { + return fb.NotEndsWith(field, matchString), nil + } + return fb.EndsWith(field, matchString), nil default: - return fb.Eq(field, value) + if caseInsensitive { + if negate { + return fb.NIeq(field, matchString), nil + } + return fb.IEq(field, matchString), nil + } + if negate { + return fb.Neq(field, matchString), nil + } + return fb.Eq(field, matchString), nil } } diff --git a/internal/apiserver/restfilter_test.go b/internal/apiserver/restfilter_test.go index 897bbd2f71..3eb0a56fe2 100644 --- a/internal/apiserver/restfilter_test.go +++ b/internal/apiserver/restfilter_test.go @@ -17,6 +17,7 @@ package apiserver import ( + "fmt" "net/http/httptest" "testing" @@ -35,7 +36,74 @@ func TestBuildFilterDescending(t *testing.T) { fi, err := filter.Finalize() assert.NoError(t, err) - assert.Equal(t, "( confirmed != 0 ) && ( created == 0 ) && ( ( tag %! 'abc' ) || ( tag ^! 
'abc' ) || ( tag <= 'abc' ) || ( tag < 'abc' ) || ( tag >= 'abc' ) || ( tag > 'abc' ) || ( tag %= 'abc' ) || ( tag ^= 'abc' ) ) sort=-tag,-sequence skip=10 limit=50", fi.String()) + assert.Equal(t, "( confirmed != 0 ) && ( created == 0 ) && ( ( tag !% 'abc' ) || ( tag !^ 'abc' ) || ( tag <= 'abc' ) || ( tag << 'abc' ) || ( tag >= 'abc' ) || ( tag >> 'abc' ) || ( tag %= 'abc' ) || ( tag ^= 'abc' ) ) sort=-tag,-sequence skip=10 limit=50", fi.String()) +} + +func testIndividualFilter(t *testing.T, queryString, expectedToString string) { + as := &apiServer{ + maxFilterLimit: 250, + } + req := httptest.NewRequest("GET", fmt.Sprintf("/things?%s", queryString), nil) + filter, err := as.buildFilter(req, database.MessageQueryFactory) + assert.NoError(t, err) + fi, err := filter.Finalize() + assert.NoError(t, err) + assert.Equal(t, expectedToString, fi.String()) +} + +func TestBuildFilterEachCombo(t *testing.T) { + testIndividualFilter(t, "tag=cat", "( tag == 'cat' )") + testIndividualFilter(t, "tag==cat", "( tag == 'cat' )") + testIndividualFilter(t, "tag===cat", "( tag == '=cat' )") + testIndividualFilter(t, "tag=!cat", "( tag != 'cat' )") + testIndividualFilter(t, "tag=!=cat", "( tag != 'cat' )") + testIndividualFilter(t, "tag=!=!cat", "( tag != '!cat' )") + testIndividualFilter(t, "tag=!==cat", "( tag != '=cat' )") + testIndividualFilter(t, "tag=!:=cat", "( tag ;= 'cat' )") + testIndividualFilter(t, "tag=:!=cat", "( tag ;= 'cat' )") + testIndividualFilter(t, "tag=:=cat", "( tag := 'cat' )") + testIndividualFilter(t, "tag=>cat", "( tag >> 'cat' )") + testIndividualFilter(t, "tag=>>cat", "( tag >> 'cat' )") + testIndividualFilter(t, "tag=>>>cat", "( tag >> '>cat' )") + testIndividualFilter(t, "tag=cat", "( tag << '>cat' )") + testIndividualFilter(t, "tag=>> '=cat", "( tag >= 'cat' )") + testIndividualFilter(t, "tag=<=cat", "( tag <= 'cat' )") + testIndividualFilter(t, "tag=>=>cat", "( tag >= '>cat' )") + testIndividualFilter(t, "tag=>==cat", "( tag >= '=cat' )") + testIndividualFilter(t, "tag=@@cat", "( tag %= '@cat' )") + testIndividualFilter(t, "tag=@cat", "( tag %= 'cat' )") + testIndividualFilter(t, "tag=!@cat", "( tag !% 'cat' )") + testIndividualFilter(t, "tag=:@cat", "( tag :% 'cat' )") + testIndividualFilter(t, "tag=!:@cat", "( tag ;% 'cat' )") + testIndividualFilter(t, "tag=^cat", "( tag ^= 'cat' )") + testIndividualFilter(t, "tag=!^cat", "( tag !^ 'cat' )") + testIndividualFilter(t, "tag=:^cat", "( tag :^ 'cat' )") + testIndividualFilter(t, "tag=!:^cat", "( tag ;^ 'cat' )") + testIndividualFilter(t, "tag=$cat", "( tag $= 'cat' )") + testIndividualFilter(t, "tag=!$cat", "( tag !$ 'cat' )") + testIndividualFilter(t, "tag=:$cat", "( tag :$ 'cat' )") + testIndividualFilter(t, "tag=!:$cat", "( tag ;$ 'cat' )") +} + +func testFailFilter(t *testing.T, queryString, errCode string) { + as := &apiServer{ + maxFilterLimit: 250, + } + req := httptest.NewRequest("GET", fmt.Sprintf("/things?%s", queryString), nil) + _, err := as.buildFilter(req, database.MessageQueryFactory) + assert.Regexp(t, errCode, err) +} + +func TestCheckNoMods(t *testing.T) { + testFailFilter(t, "tag=!>=test", "FF10302") + testFailFilter(t, "tag=:>test", "FF10302") + testFailFilter(t, "tag=!= ? AND mt.created <> ? AND mt.seq > ? AND mt.topics LIKE ? AND mt.topics NOT LIKE ? AND mt.topics ILIKE ? AND mt.topics NOT ILIKE ?) 
ORDER BY mt.seq DESC", sqlFilter) } +func TestSQLQueryFactoryEvenMoreOps(t *testing.T) { + + s, _ := newMockProvider().init() + fb := database.MessageQueryFactory.NewFilter(context.Background()) + u := fftypes.MustParseUUID("4066ABDC-8BBD-4472-9D29-1A55B467F9B9") + f := fb.And( + fb.IEq("id", u), + fb.NIeq("id", nil), + fb.StartsWith("topics", "abc"), + fb.NotStartsWith("topics", "def"), + fb.IStartsWith("topics", "ghi"), + fb.NotIStartsWith("topics", "jkl"), + fb.EndsWith("topics", "mno"), + fb.NotEndsWith("topics", "pqr"), + fb.IEndsWith("topics", "sty"), + fb.NotIEndsWith("topics", "vwx"), + ). + Descending() + + sel := squirrel.Select("*").From("mytable AS mt") + sel, _, _, err := s.filterSelect(context.Background(), "mt", sel, f, nil, []interface{}{"sequence"}) + assert.NoError(t, err) + + sqlFilter, _, err := sel.ToSql() + assert.NoError(t, err) + assert.Equal(t, "SELECT * FROM mytable AS mt WHERE (mt.id ILIKE ? AND mt.id NOT ILIKE ? AND mt.topics LIKE ? AND mt.topics NOT LIKE ? AND mt.topics ILIKE ? AND mt.topics NOT ILIKE ? AND mt.topics LIKE ? AND mt.topics NOT LIKE ? AND mt.topics ILIKE ? AND mt.topics NOT ILIKE ?) ORDER BY mt.seq DESC", sqlFilter) +} + func TestSQLQueryFactoryFinalizeFail(t *testing.T) { s, _ := newMockProvider().init() fb := database.MessageQueryFactory.NewFilter(context.Background()) diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go index f8b9c0b2d3..3a9bd67fc6 100644 --- a/internal/i18n/en_translations.go +++ b/internal/i18n/en_translations.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -219,4 +219,5 @@ var ( MsgInvalidChartNumberParam = ffm("FF10299", "Invalid %s. Must be a number.", 400) MsgHistogramInvalidTimes = ffm("FF10300", "Start time must be before end time", 400) MsgUnsupportedCollection = ffm("FF10301", "%s collection is not supported", 400) + MsgQueryOpUnsupportedMod = ffm("FF10302", "Operation '%s' on '%s' does not support modifiers", 400) ) diff --git a/pkg/database/filter.go b/pkg/database/filter.go index 16bbe1b2bf..7284223a6b 100644 --- a/pkg/database/filter.go +++ b/pkg/database/filter.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -68,6 +68,7 @@ type OrFilter interface{ MultiConditionFilter } // used in the core string formatting method (for logging etc.) type FilterOp string +// The character pairs in this are not used anywhere externally, just in a to-string representation of queries const ( // FilterOpAnd and FilterOpAnd FilterOp = "&&" @@ -75,16 +76,20 @@ const ( FilterOpOr FilterOp = "||" // FilterOpEq equal FilterOpEq FilterOp = "==" + // FilterOpIEq equal + FilterOpIEq FilterOp = ":=" // FilterOpNe not equal - FilterOpNe FilterOp = "!=" + FilterOpNeq FilterOp = "!=" + // FilterOpNIeq not equal + FilterOpNIeq FilterOp = ";=" // FilterOpIn in list of values FilterOpIn FilterOp = "IN" // FilterOpNotIn not in list of values FilterOpNotIn FilterOp = "NI" // FilterOpGt greater than - FilterOpGt FilterOp = ">" + FilterOpGt FilterOp = ">>" // FilterOpLt less than - FilterOpLt FilterOp = "<" + FilterOpLt FilterOp = "<<" // FilterOpGte greater than or equal FilterOpGte FilterOp = ">=" // FilterOpLte less than or equal @@ -92,11 +97,27 @@ const ( // FilterOpCont contains the specified text, case sensitive FilterOpCont FilterOp = "%=" // FilterOpNotCont does not contain the specified text, case sensitive - FilterOpNotCont FilterOp = "%!" 
+ FilterOpNotCont FilterOp = "!%" // FilterOpICont contains the specified text, case insensitive - FilterOpICont FilterOp = "^=" + FilterOpICont FilterOp = ":%" // FilterOpNotICont does not contain the specified text, case insensitive - FilterOpNotICont FilterOp = "^!" + FilterOpNotICont FilterOp = ";%" + // FilterOpStartsWith contains the specified text, case sensitive + FilterOpStartsWith FilterOp = "^=" + // FilterOpNotCont does not contain the specified text, case sensitive + FilterOpNotStartsWith FilterOp = "!^" + // FilterOpICont contains the specified text, case insensitive + FilterOpIStartsWith FilterOp = ":^" + // FilterOpNotICont does not contain the specified text, case insensitive + FilterOpNotIStartsWith FilterOp = ";^" + // FilterOpEndsWith contains the specified text, case sensitive + FilterOpEndsWith FilterOp = "$=" + // FilterOpNotCont does not contain the specified text, case sensitive + FilterOpNotEndsWith FilterOp = "!$" + // FilterOpICont contains the specified text, case insensitive + FilterOpIEndsWith FilterOp = ":$" + // FilterOpNotICont does not contain the specified text, case insensitive + FilterOpNotIEndsWith FilterOp = ";$" ) // FilterBuilder is the syntax used to build the filter, where And() and Or() can be nested @@ -107,10 +128,14 @@ type FilterBuilder interface { And(and ...Filter) AndFilter // Or requires any of the sub-filters to match Or(and ...Filter) OrFilter - // Eq equal + // Eq equal - case sensitive Eq(name string, value driver.Value) Filter - // Neq not equal + // Neq not equal - case sensitive Neq(name string, value driver.Value) Filter + // IEq equal - case insensitive + IEq(name string, value driver.Value) Filter + // INeq not equal - case insensitive + NIeq(name string, value driver.Value) Filter // In one of an array of values In(name string, value []driver.Value) Filter // NotIn not one of an array of values @@ -127,10 +152,26 @@ type FilterBuilder interface { Contains(name string, value driver.Value) Filter // NotContains disallows the string anywhere - case sensitive NotContains(name string, value driver.Value) Filter - // IContains allows the string anywhere - case sensitive + // IContains allows the string anywhere - case insensitive IContains(name string, value driver.Value) Filter - // INotContains disallows the string anywhere - case sensitive + // INotContains disallows the string anywhere - case insensitive NotIContains(name string, value driver.Value) Filter + // StartsWith allows the string at the start - case sensitive + StartsWith(name string, value driver.Value) Filter + // NotStartsWith disallows the string at the start - case sensitive + NotStartsWith(name string, value driver.Value) Filter + // IStartsWith allows the string at the start - case insensitive + IStartsWith(name string, value driver.Value) Filter + // NotIStartsWith disallows the string att the start - case insensitive + NotIStartsWith(name string, value driver.Value) Filter + // EndsWith allows the string at the end - case sensitive + EndsWith(name string, value driver.Value) Filter + // NotEndsWith disallows the string at the end - case sensitive + NotEndsWith(name string, value driver.Value) Filter + // IEndsWith allows the string at the end - case insensitive + IEndsWith(name string, value driver.Value) Filter + // NotIEndsWith disallows the string att the end - case insensitive + NotIEndsWith(name string, value driver.Value) Filter } // NullBehavior specifies whether to sort nulls first or last in a query @@ -416,7 +457,15 @@ func (fb *filterBuilder) 
Eq(name string, value driver.Value) Filter { } func (fb *filterBuilder) Neq(name string, value driver.Value) Filter { - return fb.fieldFilter(FilterOpNe, name, value) + return fb.fieldFilter(FilterOpNeq, name, value) +} + +func (fb *filterBuilder) IEq(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpIEq, name, value) +} + +func (fb *filterBuilder) NIeq(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpNIeq, name, value) } func (fb *filterBuilder) In(name string, values []driver.Value) Filter { @@ -459,6 +508,38 @@ func (fb *filterBuilder) NotIContains(name string, value driver.Value) Filter { return fb.fieldFilter(FilterOpNotICont, name, value) } +func (fb *filterBuilder) StartsWith(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpStartsWith, name, value) +} + +func (fb *filterBuilder) NotStartsWith(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpNotStartsWith, name, value) +} + +func (fb *filterBuilder) IStartsWith(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpIStartsWith, name, value) +} + +func (fb *filterBuilder) NotIStartsWith(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpNotIStartsWith, name, value) +} + +func (fb *filterBuilder) EndsWith(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpEndsWith, name, value) +} + +func (fb *filterBuilder) NotEndsWith(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpNotEndsWith, name, value) +} + +func (fb *filterBuilder) IEndsWith(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpIEndsWith, name, value) +} + +func (fb *filterBuilder) NotIEndsWith(name string, value driver.Value) Filter { + return fb.fieldFilter(FilterOpNotIEndsWith, name, value) +} + func (fb *filterBuilder) fieldFilter(op FilterOp, name string, value interface{}) Filter { return &fieldFilter{ baseFilter: baseFilter{ diff --git a/pkg/database/filter_test.go b/pkg/database/filter_test.go index 2243d0038a..38ed802199 100644 --- a/pkg/database/filter_test.go +++ b/pkg/database/filter_test.go @@ -42,7 +42,7 @@ func TestBuildMessageFilter(t *testing.T) { Descending(). Finalize() assert.NoError(t, err) - assert.Equal(t, "( namespace == 'ns1' ) && ( ( id == '35c11cba-adff-4a4d-970a-02e3a0858dc8' ) || ( id == 'caefb9d1-9fc9-4d6a-a155-514d3139adf7' ) ) && ( sequence > 12345 ) && ( confirmed == null ) sort=-namespace skip=50 limit=25 count=true", f.String()) + assert.Equal(t, "( namespace == 'ns1' ) && ( ( id == '35c11cba-adff-4a4d-970a-02e3a0858dc8' ) || ( id == 'caefb9d1-9fc9-4d6a-a155-514d3139adf7' ) ) && ( sequence >> 12345 ) && ( confirmed == null ) sort=-namespace skip=50 limit=25 count=true", f.String()) } func TestBuildMessageFilter2(t *testing.T) { @@ -53,7 +53,7 @@ func TestBuildMessageFilter2(t *testing.T) { Finalize() assert.NoError(t, err) - assert.Equal(t, "sequence > 0 sort=sequence", f.String()) + assert.Equal(t, "sequence >> 0 sort=sequence", f.String()) } func TestBuildMessageFilter3(t *testing.T) { @@ -76,7 +76,25 @@ func TestBuildMessageFilter3(t *testing.T) { Sort("-sequence"). Finalize() assert.NoError(t, err) - assert.Equal(t, "( created IN [1000000000,2000000000,3000000000] ) && ( created NI [1000000000,2000000000,3000000000] ) && ( created < 0 ) && ( created <= 0 ) && ( created >= 0 ) && ( created != 0 ) && ( sequence > 12345 ) && ( topics %= 'abc' ) && ( topics %! 'def' ) && ( topics ^= 'ghi' ) && ( topics ^! 
'jkl' ) sort=-created,topics,-sequence", f.String()) + assert.Equal(t, "( created IN [1000000000,2000000000,3000000000] ) && ( created NI [1000000000,2000000000,3000000000] ) && ( created << 0 ) && ( created <= 0 ) && ( created >= 0 ) && ( created != 0 ) && ( sequence >> 12345 ) && ( topics %= 'abc' ) && ( topics !% 'def' ) && ( topics :% 'ghi' ) && ( topics ;% 'jkl' ) sort=-created,topics,-sequence", f.String()) +} + +func TestBuildMessageFilter4(t *testing.T) { + fb := MessageQueryFactory.NewFilter(context.Background()) + f, err := fb.And( + fb.IEq("topics", "abc"), + fb.NIeq("topics", "bcd"), + fb.StartsWith("topics", "cde"), + fb.IStartsWith("topics", "def"), + fb.NotStartsWith("topics", "efg"), + fb.NotIStartsWith("topics", "fgh"), + fb.EndsWith("topics", "hij"), + fb.IEndsWith("topics", "ikl"), + fb.NotEndsWith("topics", "lmn"), + fb.NotIEndsWith("topics", "mno"), + ).Finalize() + assert.NoError(t, err) + assert.Equal(t, "( topics := 'abc' ) && ( topics ;= 'bcd' ) && ( topics ^= 'cde' ) && ( topics :^ 'def' ) && ( topics !^ 'efg' ) && ( topics ;^ 'fgh' ) && ( topics $= 'hij' ) && ( topics :$ 'ikl' ) && ( topics !$ 'lmn' ) && ( topics ;$ 'mno' )", f.String()) } func TestBuildMessageBadInFilterField(t *testing.T) { @@ -142,7 +160,7 @@ func TestBuildMessageIntConvert(t *testing.T) { fb.Lt("sequence", uint64(666)), ).Finalize() assert.NoError(t, err) - assert.Equal(t, "( sequence < 111 ) && ( sequence < 222 ) && ( sequence < 333 ) && ( sequence < 444 ) && ( sequence < 555 ) && ( sequence < 666 )", f.String()) + assert.Equal(t, "( sequence << 111 ) && ( sequence << 222 ) && ( sequence << 333 ) && ( sequence << 444 ) && ( sequence << 555 ) && ( sequence << 666 )", f.String()) } func TestBuildMessageTimeConvert(t *testing.T) { @@ -156,7 +174,7 @@ func TestBuildMessageTimeConvert(t *testing.T) { fb.Lt("created", *fftypes.UnixTime(1621112824)), ).Finalize() assert.NoError(t, err) - assert.Equal(t, "( created > 1621112824000000000 ) && ( created > 0 ) && ( created == 1621112874123456789 ) && ( created == null ) && ( created < 1621112824000000000 ) && ( created < 1621112824000000000 )", f.String()) + assert.Equal(t, "( created >> 1621112824000000000 ) && ( created >> 0 ) && ( created == 1621112874123456789 ) && ( created == null ) && ( created << 1621112824000000000 ) && ( created << 1621112824000000000 )", f.String()) } func TestBuildMessageStringConvert(t *testing.T) { @@ -177,7 +195,7 @@ func TestBuildMessageStringConvert(t *testing.T) { fb.Lt("namespace", b32), ).Finalize() assert.NoError(t, err) - assert.Equal(t, "( namespace < '111' ) && ( namespace < '222' ) && ( namespace < '333' ) && ( namespace < '444' ) && ( namespace < '555' ) && ( namespace < '666' ) && ( namespace < '' ) && ( namespace < '3f96e0d5-a10e-47c6-87a0-f2e7604af179' ) && ( namespace < '3f96e0d5-a10e-47c6-87a0-f2e7604af179' ) && ( namespace < '3f96e0d5a10e47c687a0f2e7604af17900000000000000000000000000000000' ) && ( namespace < '3f96e0d5a10e47c687a0f2e7604af17900000000000000000000000000000000' )", f.String()) + assert.Equal(t, "( namespace << '111' ) && ( namespace << '222' ) && ( namespace << '333' ) && ( namespace << '444' ) && ( namespace << '555' ) && ( namespace << '666' ) && ( namespace << '' ) && ( namespace << '3f96e0d5-a10e-47c6-87a0-f2e7604af179' ) && ( namespace << '3f96e0d5-a10e-47c6-87a0-f2e7604af179' ) && ( namespace << '3f96e0d5a10e47c687a0f2e7604af17900000000000000000000000000000000' ) && ( namespace << '3f96e0d5a10e47c687a0f2e7604af17900000000000000000000000000000000' )", f.String()) } func 
TestBuildMessageBoolConvert(t *testing.T) { From 2b013c50370071d0cafe9c7eac8ca168663c4fb6 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 6 Jan 2022 13:03:43 -0500 Subject: [PATCH 02/21] Andrew confirmed we don't need escaping per RFC3986 Signed-off-by: Peter Broadhurst --- docs/reference/api_query_syntax.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/reference/api_query_syntax.md b/docs/reference/api_query_syntax.md index 100fdb0025..9363606637 100644 --- a/docs/reference/api_query_syntax.md +++ b/docs/reference/api_query_syntax.md @@ -71,9 +71,6 @@ behavior. | `!` | Not - negates the match | | `:` | Case insensitive | -> Characters `=`,`@`,`$`,`!` and `:` should technically be encoded -> in URLs, but in practice should function fine without encoding. - ## Detailed examples | Example | Description | From 2a9113066a28752c39d4474ed6cb770aae27a0af Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 6 Jan 2022 18:02:46 -0500 Subject: [PATCH 03/21] Update code after merge Signed-off-by: Peter Broadhurst --- pkg/fftypes/data_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index 4a2f487c17..3b72fb317f 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -139,7 +139,7 @@ func TestSealBlobMismatch1(t *testing.T) { err := d.Seal(context.Background(), &Blob{ Hash: NewRandB32(), }) - assert.Regexp(t, "FF10303", err) + assert.Regexp(t, "FF10304", err) } func TestSealBlobMismatch2(t *testing.T) { @@ -147,7 +147,7 @@ func TestSealBlobMismatch2(t *testing.T) { Blob: &BlobRef{Hash: NewRandB32()}, } err := d.Seal(context.Background(), nil) - assert.Regexp(t, "FF10303", err) + assert.Regexp(t, "FF10304", err) } func TestSealBlobAndHashOnly(t *testing.T) { From a59a5c13bae5010daca9c755b6b6d83ea28ff025 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 7 Jan 2022 00:12:25 -0500 Subject: [PATCH 04/21] Support partial ID queries, and null queries Signed-off-by: Peter Broadhurst --- .../000049_add_blobs_size_and_name.up.sql | 3 + docs/reference/api_query_syntax.md | 3 + internal/apiserver/restfilter.go | 65 ++++--- internal/apiserver/restfilter_test.go | 8 + .../apiserver/route_get_chart_histogram.go | 6 +- .../route_get_chart_histogram_test.go | 4 +- internal/data/blobstore_test.go | 2 +- pkg/database/filter.go | 27 ++- pkg/database/filter_test.go | 11 +- pkg/database/query_fields.go | 26 ++- pkg/database/query_fields_test.go | 164 ++++++++++++++++++ pkg/fftypes/byteable.go | 6 +- pkg/fftypes/byteable_test.go | 4 +- pkg/fftypes/data.go | 6 +- pkg/fftypes/data_test.go | 2 +- pkg/fftypes/timeutils.go | 8 +- 16 files changed, 286 insertions(+), 59 deletions(-) create mode 100644 pkg/database/query_fields_test.go diff --git a/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql b/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql index 7dc9069175..db5f02e2e1 100644 --- a/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql +++ b/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql @@ -3,5 +3,8 @@ ALTER TABLE blobs ADD size BIGINT; ALTER TABLE data ADD blob_name VARCHAR(1024); ALTER TABLE data ADD blob_size BIGINT; +UPDATE blobs SET size = 0; +UPDATE data SET blob_size = 0, blob_name = ''; + CREATE INDEX data_blob_name ON data(blob_name); CREATE INDEX data_blob_size ON data(blob_size); diff --git a/docs/reference/api_query_syntax.md b/docs/reference/api_query_syntax.md index 9363606637..d7892386cf 100644 --- a/docs/reference/api_query_syntax.md +++ 
b/docs/reference/api_query_syntax.md @@ -70,6 +70,7 @@ behavior. |----------|------------------------------------| | `!` | Not - negates the match | | `:` | Case insensitive | +| `?` | Treat empty match string as null | ## Detailed examples @@ -85,3 +86,5 @@ behavior. | `$_cat` | Ends with with "_cat" | | `!:^cats/` | Does not start with "cats/", "CATs/" etc. | | `!$-cat` | Does not end with "-cat" | +| `?=` | Is null | +| `!?=` | Is not null | diff --git a/internal/apiserver/restfilter.go b/internal/apiserver/restfilter.go index 8f5d938f33..dd74237c96 100644 --- a/internal/apiserver/restfilter.go +++ b/internal/apiserver/restfilter.go @@ -18,6 +18,7 @@ package apiserver import ( "context" + "database/sql/driver" "net/http" "net/url" "reflect" @@ -36,6 +37,12 @@ type filterResultsWithCount struct { Items interface{} `json:"items"` } +type filterModifiers struct { + negate bool + caseInsensitive bool + emptyIsNull bool +} + func syncRetcode(isSync bool) int { if isSync { return http.StatusOK @@ -132,8 +139,9 @@ func (as *apiServer) buildFilter(req *http.Request, ff database.QueryFactory) (d return filter, nil } -func (as *apiServer) checkNoMods(ctx context.Context, negate, caseInsensitive bool, field, op string, filter database.Filter) (database.Filter, error) { - if negate || caseInsensitive { +func (as *apiServer) checkNoMods(ctx context.Context, mods filterModifiers, field, op string, filter database.Filter) (database.Filter, error) { + emptyModifiers := filterModifiers{} + if mods != emptyModifiers { return nil, i18n.NewError(ctx, i18n.MsgQueryOpUnsupportedMod, op, field) } return filter, nil @@ -141,18 +149,20 @@ func (as *apiServer) checkNoMods(ctx context.Context, negate, caseInsensitive bo func (as *apiServer) getCondition(ctx context.Context, fb database.FilterBuilder, field, value string) (filter database.Filter, err error) { - negate := false - caseInsensitive := false + mods := filterModifiers{} operator := make([]rune, 0, 2) prefixLength := 0 opFinder: for _, r := range value { switch r { case '!': - negate = true + mods.negate = true prefixLength++ case ':': - caseInsensitive = true + mods.caseInsensitive = true + prefixLength++ + case '?': + mods.emptyIsNull = true prefixLength++ case '>', '<': // Terminates the opFinder if it's the second character @@ -178,58 +188,65 @@ opFinder: } } - op := string(operator) - matchString := value[prefixLength:] + var matchString driver.Value = value[prefixLength:] + if mods.emptyIsNull && prefixLength == len(value) { + matchString = nil + } + return as.mapOperation(ctx, fb, field, matchString, string(operator), mods) +} + +func (as *apiServer) mapOperation(ctx context.Context, fb database.FilterBuilder, field string, matchString driver.Value, op string, mods filterModifiers) (filter database.Filter, err error) { + switch op { case ">=": - return as.checkNoMods(ctx, negate, caseInsensitive, field, op, fb.Gte(field, matchString)) + return as.checkNoMods(ctx, mods, field, op, fb.Gte(field, matchString)) case "<=": - return as.checkNoMods(ctx, negate, caseInsensitive, field, op, fb.Lte(field, matchString)) + return as.checkNoMods(ctx, mods, field, op, fb.Lte(field, matchString)) case ">", ">>": - return as.checkNoMods(ctx, negate, caseInsensitive, field, op, fb.Gt(field, matchString)) + return as.checkNoMods(ctx, mods, field, op, fb.Gt(field, matchString)) case "<", "<<": - return as.checkNoMods(ctx, negate, caseInsensitive, field, op, fb.Lt(field, matchString)) + return as.checkNoMods(ctx, mods, field, op, fb.Lt(field, matchString)) case "@": 
- if caseInsensitive { - if negate { + if mods.caseInsensitive { + if mods.negate { return fb.NotIContains(field, matchString), nil } return fb.IContains(field, matchString), nil } - if negate { + if mods.negate { return fb.NotContains(field, matchString), nil } return fb.Contains(field, matchString), nil case "^": - if caseInsensitive { - if negate { + if mods.caseInsensitive { + if mods.negate { return fb.NotIStartsWith(field, matchString), nil } return fb.IStartsWith(field, matchString), nil } - if negate { + if mods.negate { return fb.NotStartsWith(field, matchString), nil } return fb.StartsWith(field, matchString), nil case "$": - if caseInsensitive { - if negate { + if mods.caseInsensitive { + if mods.negate { return fb.NotIEndsWith(field, matchString), nil } return fb.IEndsWith(field, matchString), nil } - if negate { + if mods.negate { return fb.NotEndsWith(field, matchString), nil } return fb.EndsWith(field, matchString), nil default: - if caseInsensitive { - if negate { + if mods.caseInsensitive { + if mods.negate { return fb.NIeq(field, matchString), nil } return fb.IEq(field, matchString), nil } - if negate { + if mods.negate { return fb.Neq(field, matchString), nil } return fb.Eq(field, matchString), nil diff --git a/internal/apiserver/restfilter_test.go b/internal/apiserver/restfilter_test.go index 3eb0a56fe2..814a0ea56c 100644 --- a/internal/apiserver/restfilter_test.go +++ b/internal/apiserver/restfilter_test.go @@ -87,6 +87,14 @@ func TestBuildFilterEachCombo(t *testing.T) { testIndividualFilter(t, "tag=!$cat", "( tag !$ 'cat' )") testIndividualFilter(t, "tag=:$cat", "( tag :$ 'cat' )") testIndividualFilter(t, "tag=!:$cat", "( tag ;$ 'cat' )") + testIndividualFilter(t, "tag==", "( tag == '' )") + testIndividualFilter(t, "tag=!=", "( tag != '' )") + testIndividualFilter(t, "tag=:!=", "( tag ;= '' )") + testIndividualFilter(t, "tag=?", "( tag == null )") + testIndividualFilter(t, "tag=!?", "( tag != null )") + testIndividualFilter(t, "tag=?=", "( tag == null )") + testIndividualFilter(t, "tag=!?=", "( tag != null )") + testIndividualFilter(t, "tag=?:!=", "( tag ;= null )") } func testFailFilter(t *testing.T, queryString, errCode string) { diff --git a/internal/apiserver/route_get_chart_histogram.go b/internal/apiserver/route_get_chart_histogram.go index 3e71eeb803..645df2e26b 100644 --- a/internal/apiserver/route_get_chart_histogram.go +++ b/internal/apiserver/route_get_chart_histogram.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -46,11 +46,11 @@ var getChartHistogram = &oapispec.Route{ JSONOutputValue: func() interface{} { return []*fftypes.ChartHistogram{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { - startTime, err := fftypes.ParseString(r.QP["startTime"]) + startTime, err := fftypes.ParseTimeString(r.QP["startTime"]) if err != nil { return nil, i18n.NewError(r.Ctx, i18n.MsgInvalidChartNumberParam, "startTime") } - endTime, err := fftypes.ParseString(r.QP["endTime"]) + endTime, err := fftypes.ParseTimeString(r.QP["endTime"]) if err != nil { return nil, i18n.NewError(r.Ctx, i18n.MsgInvalidChartNumberParam, "endTime") } diff --git a/internal/apiserver/route_get_chart_histogram_test.go b/internal/apiserver/route_get_chart_histogram_test.go index 601c131622..d216bace62 100644 --- a/internal/apiserver/route_get_chart_histogram_test.go +++ b/internal/apiserver/route_get_chart_histogram_test.go @@ -65,8 +65,8 @@ func TestGetChartHistogramSuccess(t *testing.T) { req.Header.Set("Content-Type", "application/json; charset=utf-8") res := httptest.NewRecorder() - startTime, _ := fftypes.ParseString("1234567890") - endtime, _ := fftypes.ParseString("1234567891") + startTime, _ := fftypes.ParseTimeString("1234567890") + endtime, _ := fftypes.ParseTimeString("1234567891") o.On("GetChartHistogram", mock.Anything, "mynamespace", startTime.UnixNano(), endtime.UnixNano(), int64(30), database.CollectionName("test")). Return([]*fftypes.ChartHistogram{}, nil) diff --git a/internal/data/blobstore_test.go b/internal/data/blobstore_test.go index 3af70a35b2..ad9fca99e6 100644 --- a/internal/data/blobstore_test.go +++ b/internal/data/blobstore_test.go @@ -222,7 +222,7 @@ func TestUploadBlobSizeMismatch(t *testing.T) { } _, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{}, &fftypes.Multipart{Data: bytes.NewReader([]byte(b))}, false) - assert.Regexp(t, "FF10302", err) + assert.Regexp(t, "FF10303", err) } diff --git a/pkg/database/filter.go b/pkg/database/filter.go index 7284223a6b..6354ef52ff 100644 --- a/pkg/database/filter.go +++ b/pkg/database/filter.go @@ -24,6 +24,7 @@ import ( "strings" "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/pkg/fftypes" ) // Filter is the output of the builder @@ -214,11 +215,8 @@ func valueString(f FieldSerialization) string { v, _ := f.Value() switch tv := v.(type) { case nil: - return "null" + return fftypes.NullString case []byte: - if tv == nil { - return "null" - } return fmt.Sprintf("'%s'", tv) case int64: return strconv.FormatInt(tv, 10) @@ -343,9 +341,24 @@ func (f *baseFilter) Finalize() (fi *FilterInfo, err error) { if !ok { return nil, i18n.NewError(f.fb.ctx, i18n.MsgInvalidFilterField, name) } - value = field.getSerialization() - if err = value.Scan(f.value); err != nil { - return nil, i18n.WrapError(f.fb.ctx, err, i18n.MsgInvalidValueForFilterField, name) + skipScan := false + switch f.value.(type) { + case nil: + value = &nullField{} + skipScan = true + case string: + if field.filterAsString() { + value = &stringField{} + } else { + value = field.getSerialization() + } + default: + value = field.getSerialization() + } + if !skipScan { + if err = value.Scan(f.value); err != nil { + return nil, i18n.WrapError(f.fb.ctx, err, i18n.MsgInvalidValueForFilterField, name) + } } } diff --git a/pkg/database/filter_test.go b/pkg/database/filter_test.go index 38ed802199..9041663077 100644 --- a/pkg/database/filter_test.go +++ 
b/pkg/database/filter_test.go @@ -130,7 +130,7 @@ func TestBuildMessageUUIDConvert(t *testing.T) { fb.Eq("id", nilB32), ).Finalize() assert.NoError(t, err) - assert.Equal(t, "( id == '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id == '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id IN ['4066abdc-8bbd-4472-9d29-1a55b467f9b9'] ) && ( id == '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id != null ) && ( id == '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id != '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id == null ) && ( id == null )", f.String()) + assert.Equal(t, "( id == '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id == '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id IN ['4066abdc-8bbd-4472-9d29-1a55b467f9b9'] ) && ( id == '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id != null ) && ( id == '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id != '4066abdc-8bbd-4472-9d29-1a55b467f9b9' ) && ( id == '' ) && ( id == null )", f.String()) } func TestBuildMessageBytes32Convert(t *testing.T) { @@ -147,7 +147,7 @@ func TestBuildMessageBytes32Convert(t *testing.T) { fb.Eq("hash", nilB32), ).Finalize() assert.NoError(t, err) - assert.Equal(t, "( hash == '7f4806535f8b3d9bf178af053d2bbdb46047365466ed16bbb0732a71492bdaf0' ) && ( hash == '7f4806535f8b3d9bf178af053d2bbdb46047365466ed16bbb0732a71492bdaf0' ) && ( hash IN ['7f4806535f8b3d9bf178af053d2bbdb46047365466ed16bbb0732a71492bdaf0'] ) && ( hash == '7f4806535f8b3d9bf178af053d2bbdb46047365466ed16bbb0732a71492bdaf0' ) && ( hash != null ) && ( hash == null ) && ( hash == null )", f.String()) + assert.Equal(t, "( hash == '7f4806535f8b3d9bf178af053d2bbdb46047365466ed16bbb0732a71492bdaf0' ) && ( hash == '7f4806535f8b3d9bf178af053d2bbdb46047365466ed16bbb0732a71492bdaf0' ) && ( hash IN ['7f4806535f8b3d9bf178af053d2bbdb46047365466ed16bbb0732a71492bdaf0'] ) && ( hash == '7f4806535f8b3d9bf178af053d2bbdb46047365466ed16bbb0732a71492bdaf0' ) && ( hash != null ) && ( hash == '' ) && ( hash == null )", f.String()) } func TestBuildMessageIntConvert(t *testing.T) { fb := MessageQueryFactory.NewFilter(context.Background()) @@ -188,14 +188,13 @@ func TestBuildMessageStringConvert(t *testing.T) { fb.Lt("namespace", uint(444)), fb.Lt("namespace", uint32(555)), fb.Lt("namespace", uint64(666)), - fb.Lt("namespace", nil), fb.Lt("namespace", *u), fb.Lt("namespace", u), fb.Lt("namespace", *b32), fb.Lt("namespace", b32), ).Finalize() assert.NoError(t, err) - assert.Equal(t, "( namespace << '111' ) && ( namespace << '222' ) && ( namespace << '333' ) && ( namespace << '444' ) && ( namespace << '555' ) && ( namespace << '666' ) && ( namespace << '' ) && ( namespace << '3f96e0d5-a10e-47c6-87a0-f2e7604af179' ) && ( namespace << '3f96e0d5-a10e-47c6-87a0-f2e7604af179' ) && ( namespace << '3f96e0d5a10e47c687a0f2e7604af17900000000000000000000000000000000' ) && ( namespace << '3f96e0d5a10e47c687a0f2e7604af17900000000000000000000000000000000' )", f.String()) + assert.Equal(t, "( namespace << '111' ) && ( namespace << '222' ) && ( namespace << '333' ) && ( namespace << '444' ) && ( namespace << '555' ) && ( namespace << '666' ) && ( namespace << '3f96e0d5-a10e-47c6-87a0-f2e7604af179' ) && ( namespace << '3f96e0d5-a10e-47c6-87a0-f2e7604af179' ) && ( namespace << '3f96e0d5a10e47c687a0f2e7604af17900000000000000000000000000000000' ) && ( namespace << '3f96e0d5a10e47c687a0f2e7604af17900000000000000000000000000000000' )", f.String()) } func TestBuildMessageBoolConvert(t *testing.T) { @@ -216,7 +215,7 @@ func TestBuildMessageBoolConvert(t *testing.T) { fb.Eq("masked", nil), ).Finalize() 
assert.NoError(t, err) - assert.Equal(t, "( masked == false ) && ( masked == true ) && ( masked == false ) && ( masked == true ) && ( masked == true ) && ( masked == false ) && ( masked == true ) && ( masked == true ) && ( masked == true ) && ( masked == true ) && ( masked == true ) && ( masked == true ) && ( masked == false )", f.String()) + assert.Equal(t, "( masked == false ) && ( masked == true ) && ( masked == false ) && ( masked == true ) && ( masked == true ) && ( masked == false ) && ( masked == true ) && ( masked == true ) && ( masked == true ) && ( masked == true ) && ( masked == true ) && ( masked == true ) && ( masked == null )", f.String()) } func TestBuildMessageJSONConvert(t *testing.T) { @@ -239,7 +238,7 @@ func TestBuildFFNameArrayConvert(t *testing.T) { fb.Eq("topics", []byte(`test2`)), ).Finalize() assert.NoError(t, err) - assert.Equal(t, `( topics == '' ) && ( topics == 'test1' ) && ( topics == 'test2' )`, f.String()) + assert.Equal(t, `( topics == null ) && ( topics == 'test1' ) && ( topics == 'test2' )`, f.String()) } func TestBuildMessageFailStringConvert(t *testing.T) { diff --git a/pkg/database/query_fields.go b/pkg/database/query_fields.go index 90088212a4..8f088e3fb6 100644 --- a/pkg/database/query_fields.go +++ b/pkg/database/query_fields.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -68,8 +68,18 @@ type FieldSerialization interface { type Field interface { getSerialization() FieldSerialization + filterAsString() bool } +// nullField is a special FieldSerialization used to represent nil in queries +type nullField struct{} + +func (f *nullField) Scan(src interface{}) error { + return nil +} +func (f *nullField) Value() (driver.Value, error) { return nil, nil } +func (f *nullField) String() string { return fftypes.NullString } + type StringField struct{} type stringField struct{ s string } @@ -102,6 +112,7 @@ func (f *stringField) Scan(src interface{}) error { case fftypes.Bytes32: f.s = tv.String() case nil: + f.s = "" default: if reflect.TypeOf(tv).Kind() == reflect.String { // This is helpful for status enums @@ -115,6 +126,7 @@ func (f *stringField) Scan(src interface{}) error { func (f *stringField) Value() (driver.Value, error) { return f.s, nil } func (f *stringField) String() string { return f.s } func (f *StringField) getSerialization() FieldSerialization { return &stringField{} } +func (f *StringField) filterAsString() bool { return true } type UUIDField struct{} type uuidField struct{ u *fftypes.UUID } @@ -146,6 +158,7 @@ func (f *uuidField) Scan(src interface{}) (err error) { copy(u[:], tv[0:16]) f.u = &u case nil: + f.u = nil default: return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, f.u) } @@ -154,6 +167,7 @@ func (f *uuidField) Scan(src interface{}) (err error) { func (f *uuidField) Value() (driver.Value, error) { return f.u.Value() } func (f *uuidField) String() string { return fmt.Sprintf("%v", f.u) } func (f *UUIDField) getSerialization() FieldSerialization { return &uuidField{} } +func (f *UUIDField) filterAsString() bool { return true } type Bytes32Field struct{} type bytes32Field struct{ b32 *fftypes.Bytes32 } @@ -173,6 +187,7 @@ func (f *bytes32Field) Scan(src interface{}) (err error) { b32 := tv f.b32 = &b32 case nil: + f.b32 = nil default: return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, f.b32) } @@ -181,6 +196,7 @@ func (f *bytes32Field) Scan(src interface{}) (err error) { func (f *bytes32Field) Value() 
(driver.Value, error) { return f.b32.Value() } func (f *bytes32Field) String() string { return fmt.Sprintf("%v", f.b32) } func (f *Bytes32Field) getSerialization() FieldSerialization { return &bytes32Field{} } +func (f *Bytes32Field) filterAsString() bool { return true } type Int64Field struct{} type int64Field struct{ i int64 } @@ -212,6 +228,7 @@ func (f *int64Field) Scan(src interface{}) (err error) { func (f *int64Field) Value() (driver.Value, error) { return f.i, nil } func (f *int64Field) String() string { return fmt.Sprintf("%d", f.i) } func (f *Int64Field) getSerialization() FieldSerialization { return &int64Field{} } +func (f *Int64Field) filterAsString() bool { return false } type TimeField struct{} type timeField struct{ t *fftypes.FFTime } @@ -223,7 +240,7 @@ func (f *timeField) Scan(src interface{}) (err error) { case int64: f.t = fftypes.UnixTime(tv) case string: - f.t, err = fftypes.ParseString(tv) + f.t, err = fftypes.ParseTimeString(tv) return err case fftypes.FFTime: f.t = &tv @@ -233,7 +250,6 @@ func (f *timeField) Scan(src interface{}) (err error) { return nil case nil: f.t = nil - return nil default: return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, f.t) } @@ -247,6 +263,7 @@ func (f *timeField) Value() (driver.Value, error) { } func (f *timeField) String() string { return fmt.Sprintf("%v", f.t) } func (f *TimeField) getSerialization() FieldSerialization { return &timeField{} } +func (f *TimeField) filterAsString() bool { return false } type JSONField struct{} type jsonField struct{ b []byte } @@ -269,6 +286,7 @@ func (f *jsonField) Scan(src interface{}) (err error) { func (f *jsonField) Value() (driver.Value, error) { return f.b, nil } func (f *jsonField) String() string { return string(f.b) } func (f *JSONField) getSerialization() FieldSerialization { return &jsonField{} } +func (f *JSONField) filterAsString() bool { return true } type FFNameArrayField struct{} type ffNameArrayField struct{ na fftypes.FFNameArray } @@ -279,6 +297,7 @@ func (f *ffNameArrayField) Scan(src interface{}) (err error) { func (f *ffNameArrayField) Value() (driver.Value, error) { return f.na.String(), nil } func (f *ffNameArrayField) String() string { return f.na.String() } func (f *FFNameArrayField) getSerialization() FieldSerialization { return &ffNameArrayField{} } +func (f *FFNameArrayField) filterAsString() bool { return true } type BoolField struct{} type boolField struct{ b bool } @@ -311,3 +330,4 @@ func (f *boolField) Scan(src interface{}) (err error) { func (f *boolField) Value() (driver.Value, error) { return f.b, nil } func (f *boolField) String() string { return fmt.Sprintf("%t", f.b) } func (f *BoolField) getSerialization() FieldSerialization { return &boolField{} } +func (f *BoolField) filterAsString() bool { return false } diff --git a/pkg/database/query_fields_test.go b/pkg/database/query_fields_test.go new file mode 100644 index 0000000000..f0a4c13508 --- /dev/null +++ b/pkg/database/query_fields_test.go @@ -0,0 +1,164 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package database + +import ( + "testing" + "time" + + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" +) + +func TestNullField(t *testing.T) { + + f := nullField{} + v, err := f.Value() + assert.NoError(t, err) + assert.Nil(t, v) + + err = f.Scan("anything") + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Nil(t, v) + + assert.Equal(t, "null", f.String()) +} + +func TestStringField(t *testing.T) { + + f := stringField{} + + err := f.Scan("test") + assert.NoError(t, err) + v, err := f.Value() + assert.NoError(t, err) + assert.Equal(t, "test", v) + + err = f.Scan(nil) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Equal(t, "", v) + +} + +func TestUUIDField(t *testing.T) { + + f := uuidField{} + + err := f.Scan("") + assert.NoError(t, err) + v, err := f.Value() + assert.NoError(t, err) + assert.Nil(t, v) + + u1 := fftypes.NewUUID() + err = f.Scan(u1.String()) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Equal(t, v, u1.String()) + + err = f.Scan(nil) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Nil(t, v) + +} + +func TestBytes32Field(t *testing.T) { + + f := bytes32Field{} + + err := f.Scan("") + assert.NoError(t, err) + v, err := f.Value() + assert.NoError(t, err) + assert.Nil(t, v) + + b1 := fftypes.NewRandB32() + err = f.Scan(b1.String()) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Equal(t, v, b1.String()) + + err = f.Scan(nil) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Nil(t, v) + +} + +func TestTimeField(t *testing.T) { + + f := timeField{} + + now := time.Now() + err := f.Scan(now.Format(time.RFC3339Nano)) + assert.NoError(t, err) + v, err := f.Value() + assert.NoError(t, err) + assert.Equal(t, v, now.UnixNano()) + + err = f.Scan(nil) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Nil(t, v) + +} + +func TestJSONField(t *testing.T) { + + f := jsonField{} + + err := f.Scan("{}") + assert.NoError(t, err) + v, err := f.Value() + assert.NoError(t, err) + assert.Equal(t, v, []byte("{}")) + + err = f.Scan(nil) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Nil(t, v) + +} + +func TestBoolField(t *testing.T) { + + f := boolField{} + + err := f.Scan("true") + assert.NoError(t, err) + v, err := f.Value() + assert.NoError(t, err) + assert.True(t, v.(bool)) + + err = f.Scan(nil) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.False(t, v.(bool)) + +} diff --git a/pkg/fftypes/byteable.go b/pkg/fftypes/byteable.go index 7b4edf1316..6d7112ef6b 100644 --- a/pkg/fftypes/byteable.go +++ b/pkg/fftypes/byteable.go @@ -26,7 +26,7 @@ import ( ) const ( - nullString = "null" + NullString = "null" ) // Byteable uses raw encode/decode to preserve field order, and can handle any types of field. 
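The exported `NullString` constant above is also what the query layer renders for null match values: a `?` modifier with an empty match string is mapped to a nil value, and the new `nullField` serialization prints it as `null`. Below is a minimal sketch of that behaviour, assuming the `database` package layout shown in this patch; the `tag` field and the wrapping `And` are purely illustrative.

    package main

    import (
        "context"
        "fmt"

        "github.com/hyperledger/firefly/pkg/database"
    )

    func main() {
        // Build the same condition the REST layer builds for a "tag=?" query
        // parameter: an equality check against a nil (null) match value.
        fb := database.MessageQueryFactory.NewFilter(context.Background())
        f, err := fb.And(fb.Eq("tag", nil)).Finalize()
        if err != nil {
            panic(err)
        }
        fmt.Println(f.String()) // ( tag == null )
    }
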
@@ -46,7 +46,7 @@ func (h *Byteable) UnmarshalJSON(b []byte) error { func (h Byteable) MarshalJSON() ([]byte, error) { if h == nil { - return []byte(nullString), nil + return []byte(NullString), nil } return h, nil } @@ -92,7 +92,7 @@ func (h Byteable) JSONObjectNowarn() JSONObject { func (h *Byteable) Scan(src interface{}) error { switch src := src.(type) { case nil: - nullVal := []byte(nullString) + nullVal := []byte(NullString) *h = nullVal return nil case []byte: diff --git a/pkg/fftypes/byteable_test.go b/pkg/fftypes/byteable_test.go index 3c5f848653..b1f3af56b4 100644 --- a/pkg/fftypes/byteable_test.go +++ b/pkg/fftypes/byteable_test.go @@ -77,7 +77,7 @@ func TestByteableMarshalNull(t *testing.T) { var pb Byteable b, err := pb.MarshalJSON() assert.NoError(t, err) - assert.Equal(t, nullString, string(b)) + assert.Equal(t, NullString, string(b)) } func TestByteableUnmarshalFail(t *testing.T) { @@ -94,7 +94,7 @@ func TestScan(t *testing.T) { var h Byteable assert.NoError(t, h.Scan(nil)) - assert.Equal(t, []byte(nullString), []byte(h)) + assert.Equal(t, []byte(NullString), []byte(h)) assert.NoError(t, h.Scan(`{"some": "stuff"}`)) assert.Equal(t, "stuff", h.JSONObject().GetString("some")) diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index e65d978894..7cad9d70d3 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -60,7 +60,7 @@ type DatatypeRef struct { func (dr *DatatypeRef) String() string { if dr == nil { - return nullString + return NullString } return fmt.Sprintf("%s/%s", dr.Name, dr.Version) } @@ -84,9 +84,9 @@ func CheckValidatorType(ctx context.Context, validator ValidatorType) error { func (d *Data) CalcHash(ctx context.Context) (*Bytes32, error) { if d.Value == nil { - d.Value = Byteable(nullString) + d.Value = Byteable(NullString) } - valueIsNull := d.Value.String() == nullString + valueIsNull := d.Value.String() == NullString if valueIsNull && (d.Blob == nil || d.Blob.Hash == nil) { return nil, i18n.NewError(ctx, i18n.MsgDataValueIsNull) } diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index 3b72fb317f..bb686f53cc 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -28,7 +28,7 @@ import ( func TestDatatypeReference(t *testing.T) { var dr *DatatypeRef - assert.Equal(t, nullString, dr.String()) + assert.Equal(t, NullString, dr.String()) dr = &DatatypeRef{ Name: "customer", Version: "0.0.1", diff --git a/pkg/fftypes/timeutils.go b/pkg/fftypes/timeutils.go index 92e2ae13a3..cfb40d212b 100644 --- a/pkg/fftypes/timeutils.go +++ b/pkg/fftypes/timeutils.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -67,7 +67,7 @@ func (ft *FFTime) MarshalJSON() ([]byte, error) { return json.Marshal(ft.String()) } -func ParseString(str string) (*FFTime, error) { +func ParseTimeString(str string) (*FFTime, error) { t, err := time.Parse(time.RFC3339Nano, str) if err != nil { var unixTime int64 @@ -96,7 +96,7 @@ func (ft *FFTime) UnixNano() int64 { } func (ft *FFTime) UnmarshalText(b []byte) error { - t, err := ParseString(string(b)) + t, err := ParseTimeString(string(b)) if err != nil { return err } @@ -112,7 +112,7 @@ func (ft *FFTime) Scan(src interface{}) error { return nil case string: - t, err := ParseString(src) + t, err := ParseTimeString(src) if err != nil { return err } From 4e0a334e08d74297833748117add472c45e3bcda Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 7 Jan 2022 09:07:11 -0500 Subject: [PATCH 05/21] Handle case sensitivity in SQLite Signed-off-by: Peter Broadhurst --- docs/swagger/swagger.yaml | 5 +++ internal/database/postgres/postgres.go | 9 ++++-- internal/database/postgres/postgres_test.go | 2 +- internal/database/sqlcommon/filter_sql.go | 32 ++++++++++++++----- .../database/sqlcommon/filter_sql_test.go | 28 ++++++++++++++++ internal/database/sqlcommon/provider.go | 18 +++++++++-- .../database/sqlcommon/provider_mock_test.go | 6 ++-- .../sqlcommon/provider_sqlitego_test.go | 7 ++-- internal/database/sqlcommon/sqlcommon.go | 18 +++++++---- internal/database/sqlite3/sqlite3.go | 22 ++++++++++--- internal/database/sqlite3/sqlite3_test.go | 8 ++++- pkg/database/plugin.go | 1 + 12 files changed, 124 insertions(+), 32 deletions(-) diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index db0af644d3..e416fa07f4 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -1070,6 +1070,11 @@ paths: name: validator schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: value + schema: + type: string - description: Sort field. For multi-field sort use comma separated values (or multiple query values) with '-' prefix for descending in: query diff --git a/internal/database/postgres/postgres.go b/internal/database/postgres/postgres.go index e1f2f5620c..4e88256135 100644 --- a/internal/database/postgres/postgres.go +++ b/internal/database/postgres/postgres.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -49,8 +49,11 @@ func (psql *Postgres) MigrationsDir() string { return psql.Name() } -func (psql *Postgres) PlaceholderFormat() sq.PlaceholderFormat { - return sq.Dollar +func (psql *Postgres) Features() sqlcommon.SQLFeatures { + features := sqlcommon.DefaultSQLProviderFeatures() + features.PlaceholderFormat = sq.Dollar + features.UseILIKE = false // slower than lower() + return features } func (psql *Postgres) UpdateInsertForSequenceReturn(insert sq.InsertBuilder) (sq.InsertBuilder, bool) { diff --git a/internal/database/postgres/postgres_test.go b/internal/database/postgres/postgres_test.go index 53cea8930b..67be063596 100644 --- a/internal/database/postgres/postgres_test.go +++ b/internal/database/postgres/postgres_test.go @@ -39,7 +39,7 @@ func TestPostgresProvider(t *testing.T) { assert.Error(t, err) assert.Equal(t, "postgres", psql.Name()) - assert.Equal(t, sq.Dollar, psql.PlaceholderFormat()) + assert.Equal(t, sq.Dollar, psql.Features().PlaceholderFormat) insert := sq.Insert("test").Columns("col1").Values("val1") insert, query := psql.UpdateInsertForSequenceReturn(insert) diff --git a/internal/database/sqlcommon/filter_sql.go b/internal/database/sqlcommon/filter_sql.go index a62569af7c..937a8e4ff6 100644 --- a/internal/database/sqlcommon/filter_sql.go +++ b/internal/database/sqlcommon/filter_sql.go @@ -141,6 +141,22 @@ func (s *SQLCommon) mapField(tableName, fieldName string, tm map[string]string) return field } +// newILike uses ILIKE if supported by DB, otherwise the "lower" approach +func (s *SQLCommon) newILike(field, value string) sq.Sqlizer { + if s.features.UseILIKE { + return sq.ILike{field: value} + } + return sq.Like{fmt.Sprintf("lower(%s)", field): strings.ToLower(value)} +} + +// newNotILike uses ILIKE if supported by DB, otherwise the "lower" approach +func (s *SQLCommon) newNotILike(field, value string) sq.Sqlizer { + if s.features.UseILIKE { + return sq.NotILike{field: value} + } + return sq.NotLike{fmt.Sprintf("lower(%s)", field): strings.ToLower(value)} +} + func (s *SQLCommon) filterOp(ctx context.Context, tableName string, op *database.FilterInfo, tm map[string]string) (sq.Sqlizer, error) { switch op.Op { case database.FilterOpOr: @@ -150,13 +166,13 @@ func (s *SQLCommon) filterOp(ctx context.Context, tableName string, op *database case database.FilterOpEq: return sq.Eq{s.mapField(tableName, op.Field, tm): op.Value}, nil case database.FilterOpIEq: - return sq.ILike{s.mapField(tableName, op.Field, tm): s.escapeLike(op.Value)}, nil + return s.newILike(s.mapField(tableName, op.Field, tm), s.escapeLike(op.Value)), nil case database.FilterOpIn: return sq.Eq{s.mapField(tableName, op.Field, tm): op.Values}, nil case database.FilterOpNeq: return sq.NotEq{s.mapField(tableName, op.Field, tm): op.Value}, nil case database.FilterOpNIeq: - return sq.NotILike{s.mapField(tableName, op.Field, tm): s.escapeLike(op.Value)}, nil + return s.newNotILike(s.mapField(tableName, op.Field, tm), s.escapeLike(op.Value)), nil case database.FilterOpNotIn: return sq.NotEq{s.mapField(tableName, op.Field, tm): op.Values}, nil case database.FilterOpCont: @@ -164,25 +180,25 @@ func (s *SQLCommon) filterOp(ctx context.Context, tableName string, op *database case database.FilterOpNotCont: return sq.NotLike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%%%s%%", s.escapeLike(op.Value))}, nil case database.FilterOpICont: - return sq.ILike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%%%s%%", s.escapeLike(op.Value))}, nil + return 
s.newILike(s.mapField(tableName, op.Field, tm), fmt.Sprintf("%%%s%%", s.escapeLike(op.Value))), nil case database.FilterOpNotICont: - return sq.NotILike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%s%%", s.escapeLike(op.Value))}, nil + return s.newNotILike(s.mapField(tableName, op.Field, tm), fmt.Sprintf("%s%%", s.escapeLike(op.Value))), nil case database.FilterOpStartsWith: return sq.Like{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%s%%", s.escapeLike(op.Value))}, nil case database.FilterOpNotStartsWith: return sq.NotLike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%s%%", s.escapeLike(op.Value))}, nil case database.FilterOpIStartsWith: - return sq.ILike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%s%%", s.escapeLike(op.Value))}, nil + return s.newILike(s.mapField(tableName, op.Field, tm), fmt.Sprintf("%s%%", s.escapeLike(op.Value))), nil case database.FilterOpNotIStartsWith: - return sq.NotILike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%s%%", s.escapeLike(op.Value))}, nil + return s.newNotILike(s.mapField(tableName, op.Field, tm), fmt.Sprintf("%s%%", s.escapeLike(op.Value))), nil case database.FilterOpEndsWith: return sq.Like{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%%%s", s.escapeLike(op.Value))}, nil case database.FilterOpNotEndsWith: return sq.NotLike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%%%s", s.escapeLike(op.Value))}, nil case database.FilterOpIEndsWith: - return sq.ILike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%%%s", s.escapeLike(op.Value))}, nil + return s.newILike(s.mapField(tableName, op.Field, tm), fmt.Sprintf("%%%s", s.escapeLike(op.Value))), nil case database.FilterOpNotIEndsWith: - return sq.NotILike{s.mapField(tableName, op.Field, tm): fmt.Sprintf("%%%s", s.escapeLike(op.Value))}, nil + return s.newNotILike(s.mapField(tableName, op.Field, tm), fmt.Sprintf("%%%s", s.escapeLike(op.Value))), nil case database.FilterOpGt: return sq.Gt{s.mapField(tableName, op.Field, tm): op.Value}, nil case database.FilterOpGte: diff --git a/internal/database/sqlcommon/filter_sql_test.go b/internal/database/sqlcommon/filter_sql_test.go index f1c241d49d..8ae9ff9544 100644 --- a/internal/database/sqlcommon/filter_sql_test.go +++ b/internal/database/sqlcommon/filter_sql_test.go @@ -197,3 +197,31 @@ func TestSQLQueryFactoryDefaultSortBadType(t *testing.T) { s.filterSelect(context.Background(), "", sel, f, nil, []interface{}{100}) }) } + +func TestILIKE(t *testing.T) { + s, _ := newMockProvider().init() + + s.features.UseILIKE = true + q := s.newILike("test", "value") + sqlString, _, _ := q.ToSql() + assert.Regexp(t, "ILIKE", sqlString) + + s.features.UseILIKE = false + q = s.newILike("test", "value") + sqlString, _, _ = q.ToSql() + assert.Regexp(t, "lower\\(test\\)", sqlString) +} + +func TestNotILIKE(t *testing.T) { + s, _ := newMockProvider().init() + + s.features.UseILIKE = true + q := s.newNotILike("test", "value") + sqlString, _, _ := q.ToSql() + assert.Regexp(t, "ILIKE", sqlString) + + s.features.UseILIKE = false + q = s.newNotILike("test", "value") + sqlString, _, _ = q.ToSql() + assert.Regexp(t, "lower\\(test\\)", sqlString) +} diff --git a/internal/database/sqlcommon/provider.go b/internal/database/sqlcommon/provider.go index 674cb0e0bc..ac6e14b05a 100644 --- a/internal/database/sqlcommon/provider.go +++ b/internal/database/sqlcommon/provider.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -27,6 +27,18 @@ const ( sequenceColumn = "seq" ) +type SQLFeatures struct { + UseILIKE bool + PlaceholderFormat sq.PlaceholderFormat +} + +func DefaultSQLProviderFeatures() SQLFeatures { + return SQLFeatures{ + UseILIKE: false, + PlaceholderFormat: sq.Dollar, + } +} + // Provider defines the interface an individual provider muse implement to customize the SQLCommon implementation type Provider interface { @@ -42,8 +54,8 @@ type Provider interface { // GetDriver returns the driver implementation GetMigrationDriver(*sql.DB) (migratedb.Driver, error) - // PlaceholderFormat gets the Squirrel placeholder format - PlaceholderFormat() sq.PlaceholderFormat + // Features returns fields + Features() SQLFeatures // UpdateInsertForSequenceReturn updates the INSERT query for returning the Sequence, and returns whether it needs to be run as a query to return the Sequence field UpdateInsertForSequenceReturn(insert sq.InsertBuilder) (updatedInsert sq.InsertBuilder, runAsQuery bool) diff --git a/internal/database/sqlcommon/provider_mock_test.go b/internal/database/sqlcommon/provider_mock_test.go index 19479d1876..8c3ac4a9e3 100644 --- a/internal/database/sqlcommon/provider_mock_test.go +++ b/internal/database/sqlcommon/provider_mock_test.go @@ -67,8 +67,10 @@ func (mp *mockProvider) MigrationsDir() string { return mp.Name() } -func (mp *mockProvider) PlaceholderFormat() sq.PlaceholderFormat { - return sq.Dollar +func (psql *mockProvider) Features() SQLFeatures { + features := DefaultSQLProviderFeatures() + features.UseILIKE = true + return features } func (mp *mockProvider) UpdateInsertForSequenceReturn(insert sq.InsertBuilder) (sq.InsertBuilder, bool) { diff --git a/internal/database/sqlcommon/provider_sqlitego_test.go b/internal/database/sqlcommon/provider_sqlitego_test.go index 45685a40bb..f67cee4da9 100644 --- a/internal/database/sqlcommon/provider_sqlitego_test.go +++ b/internal/database/sqlcommon/provider_sqlitego_test.go @@ -78,8 +78,11 @@ func (tp *sqliteGoTestProvider) MigrationsDir() string { return "sqlite" } -func (tp *sqliteGoTestProvider) PlaceholderFormat() sq.PlaceholderFormat { - return sq.Dollar +func (psql *sqliteGoTestProvider) Features() SQLFeatures { + features := DefaultSQLProviderFeatures() + features.PlaceholderFormat = sq.Dollar + features.UseILIKE = false // Not supported + return features } func (tp *sqliteGoTestProvider) UpdateInsertForSequenceReturn(insert sq.InsertBuilder) (sq.InsertBuilder, bool) { diff --git a/internal/database/sqlcommon/sqlcommon.go b/internal/database/sqlcommon/sqlcommon.go index 8fb49db4a6..d0e85c3761 100644 --- a/internal/database/sqlcommon/sqlcommon.go +++ b/internal/database/sqlcommon/sqlcommon.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -38,6 +38,7 @@ type SQLCommon struct { capabilities *database.Capabilities callbacks database.Callbacks provider Provider + features SQLFeatures } type txContextKey struct{} @@ -51,7 +52,10 @@ func (s *SQLCommon) Init(ctx context.Context, provider Provider, prefix config.P s.capabilities = capabilities s.callbacks = callbacks s.provider = provider - if s.provider == nil || s.provider.PlaceholderFormat() == nil || sequenceColumn == "" { + if s.provider != nil { + s.features = s.provider.Features() + } + if s.provider == nil || s.features.PlaceholderFormat == nil { log.L(ctx).Errorf("Invalid SQL options from provider '%T'", s.provider) return i18n.NewError(ctx, i18n.MsgDBInitFailed) } @@ -154,7 +158,7 @@ func (s *SQLCommon) queryTx(ctx context.Context, tx *txWrapper, q sq.SelectBuild } l := log.L(ctx) - sqlQuery, args, err := q.PlaceholderFormat(s.provider.PlaceholderFormat()).ToSql() + sqlQuery, args, err := q.PlaceholderFormat(s.features.PlaceholderFormat).ToSql() if err != nil { return nil, tx, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } @@ -190,7 +194,7 @@ func (s *SQLCommon) countQuery(ctx context.Context, tx *txWrapper, tableName str countExpr = "*" } q := sq.Select(fmt.Sprintf("COUNT(%s)", countExpr)).From(tableName).Where(fop) - sqlQuery, args, err := q.PlaceholderFormat(s.provider.PlaceholderFormat()).ToSql() + sqlQuery, args, err := q.PlaceholderFormat(s.features.PlaceholderFormat).ToSql() if err != nil { return count, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } @@ -233,7 +237,7 @@ func (s *SQLCommon) insertTx(ctx context.Context, tx *txWrapper, q sq.InsertBuil l := log.L(ctx) q, useQuery := s.provider.UpdateInsertForSequenceReturn(q) - sqlQuery, args, err := q.PlaceholderFormat(s.provider.PlaceholderFormat()).ToSql() + sqlQuery, args, err := q.PlaceholderFormat(s.features.PlaceholderFormat).ToSql() if err != nil { return -1, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } @@ -264,7 +268,7 @@ func (s *SQLCommon) insertTx(ctx context.Context, tx *txWrapper, q sq.InsertBuil func (s *SQLCommon) deleteTx(ctx context.Context, tx *txWrapper, q sq.DeleteBuilder, postCommit func()) error { l := log.L(ctx) - sqlQuery, args, err := q.PlaceholderFormat(s.provider.PlaceholderFormat()).ToSql() + sqlQuery, args, err := q.PlaceholderFormat(s.features.PlaceholderFormat).ToSql() if err != nil { return i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } @@ -289,7 +293,7 @@ func (s *SQLCommon) deleteTx(ctx context.Context, tx *txWrapper, q sq.DeleteBuil func (s *SQLCommon) updateTx(ctx context.Context, tx *txWrapper, q sq.UpdateBuilder, postCommit func()) (int64, error) { l := log.L(ctx) - sqlQuery, args, err := q.PlaceholderFormat(s.provider.PlaceholderFormat()).ToSql() + sqlQuery, args, err := q.PlaceholderFormat(s.features.PlaceholderFormat).ToSql() if err != nil { return -1, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } diff --git a/internal/database/sqlite3/sqlite3.go b/internal/database/sqlite3/sqlite3.go index dd21962135..99cc4b6ad9 100644 --- a/internal/database/sqlite3/sqlite3.go +++ b/internal/database/sqlite3/sqlite3.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -32,15 +32,24 @@ import ( "github.com/hyperledger/firefly/pkg/database" // Import the derivation of SQLite3 CGO suported by golang-migrate - _ "github.com/mattn/go-sqlite3" + "github.com/mattn/go-sqlite3" ) type SQLite3 struct { sqlcommon.SQLCommon } +func connHook(conn *sqlite3.SQLiteConn) error { + _, err := conn.Exec("PRAGMA case_sensitive_like=ON;", nil) + return err +} + func (sqlite *SQLite3) Init(ctx context.Context, prefix config.Prefix, callbacks database.Callbacks) error { capabilities := &database.Capabilities{} + sql.Register("sqlite3_ff", + &sqlite3.SQLiteDriver{ + ConnectHook: connHook, + }) return sqlite.SQLCommon.Init(ctx, sqlite, prefix, callbacks, capabilities) } @@ -52,8 +61,11 @@ func (sqlite *SQLite3) MigrationsDir() string { return "sqlite" } -func (sqlite *SQLite3) PlaceholderFormat() sq.PlaceholderFormat { - return sq.Dollar +func (sqlite *SQLite3) Features() sqlcommon.SQLFeatures { + features := sqlcommon.DefaultSQLProviderFeatures() + features.PlaceholderFormat = sq.Dollar + features.UseILIKE = false // Not supported + return features } func (sqlite *SQLite3) UpdateInsertForSequenceReturn(insert sq.InsertBuilder) (sq.InsertBuilder, bool) { @@ -61,7 +73,7 @@ func (sqlite *SQLite3) UpdateInsertForSequenceReturn(insert sq.InsertBuilder) (s } func (sqlite *SQLite3) Open(url string) (*sql.DB, error) { - return sql.Open("sqlite3", url) + return sql.Open("sqlite3_ff", url) } func (sqlite *SQLite3) GetMigrationDriver(db *sql.DB) (migratedb.Driver, error) { diff --git a/internal/database/sqlite3/sqlite3_test.go b/internal/database/sqlite3/sqlite3_test.go index a5390829f6..26d10ae5fe 100644 --- a/internal/database/sqlite3/sqlite3_test.go +++ b/internal/database/sqlite3/sqlite3_test.go @@ -40,8 +40,14 @@ func TestSQLite3GoProvider(t *testing.T) { _, err = sqlite.GetMigrationDriver(sqlite.DB()) assert.Error(t, err) + db, err := sqlite.Open("file::memory:") + assert.NoError(t, err) + conn, err := db.Conn(context.Background()) + assert.NoError(t, err) + conn.Close() + assert.Equal(t, "sqlite3", sqlite.Name()) - assert.Equal(t, sq.Dollar, sqlite.PlaceholderFormat()) + assert.Equal(t, sq.Dollar, sqlite.Features().PlaceholderFormat) insert := sq.Insert("test").Columns("col1").Values("val1") insert, query := sqlite.UpdateInsertForSequenceReturn(insert) diff --git a/pkg/database/plugin.go b/pkg/database/plugin.go index dea35386e4..cfe0479f40 100644 --- a/pkg/database/plugin.go +++ b/pkg/database/plugin.go @@ -646,6 +646,7 @@ var DataQueryFactory = &queryFields{ "blob.name": &StringField{}, "blob.size": &Int64Field{}, "created": &TimeField{}, + "value": &JSONField{}, } // DatatypeQueryFactory filter fields for data definitions From d97fed193a44a63780af9a24d1fdbaaf98b42b51 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 7 Jan 2022 09:19:19 -0500 Subject: [PATCH 06/21] Avoid double reigstering SQLite driver Signed-off-by: Peter Broadhurst --- internal/database/sqlite3/sqlite3.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/internal/database/sqlite3/sqlite3.go b/internal/database/sqlite3/sqlite3.go index 99cc4b6ad9..90e4cf3463 100644 --- a/internal/database/sqlite3/sqlite3.go +++ b/internal/database/sqlite3/sqlite3.go @@ -35,6 +35,8 @@ import ( "github.com/mattn/go-sqlite3" ) +var ffSQLiteRegistered = false + type SQLite3 struct { sqlcommon.SQLCommon } @@ -46,10 +48,13 @@ func connHook(conn *sqlite3.SQLiteConn) error { func (sqlite *SQLite3) Init(ctx context.Context, prefix config.Prefix, 
callbacks database.Callbacks) error { capabilities := &database.Capabilities{} - sql.Register("sqlite3_ff", - &sqlite3.SQLiteDriver{ - ConnectHook: connHook, - }) + if !ffSQLiteRegistered { + sql.Register("sqlite3_ff", + &sqlite3.SQLiteDriver{ + ConnectHook: connHook, + }) + ffSQLiteRegistered = true + } return sqlite.SQLCommon.Init(ctx, sqlite, prefix, callbacks, capabilities) } From f59e6866109353c3c67a5a70fb62f7533fdd9494 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 7 Jan 2022 12:57:27 -0500 Subject: [PATCH 07/21] Provide error messages in the case of bad combinations Signed-off-by: Peter Broadhurst --- internal/i18n/en_translations.go | 2 ++ pkg/database/filter.go | 32 +++++++++++++++++-- pkg/database/filter_test.go | 16 ++++++++++ pkg/database/query_fields.go | 13 +++++++- pkg/database/query_fields_test.go | 52 +++++++++++++++++++++++++++++++ pkg/fftypes/namearray.go | 3 +- 6 files changed, 114 insertions(+), 4 deletions(-) diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go index c5db04345a..84ee6b7f77 100644 --- a/internal/i18n/en_translations.go +++ b/internal/i18n/en_translations.go @@ -222,4 +222,6 @@ var ( MsgQueryOpUnsupportedMod = ffm("FF10302", "Operation '%s' on '%s' does not support modifiers", 400) MsgDXBadSize = ffm("FF10303", "Unexpected size returned from data exchange upload. Size=%d Expected=%d") MsgBlobMismatchSealingData = ffm("FF10304", "Blob mismatch when sealing data") + MsgFieldTypeNoStringMatching = ffm("FF10305", "Field '%s' of type '%s' does not support partial or case-insensitive string matching via '%s' operator", 400) + MsgFieldMatchNoNull = ffm("FF10306", "Comparison operator '%s' for field '%s' cannot accept a null value", 400) ) diff --git a/pkg/database/filter.go b/pkg/database/filter.go index 6354ef52ff..cd6ab206f6 100644 --- a/pkg/database/filter.go +++ b/pkg/database/filter.go @@ -121,6 +121,28 @@ const ( FilterOpNotIEndsWith FilterOp = ";$" ) +func filterOpIsStringMatch(op FilterOp) bool { + for _, r := range string(op) { + switch r { + case '%', '^', '$', ':': + // Partial or case-insensitive matches all need a string + return true + } + } + return false +} + +func filterCannotAcceptNull(op FilterOp) bool { + for _, r := range string(op) { + switch r { + case '%', '^', '$', ':', '>', '<': + // string based matching, or gt/lt cannot accept null + return true + } + } + return false +} + // FilterBuilder is the syntax used to build the filter, where And() and Or() can be nested type FilterBuilder interface { // Fields is the list of available fields @@ -344,12 +366,18 @@ func (f *baseFilter) Finalize() (fi *FilterInfo, err error) { skipScan := false switch f.value.(type) { case nil: + if filterCannotAcceptNull(f.op) { + return nil, i18n.NewError(f.fb.ctx, i18n.MsgFieldMatchNoNull, f.op, name) + } value = &nullField{} skipScan = true case string: - if field.filterAsString() { + switch { + case field.filterAsString(): value = &stringField{} - } else { + case filterOpIsStringMatch(f.op): + return nil, i18n.NewError(f.fb.ctx, i18n.MsgFieldTypeNoStringMatching, name, field.description(), f.op) + default: value = field.getSerialization() } default: diff --git a/pkg/database/filter_test.go b/pkg/database/filter_test.go index 9041663077..d8bdf27097 100644 --- a/pkg/database/filter_test.go +++ b/pkg/database/filter_test.go @@ -297,6 +297,22 @@ func TestQueryFactoryBadNestedValue(t *testing.T) { assert.Regexp(t, "FF10149.*sequence", err) } +func TestQueryFactoryStringMatchNonString(t *testing.T) { + fb := 
DataQueryFactory.NewFilter(context.Background()) + _, err := fb.And( + fb.Contains("value", "stuff"), + ).Finalize() + assert.Regexp(t, "FF10305", err) +} + +func TestQueryFactoryNullGreaterThan(t *testing.T) { + fb := DataQueryFactory.NewFilter(context.Background()) + _, err := fb.And( + fb.Gt("created", nil), + ).Finalize() + assert.Regexp(t, "FF10306", err) +} + func TestQueryFactoryGetFields(t *testing.T) { fb := MessageQueryFactory.NewFilter(context.Background()) assert.NotNil(t, fb.Fields()) diff --git a/pkg/database/query_fields.go b/pkg/database/query_fields.go index 8f088e3fb6..aa60374e5f 100644 --- a/pkg/database/query_fields.go +++ b/pkg/database/query_fields.go @@ -68,6 +68,7 @@ type FieldSerialization interface { type Field interface { getSerialization() FieldSerialization + description() string filterAsString() bool } @@ -127,6 +128,7 @@ func (f *stringField) Value() (driver.Value, error) { return f.s, nil } func (f *stringField) String() string { return f.s } func (f *StringField) getSerialization() FieldSerialization { return &stringField{} } func (f *StringField) filterAsString() bool { return true } +func (f *StringField) description() string { return "String" } type UUIDField struct{} type uuidField struct{ u *fftypes.UUID } @@ -168,6 +170,7 @@ func (f *uuidField) Value() (driver.Value, error) { return f.u.Value() } func (f *uuidField) String() string { return fmt.Sprintf("%v", f.u) } func (f *UUIDField) getSerialization() FieldSerialization { return &uuidField{} } func (f *UUIDField) filterAsString() bool { return true } +func (f *UUIDField) description() string { return "UUID" } type Bytes32Field struct{} type bytes32Field struct{ b32 *fftypes.Bytes32 } @@ -197,6 +200,7 @@ func (f *bytes32Field) Value() (driver.Value, error) { return f.b32.Valu func (f *bytes32Field) String() string { return fmt.Sprintf("%v", f.b32) } func (f *Bytes32Field) getSerialization() FieldSerialization { return &bytes32Field{} } func (f *Bytes32Field) filterAsString() bool { return true } +func (f *Bytes32Field) description() string { return "Byte-Array" } type Int64Field struct{} type int64Field struct{ i int64 } @@ -220,6 +224,8 @@ func (f *int64Field) Scan(src interface{}) (err error) { if err != nil { return i18n.WrapError(context.Background(), err, i18n.MsgScanFailed, src, int64(0)) } + case nil: + f.i = 0 default: return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, f.i) } @@ -229,6 +235,7 @@ func (f *int64Field) Value() (driver.Value, error) { return f.i, nil } func (f *int64Field) String() string { return fmt.Sprintf("%d", f.i) } func (f *Int64Field) getSerialization() FieldSerialization { return &int64Field{} } func (f *Int64Field) filterAsString() bool { return false } +func (f *Int64Field) description() string { return "Integer" } type TimeField struct{} type timeField struct{ t *fftypes.FFTime } @@ -264,6 +271,7 @@ func (f *timeField) Value() (driver.Value, error) { func (f *timeField) String() string { return fmt.Sprintf("%v", f.t) } func (f *TimeField) getSerialization() FieldSerialization { return &timeField{} } func (f *TimeField) filterAsString() bool { return false } +func (f *TimeField) description() string { return "Date-time" } type JSONField struct{} type jsonField struct{ b []byte } @@ -286,7 +294,8 @@ func (f *jsonField) Scan(src interface{}) (err error) { func (f *jsonField) Value() (driver.Value, error) { return f.b, nil } func (f *jsonField) String() string { return string(f.b) } func (f *JSONField) getSerialization() FieldSerialization { return 
&jsonField{} } -func (f *JSONField) filterAsString() bool { return true } +func (f *JSONField) filterAsString() bool { return false } +func (f *JSONField) description() string { return "JSON-blob" } type FFNameArrayField struct{} type ffNameArrayField struct{ na fftypes.FFNameArray } @@ -298,6 +307,7 @@ func (f *ffNameArrayField) Value() (driver.Value, error) { return f.na.S func (f *ffNameArrayField) String() string { return f.na.String() } func (f *FFNameArrayField) getSerialization() FieldSerialization { return &ffNameArrayField{} } func (f *FFNameArrayField) filterAsString() bool { return true } +func (f *FFNameArrayField) description() string { return "String-array" } type BoolField struct{} type boolField struct{ b bool } @@ -331,3 +341,4 @@ func (f *boolField) Value() (driver.Value, error) { return f.b, nil } func (f *boolField) String() string { return fmt.Sprintf("%t", f.b) } func (f *BoolField) getSerialization() FieldSerialization { return &boolField{} } func (f *BoolField) filterAsString() bool { return false } +func (f *BoolField) description() string { return "Boolean" } diff --git a/pkg/database/query_fields_test.go b/pkg/database/query_fields_test.go index f0a4c13508..82e5510098 100644 --- a/pkg/database/query_fields_test.go +++ b/pkg/database/query_fields_test.go @@ -42,6 +42,8 @@ func TestNullField(t *testing.T) { func TestStringField(t *testing.T) { + fd := &StringField{} + assert.NotEmpty(t, fd.description()) f := stringField{} err := f.Scan("test") @@ -60,6 +62,8 @@ func TestStringField(t *testing.T) { func TestUUIDField(t *testing.T) { + fd := &UUIDField{} + assert.NotEmpty(t, fd.description()) f := uuidField{} err := f.Scan("") @@ -85,6 +89,8 @@ func TestUUIDField(t *testing.T) { func TestBytes32Field(t *testing.T) { + fd := &Bytes32Field{} + assert.NotEmpty(t, fd.description()) f := bytes32Field{} err := f.Scan("") @@ -108,8 +114,30 @@ func TestBytes32Field(t *testing.T) { } +func TestInt64Field(t *testing.T) { + + fd := &Int64Field{} + assert.NotEmpty(t, fd.description()) + f := int64Field{} + + err := f.Scan("12345") + assert.NoError(t, err) + v, err := f.Value() + assert.NoError(t, err) + assert.Equal(t, int64(12345), v) + + err = f.Scan(nil) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Equal(t, int64(0), v) + +} + func TestTimeField(t *testing.T) { + fd := &TimeField{} + assert.NotEmpty(t, fd.description()) f := timeField{} now := time.Now() @@ -129,6 +157,8 @@ func TestTimeField(t *testing.T) { func TestJSONField(t *testing.T) { + fd := &JSONField{} + assert.NotEmpty(t, fd.description()) f := jsonField{} err := f.Scan("{}") @@ -147,6 +177,8 @@ func TestJSONField(t *testing.T) { func TestBoolField(t *testing.T) { + fd := &BoolField{} + assert.NotEmpty(t, fd.description()) f := boolField{} err := f.Scan("true") @@ -162,3 +194,23 @@ func TestBoolField(t *testing.T) { assert.False(t, v.(bool)) } + +func TestFFNameArrayField(t *testing.T) { + + fd := &FFNameArrayField{} + assert.NotEmpty(t, fd.description()) + f := ffNameArrayField{} + + err := f.Scan("a,b") + assert.NoError(t, err) + v, err := f.Value() + assert.NoError(t, err) + assert.Equal(t, v, "a,b") + + err = f.Scan(nil) + assert.NoError(t, err) + v, err = f.Value() + assert.NoError(t, err) + assert.Equal(t, "", v) + +} diff --git a/pkg/fftypes/namearray.go b/pkg/fftypes/namearray.go index c3e7b9089d..2bda91ace6 100644 --- a/pkg/fftypes/namearray.go +++ b/pkg/fftypes/namearray.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -59,6 +59,7 @@ func (na *FFNameArray) Scan(src interface{}) error { *na = st return nil case nil: + *na = []string{} return nil default: return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, na) From bc09eea1c7488cc7e1419afb748770362ad02f6a Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 7 Jan 2022 16:55:35 -0500 Subject: [PATCH 08/21] Make bytables JSONAny, and serialize to string Signed-off-by: Peter Broadhurst --- docs/swagger/swagger.yaml | 14 ---- .../route_admin_get_config_record.go | 4 +- .../route_admin_get_config_record_test.go | 2 +- .../route_admin_get_config_records_test.go | 2 +- .../route_admin_put_config_record.go | 6 +- .../route_admin_put_config_record_test.go | 2 +- .../apiserver/route_admin_put_config_reset.go | 4 +- internal/apiserver/route_post_data.go | 4 +- internal/apiserver/route_post_data_test.go | 4 +- internal/assets/token_transfer_test.go | 12 +-- internal/blockchain/fabric/fabric.go | 4 +- internal/broadcast/datatype_test.go | 14 ++-- internal/broadcast/definition.go | 3 +- internal/broadcast/manager_test.go | 4 +- internal/broadcast/message.go | 10 ++- internal/broadcast/message_test.go | 16 ++-- internal/config/config.go | 12 +-- internal/config/config_test.go | 14 ++-- internal/data/blobstore.go | 3 +- internal/data/blobstore_test.go | 4 +- internal/data/data_manager.go | 4 +- internal/data/data_manager_test.go | 26 +++---- internal/data/json_validator.go | 11 ++- internal/data/json_validator_test.go | 4 +- internal/data/validator.go | 4 +- .../sqlcommon/config_record_sql_test.go | 4 +- internal/database/sqlcommon/data_sql_test.go | 4 +- .../database/sqlcommon/datatype_sql_test.go | 4 +- internal/dataexchange/dxhttps/dxhttps.go | 2 +- internal/definitions/definition_handler.go | 4 +- .../definition_handler_datatype_test.go | 30 +++---- .../definition_handler_namespace_test.go | 18 ++--- .../definition_handler_network_node_test.go | 22 +++--- .../definition_handler_network_org_test.go | 22 +++--- .../definition_handler_tokenpool_test.go | 2 +- internal/events/batch_pin_complete_test.go | 12 +-- internal/events/dx_callbacks_test.go | 8 +- internal/events/event_dispatcher_test.go | 4 +- internal/events/persist_batch.go | 4 +- internal/events/persist_batch_test.go | 4 +- internal/events/webhooks/webhooks.go | 15 ++-- internal/events/webhooks/webhooks_test.go | 18 ++--- internal/orchestrator/config.go | 4 +- internal/orchestrator/config_test.go | 12 +-- internal/orchestrator/data_query_test.go | 4 +- internal/orchestrator/orchestrator.go | 4 +- internal/orchestrator/orchestrator_test.go | 2 +- internal/privatemessaging/groupmanager.go | 5 +- .../privatemessaging/groupmanager_test.go | 10 +-- internal/privatemessaging/message.go | 4 +- internal/privatemessaging/message_test.go | 22 +++--- internal/privatemessaging/privatemessaging.go | 10 +-- .../privatemessaging/privatemessaging_test.go | 12 +-- internal/privatemessaging/recipients_test.go | 2 +- internal/syncasync/sync_async_bridge_test.go | 8 +- mocks/orchestratormocks/orchestrator.go | 10 +-- pkg/fftypes/config.go | 4 +- pkg/fftypes/data.go | 4 +- pkg/fftypes/data_test.go | 10 +-- pkg/fftypes/datatype.go | 6 +- pkg/fftypes/datatype_test.go | 2 +- pkg/fftypes/{byteable.go => jsonany.go} | 55 +++++++++---- .../{byteable_test.go => jsonany_test.go} | 26 ++++--- pkg/fftypes/{jsondata.go => jsonobject.go} | 41 +--------- .../{jsondata_test.go => jsonobject_test.go} | 22 +----- pkg/fftypes/jsonobjectarray.go | 78 +++++++++++++++++++ 
pkg/fftypes/jsonobjectarray_test.go | 77 ++++++++++++++++++ pkg/fftypes/message.go | 4 +- pkg/fftypes/message_test.go | 2 +- test/e2e/onchain_offchain_test.go | 24 +++--- test/e2e/tokens_test.go | 4 +- 71 files changed, 476 insertions(+), 356 deletions(-) rename pkg/fftypes/{byteable.go => jsonany.go} (68%) rename pkg/fftypes/{byteable_test.go => jsonany_test.go} (82%) rename pkg/fftypes/{jsondata.go => jsonobject.go} (84%) rename pkg/fftypes/{jsondata_test.go => jsonobject_test.go} (88%) create mode 100644 pkg/fftypes/jsonobjectarray.go create mode 100644 pkg/fftypes/jsonobjectarray_test.go diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index e416fa07f4..46b8137422 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -479,7 +479,6 @@ paths: validator: type: string value: - format: byte type: string type: object type: array @@ -633,7 +632,6 @@ paths: validator: type: string value: - format: byte type: string type: object type: array @@ -744,7 +742,6 @@ paths: - definition type: string value: - format: byte type: string version: type: string @@ -1142,7 +1139,6 @@ paths: validator: type: string value: - format: byte type: string type: object type: array @@ -1195,7 +1191,6 @@ paths: validator: type: string value: - format: byte type: string type: object multipart/form-data: @@ -1252,7 +1247,6 @@ paths: validator: type: string value: - format: byte type: string type: object description: Success @@ -1315,7 +1309,6 @@ paths: validator: type: string value: - format: byte type: string type: object description: Success @@ -1792,7 +1785,6 @@ paths: - definition type: string value: - format: byte type: string version: type: string @@ -1839,7 +1831,6 @@ paths: - definition type: string value: - format: byte type: string version: type: string @@ -1865,7 +1856,6 @@ paths: - definition type: string value: - format: byte type: string version: type: string @@ -1891,7 +1881,6 @@ paths: - definition type: string value: - format: byte type: string version: type: string @@ -1946,7 +1935,6 @@ paths: - definition type: string value: - format: byte type: string version: type: string @@ -2006,7 +1994,6 @@ paths: - definition type: string value: - format: byte type: string version: type: string @@ -2731,7 +2718,6 @@ paths: validator: type: string value: - format: byte type: string type: object type: array diff --git a/internal/apiserver/route_admin_get_config_record.go b/internal/apiserver/route_admin_get_config_record.go index 5b0f349af5..7cc239bf40 100644 --- a/internal/apiserver/route_admin_get_config_record.go +++ b/internal/apiserver/route_admin_get_config_record.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -36,7 +36,7 @@ var getConfigRecord = &oapispec.Route{ FilterFactory: database.ConfigRecordQueryFactory, Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return fftypes.Byteable{} }, + JSONOutputValue: func() interface{} { return fftypes.JSONAnyPtr("{}") }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { configRecord, err := r.Or.GetConfigRecord(r.Ctx, r.PP["key"]) diff --git a/internal/apiserver/route_admin_get_config_record_test.go b/internal/apiserver/route_admin_get_config_record_test.go index 0be29fa9fc..e2006973a0 100644 --- a/internal/apiserver/route_admin_get_config_record_test.go +++ b/internal/apiserver/route_admin_get_config_record_test.go @@ -41,7 +41,7 @@ func TestGetConfigRecord(t *testing.T) { o.On("GetConfigRecord", mock.Anything, u.String()). Return(&fftypes.ConfigRecord{ Key: u.String(), - Value: fftypes.Byteable(`{"foo": "bar"}`), + Value: fftypes.JSONAnyPtr(`{"foo": "bar"}`), }, nil) r.ServeHTTP(res, req) diff --git a/internal/apiserver/route_admin_get_config_records_test.go b/internal/apiserver/route_admin_get_config_records_test.go index 50bd00e44b..1cbfe35613 100644 --- a/internal/apiserver/route_admin_get_config_records_test.go +++ b/internal/apiserver/route_admin_get_config_records_test.go @@ -40,7 +40,7 @@ func TestGetConfigRecords(t *testing.T) { Return([]*fftypes.ConfigRecord{ { Key: "foo", - Value: fftypes.Byteable(`{"foo": "bar"}`), + Value: fftypes.JSONAnyPtr(`{"foo": "bar"}`), }, }, nil, nil) r.ServeHTTP(res, req) diff --git a/internal/apiserver/route_admin_put_config_record.go b/internal/apiserver/route_admin_put_config_record.go index d8fd73c75a..eb3c8df529 100644 --- a/internal/apiserver/route_admin_put_config_record.go +++ b/internal/apiserver/route_admin_put_config_record.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -57,12 +57,12 @@ var putConfigRecord = &oapispec.Route{ QueryParams: nil, FilterFactory: nil, Description: i18n.MsgTBD, - JSONInputValue: func() interface{} { return &fftypes.Byteable{} }, + JSONInputValue: func() interface{} { return fftypes.JSONAnyPtr("{}") }, JSONOutputValue: nil, JSONOutputCodes: []int{http.StatusOK}, JSONInputSchema: func(ctx context.Context) string { return anyJSONSchema }, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { - output, err = r.Or.PutConfigRecord(r.Ctx, r.PP["key"], *r.Input.(*fftypes.Byteable)) + output, err = r.Or.PutConfigRecord(r.Ctx, r.PP["key"], r.Input.(*fftypes.JSONAny)) return output, err }, } diff --git a/internal/apiserver/route_admin_put_config_record_test.go b/internal/apiserver/route_admin_put_config_record_test.go index b86c529a25..4b84b176ee 100644 --- a/internal/apiserver/route_admin_put_config_record_test.go +++ b/internal/apiserver/route_admin_put_config_record_test.go @@ -31,7 +31,7 @@ func TestPutConfigRecord(t *testing.T) { o, r := newTestAdminServer() input := &fftypes.ConfigRecord{ Key: "foo", - Value: fftypes.Byteable(`{"foo": "bar"}`), + Value: fftypes.JSONAnyPtr(`{"foo": "bar"}`), } var buf bytes.Buffer json.NewEncoder(&buf).Encode(&input) diff --git a/internal/apiserver/route_admin_put_config_reset.go b/internal/apiserver/route_admin_put_config_reset.go index 4a7c1d63d7..7878355e48 100644 --- a/internal/apiserver/route_admin_put_config_reset.go +++ b/internal/apiserver/route_admin_put_config_reset.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -33,7 +33,7 @@ var postResetConfig = &oapispec.Route{ QueryParams: nil, FilterFactory: nil, Description: i18n.MsgTBD, - JSONInputValue: func() interface{} { return &fftypes.Byteable{} }, + JSONInputValue: func() interface{} { return fftypes.JSONAnyPtr("{}") }, JSONOutputValue: nil, JSONOutputCodes: []int{http.StatusNoContent}, JSONInputSchema: func(ctx context.Context) string { return emptyObjectSchema }, diff --git a/internal/apiserver/route_post_data.go b/internal/apiserver/route_post_data.go index 322966faa4..63711b56dc 100644 --- a/internal/apiserver/route_post_data.go +++ b/internal/apiserver/route_post_data.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -72,7 +72,7 @@ var postData = &oapispec.Route{ if err := json.Unmarshal([]byte(metadata), &marshalCheck); err != nil { metadata = fmt.Sprintf(`"%s"`, metadata) } - data.Value = fftypes.Byteable(metadata) + data.Value = fftypes.JSONAnyPtr(metadata) } output, err = r.Or.Data().UploadBLOB(r.Ctx, r.PP["ns"], data, r.Part, strings.EqualFold(r.FP["autometa"], "true")) return output, err diff --git a/internal/apiserver/route_post_data_test.go b/internal/apiserver/route_post_data_test.go index 13cb3f6caa..8916b1b0ac 100644 --- a/internal/apiserver/route_post_data_test.go +++ b/internal/apiserver/route_post_data_test.go @@ -108,7 +108,7 @@ func TestPostDataBinaryObjAutoMeta(t *testing.T) { res := httptest.NewRecorder() mdm.On("UploadBLOB", mock.Anything, "ns1", mock.MatchedBy(func(d *fftypes.DataRefOrValue) bool { - assert.Equal(t, `{"filename":"anything"}`, string(d.Value)) + assert.Equal(t, `{"filename":"anything"}`, string(*d.Value)) assert.Equal(t, fftypes.ValidatorTypeJSON, d.Validator) assert.Equal(t, "fileinfo", d.Datatype.Name) assert.Equal(t, "0.0.1", d.Datatype.Version) @@ -142,7 +142,7 @@ func TestPostDataBinaryStringMetadata(t *testing.T) { res := httptest.NewRecorder() mdm.On("UploadBLOB", mock.Anything, "ns1", mock.MatchedBy(func(d *fftypes.DataRefOrValue) bool { - assert.Equal(t, `"string metadata"`, string(d.Value)) + assert.Equal(t, `"string metadata"`, string(*d.Value)) assert.Equal(t, "", string(d.Validator)) assert.Nil(t, d.Datatype) return true diff --git a/internal/assets/token_transfer_test.go b/internal/assets/token_transfer_test.go index f6d4a78673..f8502947d8 100644 --- a/internal/assets/token_transfer_test.go +++ b/internal/assets/token_transfer_test.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in comdiliance with the License. @@ -912,7 +912,7 @@ func TestTransferTokensWithBroadcastMessage(t *testing.T) { }, InlineData: fftypes.InlineData{ { - Value: []byte("test data"), + Value: fftypes.JSONAnyPtr("test data"), }, }, }, @@ -966,7 +966,7 @@ func TestTransferTokensWithBroadcastPrepareFail(t *testing.T) { Message: &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ { - Value: []byte("test data"), + Value: fftypes.JSONAnyPtr("test data"), }, }, }, @@ -1010,7 +1010,7 @@ func TestTransferTokensWithPrivateMessage(t *testing.T) { }, InlineData: fftypes.InlineData{ { - Value: []byte("test data"), + Value: fftypes.JSONAnyPtr("test data"), }, }, }, @@ -1069,7 +1069,7 @@ func TestTransferTokensWithInvalidMessage(t *testing.T) { }, InlineData: fftypes.InlineData{ { - Value: []byte("test data"), + Value: fftypes.JSONAnyPtr("test data"), }, }, }, @@ -1151,7 +1151,7 @@ func TestTransferTokensWithBroadcastConfirm(t *testing.T) { }, InlineData: fftypes.InlineData{ { - Value: []byte("test data"), + Value: fftypes.JSONAnyPtr("test data"), }, }, }, diff --git a/internal/blockchain/fabric/fabric.go b/internal/blockchain/fabric/fabric.go index 9450357eb8..907bbc1517 100644 --- a/internal/blockchain/fabric/fabric.go +++ b/internal/blockchain/fabric/fabric.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -232,7 +232,7 @@ func (f *Fabric) handleBatchPinEvent(ctx context.Context, msgJSON fftypes.JSONOb log.L(ctx).Errorf("BatchPin event is not valid - bad payload content: %s", payloadString) return nil // move on } - dataBytes := fftypes.Byteable(bytes) + dataBytes := fftypes.JSONAnyPtrBytes(bytes) payload, ok := dataBytes.JSONObjectOk() if !ok { log.L(ctx).Errorf("BatchPin event is not valid - bad JSON payload: %s", bytes) diff --git a/internal/broadcast/datatype_test.go b/internal/broadcast/datatype_test.go index 9f6edaf283..977e087415 100644 --- a/internal/broadcast/datatype_test.go +++ b/internal/broadcast/datatype_test.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -48,7 +48,7 @@ func TestBroadcastDatatypeNSGetFail(t *testing.T) { Name: "name1", Namespace: "ns1", Version: "0.0.1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), }, false) assert.EqualError(t, err, "pop") } @@ -65,7 +65,7 @@ func TestBroadcastDatatypeBadValue(t *testing.T) { Namespace: "ns1", Name: "ent1", Version: "0.0.1", - Value: fftypes.Byteable(`!unparsable`), + Value: fftypes.JSONAnyPtr(`!unparsable`), }, false) assert.Regexp(t, "FF10137.*value", err) } @@ -86,7 +86,7 @@ func TestBroadcastUpsertFail(t *testing.T) { Namespace: "ns1", Name: "ent1", Version: "0.0.1", - Value: fftypes.Byteable(`{"some": "data"}`), + Value: fftypes.JSONAnyPtr(`{"some": "data"}`), }, false) assert.EqualError(t, err, "pop") } @@ -107,7 +107,7 @@ func TestBroadcastDatatypeInvalid(t *testing.T) { Namespace: "ns1", Name: "ent1", Version: "0.0.1", - Value: fftypes.Byteable(`{"some": "data"}`), + Value: fftypes.JSONAnyPtr(`{"some": "data"}`), }, false) assert.EqualError(t, err, "pop") } @@ -129,7 +129,7 @@ func TestBroadcastBroadcastFail(t *testing.T) { Namespace: "ns1", Name: "ent1", Version: "0.0.1", - Value: fftypes.Byteable(`{"some": "data"}`), + Value: fftypes.JSONAnyPtr(`{"some": "data"}`), }, false) assert.EqualError(t, err, "pop") } @@ -151,7 +151,7 @@ func TestBroadcastOk(t *testing.T) { Namespace: "ns1", Name: "ent1", Version: "0.0.1", - Value: fftypes.Byteable(`{"some": "data"}`), + Value: fftypes.JSONAnyPtr(`{"some": "data"}`), }, false) assert.NoError(t, err) } diff --git a/internal/broadcast/definition.go b/internal/broadcast/definition.go index 46818ac3ab..8bf6001113 100644 --- a/internal/broadcast/definition.go +++ b/internal/broadcast/definition.go @@ -55,8 +55,9 @@ func (bm *broadcastManager) broadcastDefinitionCommon(ctx context.Context, ns st Namespace: ns, Created: fftypes.Now(), } - data.Value, err = json.Marshal(&def) + b, err := json.Marshal(&def) if err == nil { + data.Value = fftypes.JSONAnyPtrBytes(b) err = data.Seal(ctx, nil) } if err != nil { diff --git a/internal/broadcast/manager_test.go b/internal/broadcast/manager_test.go index df51ffeb35..ad393f3026 100644 --- a/internal/broadcast/manager_test.go +++ b/internal/broadcast/manager_test.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -125,7 +125,7 @@ func TestDispatchBatchInvalidData(t *testing.T) { err := bm.dispatchBatch(context.Background(), &fftypes.Batch{ Payload: fftypes.BatchPayload{ Data: []*fftypes.Data{ - {Value: fftypes.Byteable(`!json`)}, + {Value: fftypes.JSONAnyPtr(`!json`)}, }, }, }, []*fftypes.Bytes32{fftypes.NewRandB32()}) diff --git a/internal/broadcast/message.go b/internal/broadcast/message.go index ba49fc461b..0f38f8b5a3 100644 --- a/internal/broadcast/message.go +++ b/internal/broadcast/message.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -170,9 +170,11 @@ func (s *broadcastSender) isRootOrgBroadcast(ctx context.Context) bool { dataItem := messageData[0] if dataItem.Validator == fftypes.MessageTypeDefinition { var org *fftypes.Organization - err := json.Unmarshal(dataItem.Value, &org) - if err != nil { - return false + if dataItem.Value != nil { + err := json.Unmarshal([]byte(*dataItem.Value), &org) + if err != nil { + return false + } } if org != nil && org.Name != "" && org.ID != nil && org.Parent == "" { return true diff --git a/internal/broadcast/message_test.go b/internal/broadcast/message_test.go index da193aa297..92a0a04030 100644 --- a/internal/broadcast/message_test.go +++ b/internal/broadcast/message_test.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -68,7 +68,7 @@ func TestBroadcastMessageOk(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"hello": "world"}`)}, + {Value: fftypes.JSONAnyPtr(`{"hello": "world"}`)}, }, }, false) assert.NoError(t, err) @@ -104,7 +104,7 @@ func TestBroadcastRootOrg(t *testing.T) { data := &fftypes.Data{ ID: fftypes.NewUUID(), - Value: orgBytes, + Value: fftypes.JSONAnyPtrBytes(orgBytes), Validator: fftypes.MessageTypeDefinition, } @@ -152,7 +152,7 @@ func TestBroadcastRootOrgBadData(t *testing.T) { ctx := context.Background() data := &fftypes.Data{ ID: fftypes.NewUUID(), - Value: []byte("not an org"), + Value: fftypes.JSONAnyPtr("not an org"), Validator: fftypes.MessageTypeDefinition, } @@ -226,7 +226,7 @@ func TestBroadcastMessageWaitConfirmOk(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"hello": "world"}`)}, + {Value: fftypes.JSONAnyPtr(`{"hello": "world"}`)}, }, }, true) assert.NoError(t, err) @@ -324,7 +324,7 @@ func TestBroadcastMessageBadInput(t *testing.T) { _, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"hello": "world"}`)}, + {Value: fftypes.JSONAnyPtr(`{"hello": "world"}`)}, }, }, false) assert.EqualError(t, err, "pop") @@ -343,7 +343,7 @@ func TestBroadcastMessageBadIdentity(t *testing.T) { _, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"hello": "world"}`)}, + {Value: fftypes.JSONAnyPtr(`{"hello": "world"}`)}, }, }, false) assert.Regexp(t, "FF10206", err) @@ -438,7 +438,7 @@ func TestBroadcastPrepare(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"hello": "world"}`)}, + {Value: fftypes.JSONAnyPtr(`{"hello": "world"}`)}, }, } sender := bm.NewBroadcast("ns1", msg) diff --git a/internal/config/config.go b/internal/config/config.go index 089c7b83c6..bb3719bda7 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,4 +1,4 @@ -// Copyright © 
2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -381,12 +381,14 @@ func MergeConfig(configRecords []*fftypes.ConfigRecord) error { s := viper.New() s.SetConfigType("json") var val interface{} - if err := json.Unmarshal(c.Value, &val); err != nil { - return err + if c.Value != nil { + if err := json.Unmarshal([]byte(*c.Value), &val); err != nil { + return err + } } switch v := val.(type) { case map[string]interface{}: - _ = s.ReadConfig(bytes.NewBuffer(c.Value)) + _ = s.ReadConfig(bytes.NewBuffer([]byte(*c.Value))) for _, k := range s.AllKeys() { value := s.Get(k) if reflect.TypeOf(value).Kind() == reflect.Slice { @@ -399,7 +401,7 @@ func MergeConfig(configRecords []*fftypes.ConfigRecord) error { } } case []interface{}: - _ = s.ReadConfig(bytes.NewBuffer(c.Value)) + _ = s.ReadConfig(bytes.NewBuffer([]byte(*c.Value))) for i := range v { viper.Set(fmt.Sprintf("%s.%d", c.Key, i), v[i]) } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 240af2c8d5..7c92c405a2 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -205,22 +205,22 @@ func TestSetupLogging(t *testing.T) { func TestMergeConfigOk(t *testing.T) { - conf1 := fftypes.Byteable(`{ + conf1 := fftypes.JSONAnyPtr(`{ "some": { "nested": { "stuff": "value1" } } }`) - confNumber := fftypes.Byteable(`{ + confNumber := fftypes.JSONAnyPtr(`{ "some": { "more": { "stuff": 15 } } }`) - conf3 := fftypes.Byteable(`"value3"`) - confNestedSlice := fftypes.Byteable(`{ + conf3 := fftypes.JSONAnyPtr(`"value3"`) + confNestedSlice := fftypes.JSONAnyPtr(`{ "nestedslice": [ { "firstitemfirstkey": "firstitemfirstkeyvalue", @@ -232,7 +232,7 @@ func TestMergeConfigOk(t *testing.T) { } ] }`) - confBaseSlice := fftypes.Byteable(`[ + confBaseSlice := fftypes.JSONAnyPtr(`[ { "firstitemfirstkey": "firstitemfirstkeyvalue", "firstitemsecondkey": "firstitemsecondkeyvalue" @@ -267,7 +267,7 @@ func TestMergeConfigOk(t *testing.T) { func TestMergeConfigBadJSON(t *testing.T) { err := MergeConfig([]*fftypes.ConfigRecord{ - {Key: "base", Value: fftypes.Byteable(`!json`)}, + {Key: "base", Value: fftypes.JSONAnyPtr(`!json`)}, }) assert.Error(t, err) } diff --git a/internal/data/blobstore.go b/internal/data/blobstore.go index 338aadfb34..8c857aefb1 100644 --- a/internal/data/blobstore.go +++ b/internal/data/blobstore.go @@ -106,7 +106,8 @@ func (bs *blobStore) UploadBLOB(ctx context.Context, ns string, inData *fftypes. 
do := data.Value.JSONObject() do["filename"] = mpart.Filename do["mimetype"] = mpart.Mimetype - data.Value, _ = json.Marshal(&do) + b, _ := json.Marshal(&do) + data.Value = fftypes.JSONAnyPtrBytes(b) } if data.Validator == "" { data.Validator = fftypes.ValidatorTypeJSON diff --git a/internal/data/blobstore_test.go b/internal/data/blobstore_test.go index ad9fca99e6..d2b644ae4c 100644 --- a/internal/data/blobstore_test.go +++ b/internal/data/blobstore_test.go @@ -111,7 +111,7 @@ func TestUploadBlobAutoMetaOk(t *testing.T) { } data, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{ - Value: fftypes.Byteable(`{"custom": "value1"}`), + Value: fftypes.JSONAnyPtr(`{"custom": "value1"}`), }, &fftypes.Multipart{ Data: bytes.NewReader([]byte(`hello`)), Filename: "myfile.csv", @@ -146,7 +146,7 @@ func TestUploadBlobBadValidator(t *testing.T) { } _, err := dm.UploadBLOB(ctx, "ns1", &fftypes.DataRefOrValue{ - Value: fftypes.Byteable(`{"custom": "value1"}`), + Value: fftypes.JSONAnyPtr(`{"custom": "value1"}`), Validator: "wrong", }, &fftypes.Multipart{ Data: bytes.NewReader([]byte(`hello`)), diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index fb728661c0..4036b33852 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -211,7 +211,7 @@ func (dm *dataManager) resolveBlob(ctx context.Context, blobRef *fftypes.BlobRef return nil, nil } -func (dm *dataManager) checkValidation(ctx context.Context, ns string, validator fftypes.ValidatorType, datatype *fftypes.DatatypeRef, value fftypes.Byteable) error { +func (dm *dataManager) checkValidation(ctx context.Context, ns string, validator fftypes.ValidatorType, datatype *fftypes.DatatypeRef, value *fftypes.JSONAny) error { if validator == "" { validator = fftypes.ValidatorTypeJSON } @@ -240,7 +240,7 @@ func (dm *dataManager) checkValidation(ctx context.Context, ns string, validator return nil } -func (dm *dataManager) validateAndStore(ctx context.Context, ns string, validator fftypes.ValidatorType, datatype *fftypes.DatatypeRef, value fftypes.Byteable, blobRef *fftypes.BlobRef) (data *fftypes.Data, blob *fftypes.Blob, err error) { +func (dm *dataManager) validateAndStore(ctx context.Context, ns string, validator fftypes.ValidatorType, datatype *fftypes.DatatypeRef, value *fftypes.JSONAny, blobRef *fftypes.BlobRef) (data *fftypes.Data, blob *fftypes.Blob, err error) { if err := dm.checkValidation(ctx, ns, validator, datatype, value); err != nil { return nil, nil, err diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index 36f373e355..bc223bafc7 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -54,13 +54,13 @@ func TestValidateE2E(t *testing.T) { Name: "customer", Version: "0.0.1", }, - Value: fftypes.Byteable(`{"some":"json"}`), + Value: fftypes.JSONAnyPtr(`{"some":"json"}`), } data.Seal(ctx, nil) dt := &fftypes.Datatype{ ID: fftypes.NewUUID(), Validator: fftypes.ValidatorTypeJSON, - Value: fftypes.Byteable(`{ + Value: fftypes.JSONAnyPtr(`{ "properties": { "field1": { "type": "string" @@ -81,7 +81,7 @@ func TestValidateE2E(t *testing.T) { err = v.Validate(ctx, data) assert.Regexp(t, "FF10198", err) - data.Value = fftypes.Byteable(`{"field1":"value1"}`) + data.Value = fftypes.JSONAnyPtr(`{"field1":"value1"}`) data.Seal(context.Background(), nil) err = v.Validate(ctx, data) assert.NoError(t, err) @@ -110,7 +110,7 @@ func TestValidatorLookupCached(t *testing.T) { dt := &fftypes.Datatype{ ID: fftypes.NewUUID(), Validator: 
fftypes.ValidatorTypeJSON, - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), Name: "customer", Namespace: "0.0.1", } @@ -138,13 +138,13 @@ func TestValidateBadHash(t *testing.T) { Name: "customer", Version: "0.0.1", }, - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), Hash: fftypes.NewRandB32(), } dt := &fftypes.Datatype{ ID: fftypes.NewUUID(), Validator: fftypes.ValidatorTypeJSON, - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), Name: "customer", Namespace: "0.0.1", } @@ -409,7 +409,7 @@ func TestResolveInlineDataValueNoValidatorOK(t *testing.T) { mdi.On("UpsertData", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some":"json"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some":"json"}`)}, }) assert.NoError(t, err) assert.Len(t, refs, 1) @@ -425,7 +425,7 @@ func TestResolveInlineDataValueNoValidatorStoreFail(t *testing.T) { mdi.On("UpsertData", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) _, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some":"json"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some":"json"}`)}, }) assert.EqualError(t, err, "pop") } @@ -442,7 +442,7 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { Namespace: "ns1", Name: "customer", Version: "0.0.1", - Value: fftypes.Byteable(`{ + Value: fftypes.JSONAnyPtr(`{ "properties": { "field1": { "type": "string" @@ -458,7 +458,7 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { Name: "customer", Version: "0.0.1", }, - Value: fftypes.Byteable(`{"field1":"value1"}`), + Value: fftypes.JSONAnyPtr(`{"field1":"value1"}`), }, }) assert.NoError(t, err) @@ -472,7 +472,7 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { Name: "customer", Version: "0.0.1", }, - Value: fftypes.Byteable(`{"not_allowed":"value"}`), + Value: fftypes.JSONAnyPtr(`{"not_allowed":"value"}`), }, }) assert.Regexp(t, "FF10198", err) @@ -603,7 +603,7 @@ func TestValidateAllLookupError(t *testing.T) { Name: "customer", Version: "0.0.1", }, - Value: fftypes.Byteable(`anything`), + Value: fftypes.JSONAnyPtr(`anything`), } data.Seal(ctx, nil) _, err := dm.ValidateAll(ctx, []*fftypes.Data{data}) @@ -627,7 +627,7 @@ func TestValidateAllStoredValidatorInvalid(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(&fftypes.Datatype{ - Value: fftypes.Byteable(`{"not": "a", "schema": true}`), + Value: fftypes.JSONAnyPtr(`{"not": "a", "schema": true}`), }, nil) data := &fftypes.Data{ Namespace: "ns1", diff --git a/internal/data/json_validator.go b/internal/data/json_validator.go index 0a79d4ff1c..16518efa1f 100644 --- a/internal/data/json_validator.go +++ b/internal/data/json_validator.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -44,7 +44,10 @@ func newJSONValidator(ctx context.Context, ns string, datatype *fftypes.Datatype }, } - schemaBytes := []byte(datatype.Value) + var schemaBytes []byte + if datatype.Value != nil { + schemaBytes = []byte(*datatype.Value) + } sl := gojsonschema.NewBytesLoader(schemaBytes) schema, err := gojsonschema.NewSchema(sl) if err != nil { @@ -61,7 +64,7 @@ func (jv *jsonValidator) Validate(ctx context.Context, data *fftypes.Data) error return jv.ValidateValue(ctx, data.Value, data.Hash) } -func (jv *jsonValidator) ValidateValue(ctx context.Context, value fftypes.Byteable, expectedHash *fftypes.Bytes32) error { +func (jv *jsonValidator) ValidateValue(ctx context.Context, value *fftypes.JSONAny, expectedHash *fftypes.Bytes32) error { if value == nil { return i18n.NewError(ctx, i18n.MsgDataValueIsNull) } @@ -73,7 +76,7 @@ func (jv *jsonValidator) ValidateValue(ctx context.Context, value fftypes.Byteab } } - err := jv.validateBytes(ctx, []byte(value)) + err := jv.validateBytes(ctx, []byte(*value)) if err != nil { log.L(ctx).Warnf("JSON schema %s [%v] validation failed: %s", jv.datatype, jv.id, err) } diff --git a/internal/data/json_validator_test.go b/internal/data/json_validator_test.go index a9efe17668..c669c73ed7 100644 --- a/internal/data/json_validator_test.go +++ b/internal/data/json_validator_test.go @@ -39,7 +39,7 @@ func TestJSONValidator(t *testing.T) { Validator: fftypes.ValidatorTypeJSON, Name: "customer", Version: "0.0.1", - Value: fftypes.Byteable(schemaBinary), + Value: fftypes.JSONAnyPtrBytes(schemaBinary), } jv, err := newJSONValidator(context.Background(), "ns1", dt) @@ -64,7 +64,7 @@ func TestJSONValidatorParseSchemaFail(t *testing.T) { Validator: fftypes.ValidatorTypeJSON, Name: "customer", Version: "0.0.1", - Value: fftypes.Byteable(`{!json`), + Value: fftypes.JSONAnyPtr(`{!json`), } _, err := newJSONValidator(context.Background(), "ns1", dt) diff --git a/internal/data/validator.go b/internal/data/validator.go index 5cd70da2de..b34a64a613 100644 --- a/internal/data/validator.go +++ b/internal/data/validator.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
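The nil guard added in `newJSONValidator` above is the recurring cost of moving from a slice type to a pointer type: a nil `*fftypes.JSONAny` can no longer be cast straight to `[]byte`. A minimal sketch of the same pattern in isolation (the helper name is illustrative only and not part of the patch; the `Bytes()` accessor on the new type bakes in the identical check):

```go
package example

import "github.com/hyperledger/firefly/pkg/fftypes"

// rawSchemaBytes is a hypothetical helper mirroring the guard in newJSONValidator:
// dereference a *fftypes.JSONAny only after a nil check before handing raw bytes
// to a schema loader. v.Bytes() performs the same nil-safe conversion.
func rawSchemaBytes(v *fftypes.JSONAny) []byte {
	if v == nil {
		return nil
	}
	return []byte(*v)
}
```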
// // SPDX-License-Identifier: Apache-2.0 // @@ -24,6 +24,6 @@ import ( type Validator interface { Validate(ctx context.Context, data *fftypes.Data) error - ValidateValue(ctx context.Context, value fftypes.Byteable, expectedHash *fftypes.Bytes32) error + ValidateValue(ctx context.Context, value *fftypes.JSONAny, expectedHash *fftypes.Bytes32) error Size() int64 // for cache management } diff --git a/internal/database/sqlcommon/config_record_sql_test.go b/internal/database/sqlcommon/config_record_sql_test.go index f9eb4ab195..87f0d7a541 100644 --- a/internal/database/sqlcommon/config_record_sql_test.go +++ b/internal/database/sqlcommon/config_record_sql_test.go @@ -39,7 +39,7 @@ func TestConfigRecordE2EWithDB(t *testing.T) { // Create a new namespace entry configRecord := &fftypes.ConfigRecord{ Key: "foo", - Value: fftypes.Byteable(`{"foo":"bar"}`), + Value: fftypes.JSONAnyPtr(`{"foo":"bar"}`), } err := s.UpsertConfigRecord(ctx, configRecord, true) assert.NoError(t, err) @@ -56,7 +56,7 @@ func TestConfigRecordE2EWithDB(t *testing.T) { // and does not account for the verification that happens at the higher level) configRecordUpdated := &fftypes.ConfigRecord{ Key: "foo", - Value: fftypes.Byteable(`{"fiz":"buzz"}`), + Value: fftypes.JSONAnyPtr(`{"fiz":"buzz"}`), } err = s.UpsertConfigRecord(context.Background(), configRecordUpdated, true) assert.NoError(t, err) diff --git a/internal/database/sqlcommon/data_sql_test.go b/internal/database/sqlcommon/data_sql_test.go index 0f84b82ab4..71e7bbbf92 100644 --- a/internal/database/sqlcommon/data_sql_test.go +++ b/internal/database/sqlcommon/data_sql_test.go @@ -51,7 +51,7 @@ func TestDataE2EWithDB(t *testing.T) { Namespace: "ns1", Hash: fftypes.NewRandB32(), Created: fftypes.Now(), - Value: []byte(val.String()), + Value: fftypes.JSONAnyPtr(val.String()), } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionData, fftypes.ChangeEventTypeCreated, "ns1", dataID, mock.Anything).Return() @@ -92,7 +92,7 @@ func TestDataE2EWithDB(t *testing.T) { }, Hash: fftypes.NewRandB32(), Created: fftypes.Now(), - Value: []byte(val2.String()), + Value: fftypes.JSONAnyPtr(val2.String()), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), Public: "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", diff --git a/internal/database/sqlcommon/datatype_sql_test.go b/internal/database/sqlcommon/datatype_sql_test.go index 4f4eeb6fe9..143617c780 100644 --- a/internal/database/sqlcommon/datatype_sql_test.go +++ b/internal/database/sqlcommon/datatype_sql_test.go @@ -53,7 +53,7 @@ func TestDatatypeE2EWithDB(t *testing.T) { Namespace: "ns1", Hash: randB32, Created: fftypes.Now(), - Value: []byte(val.String()), + Value: fftypes.JSONAnyPtr(val.String()), } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionDataTypes, fftypes.ChangeEventTypeCreated, "ns1", datatypeID, mock.Anything).Return() @@ -87,7 +87,7 @@ func TestDatatypeE2EWithDB(t *testing.T) { Version: "0.0.1", Hash: randB32, Created: fftypes.Now(), - Value: []byte(val2.String()), + Value: fftypes.JSONAnyPtr(val2.String()), } err = s.UpsertDatatype(context.Background(), datatypeUpdated, true) assert.NoError(t, err) diff --git a/internal/dataexchange/dxhttps/dxhttps.go b/internal/dataexchange/dxhttps/dxhttps.go index 352e792694..cc9812dc71 100644 --- a/internal/dataexchange/dxhttps/dxhttps.go +++ b/internal/dataexchange/dxhttps/dxhttps.go @@ -258,7 +258,7 @@ func (h *HTTPS) eventLoop() { case messageDelivered: err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusSucceeded, "", nil) case messageReceived: - 
err = h.callbacks.MessageReceived(msg.Sender, fftypes.Byteable(msg.Message)) + err = h.callbacks.MessageReceived(msg.Sender, []byte(msg.Message)) case blobFailed: err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusFailed, msg.Error, nil) case blobDelivered: diff --git a/internal/definitions/definition_handler.go b/internal/definitions/definition_handler.go index d13f75e86e..d2029fdba4 100644 --- a/internal/definitions/definition_handler.go +++ b/internal/definitions/definition_handler.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -122,7 +122,7 @@ func (dh *definitionHandlers) getSystemBroadcastPayload(ctx context.Context, msg l.Warnf("Unable to process system broadcast %s - expecting 1 attachement, found %d", msg.Header.ID, len(data)) return false } - err := json.Unmarshal(data[0].Value, &res) + err := json.Unmarshal(data[0].Value.Bytes(), &res) if err != nil { l.Warnf("Unable to process system broadcast %s - unmarshal failed: %s", msg.Header.ID, err) return false diff --git a/internal/definitions/definition_handler_datatype_test.go b/internal/definitions/definition_handler_datatype_test.go index 2a53bf737f..5933e1dabc 100644 --- a/internal/definitions/definition_handler_datatype_test.go +++ b/internal/definitions/definition_handler_datatype_test.go @@ -38,13 +38,13 @@ func TestHandleDefinitionBroadcastDatatypeOk(t *testing.T) { Namespace: "ns1", Name: "name1", Version: "ver1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } dt.Hash = dt.Value.Hash() b, err := json.Marshal(&dt) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdm := dh.data.(*datamocks.Manager) @@ -74,13 +74,13 @@ func TestHandleDefinitionBroadcastDatatypeEventFail(t *testing.T) { Namespace: "ns1", Name: "name1", Version: "ver1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } dt.Hash = dt.Value.Hash() b, err := json.Marshal(&dt) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdm := dh.data.(*datamocks.Manager) @@ -109,13 +109,13 @@ func TestHandleDefinitionBroadcastDatatypeMissingID(t *testing.T) { Namespace: "ns1", Name: "name1", Version: "ver1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } dt.Hash = dt.Value.Hash() b, err := json.Marshal(&dt) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } action, err := dh.HandleSystemBroadcast(context.Background(), &fftypes.Message{ @@ -136,13 +136,13 @@ func TestHandleDefinitionBroadcastBadSchema(t *testing.T) { Namespace: "ns1", Name: "name1", Version: "ver1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } dt.Hash = dt.Value.Hash() b, err := json.Marshal(&dt) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdm := dh.data.(*datamocks.Manager) @@ -167,7 +167,7 @@ func TestHandleDefinitionBroadcastMissingData(t *testing.T) { Namespace: "ns1", Name: "name1", Version: "ver1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } dt.Hash = dt.Value.Hash() @@ -189,13 +189,13 @@ func TestHandleDefinitionBroadcastDatatypeLookupFail(t *testing.T) { Namespace: "ns1", Name: "name1", Version: "ver1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } dt.Hash = dt.Value.Hash() b, err := json.Marshal(&dt) 
assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdm := dh.data.(*datamocks.Manager) @@ -224,13 +224,13 @@ func TestHandleDefinitionBroadcastUpsertFail(t *testing.T) { Namespace: "ns1", Name: "name1", Version: "ver1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } dt.Hash = dt.Value.Hash() b, err := json.Marshal(&dt) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdm := dh.data.(*datamocks.Manager) @@ -259,13 +259,13 @@ func TestHandleDefinitionBroadcastDatatypeDuplicate(t *testing.T) { Namespace: "ns1", Name: "name1", Version: "ver1", - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } dt.Hash = dt.Value.Hash() b, err := json.Marshal(&dt) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdm := dh.data.(*datamocks.Manager) diff --git a/internal/definitions/definition_handler_namespace_test.go b/internal/definitions/definition_handler_namespace_test.go index 87c306b206..a40bee861f 100644 --- a/internal/definitions/definition_handler_namespace_test.go +++ b/internal/definitions/definition_handler_namespace_test.go @@ -38,7 +38,7 @@ func TestHandleDefinitionBroadcastNSOk(t *testing.T) { b, err := json.Marshal(&ns) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -66,7 +66,7 @@ func TestHandleDefinitionBroadcastNSEventFail(t *testing.T) { b, err := json.Marshal(&ns) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -94,7 +94,7 @@ func TestHandleDefinitionBroadcastNSUpsertFail(t *testing.T) { b, err := json.Marshal(&ns) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -130,7 +130,7 @@ func TestHandleDefinitionBroadcastNSBadID(t *testing.T) { b, err := json.Marshal(&ns) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } action, err := dh.HandleSystemBroadcast(context.Background(), &fftypes.Message{ @@ -146,7 +146,7 @@ func TestHandleDefinitionBroadcastNSBadData(t *testing.T) { dh := newTestDefinitionHandlers(t) data := &fftypes.Data{ - Value: fftypes.Byteable(`!{json`), + Value: fftypes.JSONAnyPtr(`!{json`), } action, err := dh.HandleSystemBroadcast(context.Background(), &fftypes.Message{ @@ -168,7 +168,7 @@ func TestHandleDefinitionBroadcastDuplicate(t *testing.T) { b, err := json.Marshal(&ns) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -195,7 +195,7 @@ func TestHandleDefinitionBroadcastDuplicateOverrideLocal(t *testing.T) { b, err := json.Marshal(&ns) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -225,7 +225,7 @@ func TestHandleDefinitionBroadcastDuplicateOverrideLocalFail(t *testing.T) { b, err := json.Marshal(&ns) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -252,7 +252,7 @@ func TestHandleDefinitionBroadcastDupCheckFail(t 
*testing.T) { b, err := json.Marshal(&ns) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) diff --git a/internal/definitions/definition_handler_network_node_test.go b/internal/definitions/definition_handler_network_node_test.go index d670c235ab..5c6b98efb2 100644 --- a/internal/definitions/definition_handler_network_node_test.go +++ b/internal/definitions/definition_handler_network_node_test.go @@ -45,7 +45,7 @@ func TestHandleDefinitionBroadcastNodeOk(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -87,7 +87,7 @@ func TestHandleDefinitionBroadcastNodeUpsertFail(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -127,7 +127,7 @@ func TestHandleDefinitionBroadcastNodeAddPeerFail(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -169,7 +169,7 @@ func TestHandleDefinitionBroadcastNodeDupMismatch(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -207,7 +207,7 @@ func TestHandleDefinitionBroadcastNodeDupOK(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -248,7 +248,7 @@ func TestHandleDefinitionBroadcastNodeGetFail(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -286,7 +286,7 @@ func TestHandleDefinitionBroadcastNodeBadAuthor(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -323,7 +323,7 @@ func TestHandleDefinitionBroadcastNodeGetOrgNotFound(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -360,7 +360,7 @@ func TestHandleDefinitionBroadcastNodeGetOrgFail(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -397,7 +397,7 @@ func TestHandleDefinitionBroadcastNodeValidateFail(t *testing.T) { b, err := json.Marshal(&node) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } action, err := dh.HandleSystemBroadcast(context.Background(), &fftypes.Message{ @@ -418,7 +418,7 @@ func TestHandleDefinitionBroadcastNodeUnmarshalFail(t *testing.T) { dh := newTestDefinitionHandlers(t) data := &fftypes.Data{ - Value: fftypes.Byteable(`!json`), + Value: fftypes.JSONAnyPtr(`!json`), } action, err := dh.HandleSystemBroadcast(context.Background(), 
&fftypes.Message{ diff --git a/internal/definitions/definition_handler_network_org_test.go b/internal/definitions/definition_handler_network_org_test.go index 18b7379baa..12b665129d 100644 --- a/internal/definitions/definition_handler_network_org_test.go +++ b/internal/definitions/definition_handler_network_org_test.go @@ -49,7 +49,7 @@ func TestHandleDefinitionBroadcastChildOrgOk(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -95,7 +95,7 @@ func TestHandleDefinitionBroadcastChildOrgDupOk(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -139,7 +139,7 @@ func TestHandleDefinitionBroadcastChildOrgBadKey(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -174,7 +174,7 @@ func TestHandleDefinitionBroadcastOrgDupMismatch(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -208,7 +208,7 @@ func TestHandleDefinitionBroadcastOrgUpsertFail(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -244,7 +244,7 @@ func TestHandleDefinitionBroadcastOrgGetOrgFail(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -277,7 +277,7 @@ func TestHandleDefinitionBroadcastOrgAuthorMismatch(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -312,7 +312,7 @@ func TestHandleDefinitionBroadcastGetParentFail(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -347,7 +347,7 @@ func TestHandleDefinitionBroadcastGetParentNotFound(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } mdi := dh.database.(*databasemocks.Plugin) @@ -380,7 +380,7 @@ func TestHandleDefinitionBroadcastValidateFail(t *testing.T) { b, err := json.Marshal(&org) assert.NoError(t, err) data := &fftypes.Data{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), } action, err := dh.HandleSystemBroadcast(context.Background(), &fftypes.Message{ @@ -401,7 +401,7 @@ func TestHandleDefinitionBroadcastUnmarshalFail(t *testing.T) { dh := newTestDefinitionHandlers(t) data := &fftypes.Data{ - Value: fftypes.Byteable(`!json`), + Value: fftypes.JSONAnyPtr(`!json`), } action, err := dh.HandleSystemBroadcast(context.Background(), &fftypes.Message{ diff --git a/internal/definitions/definition_handler_tokenpool_test.go b/internal/definitions/definition_handler_tokenpool_test.go index 449a851846..d0203c7f42 100644 --- 
a/internal/definitions/definition_handler_tokenpool_test.go +++ b/internal/definitions/definition_handler_tokenpool_test.go @@ -61,7 +61,7 @@ func buildPoolDefinitionMessage(announce *fftypes.TokenPoolAnnouncement) (*fftyp return nil, nil, err } data := []*fftypes.Data{{ - Value: fftypes.Byteable(b), + Value: fftypes.JSONAnyPtrBytes(b), }} return msg, data, nil } diff --git a/internal/events/batch_pin_complete_test.go b/internal/events/batch_pin_complete_test.go index c9a4e46764..fbcec13f5a 100644 --- a/internal/events/batch_pin_complete_test.go +++ b/internal/events/batch_pin_complete_test.go @@ -397,7 +397,7 @@ func TestPersistBatchGoodDataUpsertOptimizeExistingFail(t *testing.T) { ID: fftypes.NewUUID(), }, Data: []*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"test"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}, }, }, } @@ -430,7 +430,7 @@ func TestPersistBatchGoodDataUpsertOptimizeNewFail(t *testing.T) { ID: fftypes.NewUUID(), }, Data: []*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"test"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}, }, }, } @@ -544,7 +544,7 @@ func TestPersistBatchDataBadHash(t *testing.T) { } data := &fftypes.Data{ ID: fftypes.NewUUID(), - Value: fftypes.Byteable(`"test"`), + Value: fftypes.JSONAnyPtr(`"test"`), Hash: fftypes.NewRandB32(), } err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) @@ -558,7 +558,7 @@ func TestPersistBatchDataUpsertHashMismatch(t *testing.T) { ID: fftypes.NewUUID(), } - data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"test"`)} + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} data.Hash = data.Value.Hash() mdi := em.database.(*databasemocks.Plugin) @@ -576,7 +576,7 @@ func TestPersistBatchDataUpsertDataError(t *testing.T) { ID: fftypes.NewUUID(), } - data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"test"`)} + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} data.Hash = data.Value.Hash() mdi := em.database.(*databasemocks.Plugin) @@ -593,7 +593,7 @@ func TestPersistBatchDataOk(t *testing.T) { ID: fftypes.NewUUID(), } - data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"test"`)} + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} data.Hash = data.Value.Hash() mdi := em.database.(*databasemocks.Plugin) diff --git a/internal/events/dx_callbacks_test.go b/internal/events/dx_callbacks_test.go index df0f5b22e8..cb1497ee92 100644 --- a/internal/events/dx_callbacks_test.go +++ b/internal/events/dx_callbacks_test.go @@ -707,7 +707,7 @@ func TestMessageReceiveMessagePersistDataFail(t *testing.T) { } data := &fftypes.Data{ ID: fftypes.NewUUID(), - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } err := msg.Seal(em.ctx) assert.NoError(t, err) @@ -757,7 +757,7 @@ func TestMessageReceiveMessagePersistEventFail(t *testing.T) { } data := &fftypes.Data{ ID: fftypes.NewUUID(), - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } err := msg.Seal(em.ctx) assert.NoError(t, err) @@ -809,7 +809,7 @@ func TestMessageReceiveMessageEnsureLocalGroupFail(t *testing.T) { } data := &fftypes.Data{ ID: fftypes.NewUUID(), - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } err := msg.Seal(em.ctx) assert.NoError(t, err) @@ -851,7 +851,7 @@ func TestMessageReceiveMessageEnsureLocalGroupReject(t *testing.T) { } data := &fftypes.Data{ 
ID: fftypes.NewUUID(), - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } err := msg.Seal(em.ctx) assert.NoError(t, err) diff --git a/internal/events/event_dispatcher_test.go b/internal/events/event_dispatcher_test.go index 079a1a9dd5..3feb780b14 100644 --- a/internal/events/event_dispatcher_test.go +++ b/internal/events/event_dispatcher_test.go @@ -884,7 +884,7 @@ func TestEventDispatcherWithReply(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`"my reply"`)}, + {Value: fftypes.JSONAnyPtr(`"my reply"`)}, }, }, }) @@ -900,7 +900,7 @@ func TestEventDispatcherWithReply(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`"my reply"`)}, + {Value: fftypes.JSONAnyPtr(`"my reply"`)}, }, }, }) diff --git a/internal/events/persist_batch.go b/internal/events/persist_batch.go index 95ac9929a6..54bfd98686 100644 --- a/internal/events/persist_batch.go +++ b/internal/events/persist_batch.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -73,7 +73,7 @@ func (em *eventManager) isRootOrgBroadcast(batch *fftypes.Batch) bool { if batchDataItem.ID.Equals(messageDataItem.ID) { if batchDataItem.Validator == fftypes.MessageTypeDefinition { var org *fftypes.Organization - err := json.Unmarshal(batchDataItem.Value, &org) + err := json.Unmarshal(batchDataItem.Value.Bytes(), &org) if err != nil { return false } diff --git a/internal/events/persist_batch_test.go b/internal/events/persist_batch_test.go index ba5af86050..65b439b433 100644 --- a/internal/events/persist_batch_test.go +++ b/internal/events/persist_batch_test.go @@ -48,7 +48,7 @@ func TestPersistBatchFromBroadcastRootOrg(t *testing.T) { assert.NoError(t, err) data := &fftypes.Data{ ID: fftypes.NewUUID(), - Value: orgBytes, + Value: fftypes.JSONAnyPtrBytes(orgBytes), Validator: fftypes.MessageTypeDefinition, } @@ -103,7 +103,7 @@ func TestPersistBatchFromBroadcastRootOrgBadData(t *testing.T) { data := &fftypes.Data{ ID: fftypes.NewUUID(), - Value: []byte("!badness"), + Value: fftypes.JSONAnyPtr("!badness"), Validator: fftypes.MessageTypeDefinition, } diff --git a/internal/events/webhooks/webhooks.go b/internal/events/webhooks/webhooks.go index 863fd6893d..d952207186 100644 --- a/internal/events/webhooks/webhooks.go +++ b/internal/events/webhooks/webhooks.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -56,7 +56,7 @@ type whRequest struct { type whResponse struct { Status int `json:"status"` Headers fftypes.JSONObject `json:"headers"` - Body fftypes.Byteable `json:"body"` + Body *fftypes.JSONAny `json:"body"` } func (wh *WebHooks) Name() string { return "webhooks" } @@ -263,7 +263,7 @@ func (wh *WebHooks) ValidateOptions(options *fftypes.SubscriptionOptions) error func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) (req *whRequest, res *whResponse, err error) { withData := sub.Options.WithData != nil && *sub.Options.WithData - allData := make([]fftypes.Byteable, 0, len(data)) + allData := make([]*fftypes.JSONAny, 0, len(data)) var firstData fftypes.JSONObject var valid bool if withData { @@ -334,7 +334,8 @@ func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.Eve if err != nil { return nil, nil, i18n.WrapError(wh.ctx, err, i18n.MsgWebhooksReplyBadJSON) } - res.Body, _ = json.Marshal(&resData) // we know we can re-marshal it + b, _ := json.Marshal(&resData) // we know we can re-marshal It + res.Body = fftypes.JSONAnyPtrBytes(b) } else { // Anything other than JSON, gets returned as a JSON string in base64 encoding buf := &bytes.Buffer{} @@ -343,7 +344,7 @@ func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.Eve _, _ = io.Copy(b64Encoder, resp.RawBody()) _ = b64Encoder.Close() buf.WriteByte('"') - res.Body = buf.Bytes() + res.Body = fftypes.JSONAnyPtrBytes(buf.Bytes()) } return req, res, nil @@ -363,7 +364,7 @@ func (wh *WebHooks) doDelivery(connID string, reply bool, sub *fftypes.Subscript Headers: fftypes.JSONObject{ "Content-Type": "application/json", }, - Body: b, + Body: fftypes.JSONAnyPtrBytes(b), } } b, _ := json.Marshal(&res) @@ -390,7 +391,7 @@ func (wh *WebHooks) doDelivery(connID string, reply bool, sub *fftypes.Subscript }, }, InlineData: fftypes.InlineData{ - {Value: b}, + {Value: fftypes.JSONAnyPtrBytes(b)}, }, }, }) diff --git a/internal/events/webhooks/webhooks_test.go b/internal/events/webhooks/webhooks_test.go index 075d4ebb1b..6c65bc3f56 100644 --- a/internal/events/webhooks/webhooks_test.go +++ b/internal/events/webhooks/webhooks_test.go @@ -189,7 +189,7 @@ func TestRequestWithBodyReplyEndToEnd(t *testing.T) { } data := &fftypes.Data{ ID: dataID, - Value: fftypes.Byteable(`{ + Value: fftypes.JSONAnyPtr(`{ "in_body": { "inputfield": "inputvalue" }, @@ -294,7 +294,7 @@ func TestRequestWithEmptyStringBodyReplyEndToEnd(t *testing.T) { } data := &fftypes.Data{ ID: dataID, - Value: fftypes.Byteable(`{ + Value: fftypes.JSONAnyPtr(`{ "in_body": { "inputfield": "" }, @@ -371,7 +371,7 @@ func TestRequestNoBodyNoReply(t *testing.T) { } data := &fftypes.Data{ ID: dataID, - Value: fftypes.Byteable(`{ + Value: fftypes.JSONAnyPtr(`{ "inputfield": "inputvalue" }`), } @@ -541,8 +541,8 @@ func TestRequestReplyDataArrayBadStatusB64(t *testing.T) { })).Return(nil) err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"value1"`)}, - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"value2"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) assert.NoError(t, err) assert.True(t, called) @@ -589,8 +589,8 @@ func TestRequestReplyDataArrayError(t *testing.T) { })).Return(nil) err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: 
fftypes.Byteable(`"value1"`)}, - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"value2"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) assert.NoError(t, err) @@ -639,8 +639,8 @@ func TestRequestReplyBuildRequestFailFastAsk(t *testing.T) { } err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"value1"`)}, - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`"value2"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) assert.NoError(t, err) <-waiter diff --git a/internal/orchestrator/config.go b/internal/orchestrator/config.go index fffd989543..4e9a7b3e94 100644 --- a/internal/orchestrator/config.go +++ b/internal/orchestrator/config.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -36,7 +36,7 @@ func (or *orchestrator) GetConfigRecords(ctx context.Context, filter database.An return or.database.GetConfigRecords(ctx, filter) } -func (or *orchestrator) PutConfigRecord(ctx context.Context, key string, value fftypes.Byteable) (outputValue fftypes.Byteable, err error) { +func (or *orchestrator) PutConfigRecord(ctx context.Context, key string, value *fftypes.JSONAny) (outputValue *fftypes.JSONAny, err error) { configRecord := &fftypes.ConfigRecord{ Key: key, Value: value, diff --git a/internal/orchestrator/config_test.go b/internal/orchestrator/config_test.go index 1103e41b38..d8b1af825b 100644 --- a/internal/orchestrator/config_test.go +++ b/internal/orchestrator/config_test.go @@ -31,12 +31,12 @@ func TestGetConfigRecord(t *testing.T) { or := newTestOrchestrator() or.mdi.On("GetConfigRecord", mock.Anything, mock.Anything).Return(&fftypes.ConfigRecord{ Key: "foobar", - Value: []byte(`{"foo": "bar"}`), + Value: fftypes.JSONAnyPtr(`{"foo": "bar"}`), }, nil) ctx := context.Background() configRecord, err := or.GetConfigRecord(ctx, "foo") assert.NoError(t, err) - assert.Equal(t, fftypes.Byteable(`{"foo": "bar"}`), configRecord.Value) + assert.Equal(t, fftypes.JSONAnyPtr(`{"foo": "bar"}`), configRecord.Value) } func TestGetConfigRecords(t *testing.T) { @@ -44,17 +44,17 @@ func TestGetConfigRecords(t *testing.T) { or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{ { Key: "foobar", - Value: []byte(`{"foo": "bar"}`), + Value: fftypes.JSONAnyPtr(`{"foo": "bar"}`), }, }, nil, nil) ctx := context.Background() configRecords, _, err := or.GetConfigRecords(ctx, nil) assert.NoError(t, err) - assert.Equal(t, fftypes.Byteable(`{"foo": "bar"}`), configRecords[0].Value) + assert.Equal(t, fftypes.JSONAnyPtr(`{"foo": "bar"}`), configRecords[0].Value) } func TestPutConfigRecord(t *testing.T) { - testValue := fftypes.Byteable(`{"foo": "bar"}`) + testValue := fftypes.JSONAnyPtr(`{"foo": "bar"}`) or := newTestOrchestrator() or.mdi.On("UpsertConfigRecord", mock.Anything, mock.Anything, mock.Anything).Return(nil) ctx := context.Background() @@ -64,7 +64,7 @@ func TestPutConfigRecord(t *testing.T) { } func TestPutConfigRecordFail(t *testing.T) { - testValue := fftypes.Byteable(`{"foo": "bar"}`) + testValue := fftypes.JSONAnyPtr(`{"foo": "bar"}`) or := newTestOrchestrator() or.mdi.On("UpsertConfigRecord", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) ctx := context.Background() diff --git a/internal/orchestrator/data_query_test.go 
b/internal/orchestrator/data_query_test.go index bc6d6413c6..2658f10800 100644 --- a/internal/orchestrator/data_query_test.go +++ b/internal/orchestrator/data_query_test.go @@ -123,8 +123,8 @@ func TestGetMessageByIDWithDataOk(t *testing.T) { } or.mdi.On("GetMessageByID", mock.Anything, mock.MatchedBy(func(u *fftypes.UUID) bool { return u.Equals(msgID) })).Return(msg, nil) or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Value: fftypes.Byteable("{}")}, - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Value: fftypes.Byteable("{}")}, + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Value: fftypes.JSONAnyPtr("{}")}, + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Value: fftypes.JSONAnyPtr("{}")}, }, true, nil) msgI, err := or.GetMessageByIDWithData(context.Background(), "ns1", msgID.String()) diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index cc7d06d589..97427b6dd0 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -115,7 +115,7 @@ type Orchestrator interface { GetConfig(ctx context.Context) fftypes.JSONObject GetConfigRecord(ctx context.Context, key string) (*fftypes.ConfigRecord, error) GetConfigRecords(ctx context.Context, filter database.AndFilter) ([]*fftypes.ConfigRecord, *database.FilterResult, error) - PutConfigRecord(ctx context.Context, key string, configRecord fftypes.Byteable) (outputValue fftypes.Byteable, err error) + PutConfigRecord(ctx context.Context, key string, configRecord *fftypes.JSONAny) (outputValue *fftypes.JSONAny, err error) DeleteConfigRecord(ctx context.Context, key string) (err error) ResetConfig(ctx context.Context) diff --git a/internal/orchestrator/orchestrator_test.go b/internal/orchestrator/orchestrator_test.go index 661899601a..d79fb7d164 100644 --- a/internal/orchestrator/orchestrator_test.go +++ b/internal/orchestrator/orchestrator_test.go @@ -208,7 +208,7 @@ func TestBlockchaiInitMergeConfigRecordsFail(t *testing.T) { or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{ { Key: "pizza.toppings", - Value: []byte("cheese, pepperoni, mushrooms"), + Value: fftypes.JSONAnyPtr("cheese, pepperoni, mushrooms"), }, }, nil, nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) diff --git a/internal/privatemessaging/groupmanager.go b/internal/privatemessaging/groupmanager.go index 354f4e50e5..c9d2be9973 100644 --- a/internal/privatemessaging/groupmanager.go +++ b/internal/privatemessaging/groupmanager.go @@ -82,8 +82,9 @@ func (gm *groupManager) groupInit(ctx context.Context, signer *fftypes.Identity, Namespace: group.Namespace, // must go in the same ordering context as the message Created: fftypes.Now(), } - data.Value, err = json.Marshal(&group) + b, err := json.Marshal(&group) if err == nil { + data.Value = fftypes.JSONAnyPtrBytes(b) err = group.Validate(ctx, true) if err == nil { err = data.Seal(ctx, nil) @@ -207,7 +208,7 @@ func (gm *groupManager) ResolveInitGroup(ctx context.Context, msg *fftypes.Messa return nil, err } var newGroup fftypes.Group - err = json.Unmarshal(data[0].Value, &newGroup) + err = json.Unmarshal(data[0].Value.Bytes(), &newGroup) if err != nil { log.L(ctx).Warnf("Group %s definition in message %s invalid: %s", msg.Header.Group, 
msg.Header.ID, err) return nil, nil diff --git a/internal/privatemessaging/groupmanager_test.go b/internal/privatemessaging/groupmanager_test.go index 29e2aa5cfd..5993dc01f9 100644 --- a/internal/privatemessaging/groupmanager_test.go +++ b/internal/privatemessaging/groupmanager_test.go @@ -110,7 +110,7 @@ func TestResolveInitGroupBadData(t *testing.T) { mdm := pm.data.(*datamocks.Manager) mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`!json`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`!json`)}, }, true, nil) _, err := pm.ResolveInitGroup(pm.ctx, &fftypes.Message{ @@ -135,7 +135,7 @@ func TestResolveInitGroupBadValidation(t *testing.T) { mdm := pm.data.(*datamocks.Manager) mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`{}`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`{}`)}, }, true, nil) _, err := pm.ResolveInitGroup(pm.ctx, &fftypes.Message{ @@ -173,7 +173,7 @@ func TestResolveInitGroupBadGroupID(t *testing.T) { mdm := pm.data.(*datamocks.Manager) mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(b)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) _, err := pm.ResolveInitGroup(pm.ctx, &fftypes.Message{ @@ -211,7 +211,7 @@ func TestResolveInitGroupUpsertFail(t *testing.T) { mdm := pm.data.(*datamocks.Manager) mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(b)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) mdi := pm.database.(*databasemocks.Plugin) mdi.On("UpsertGroup", pm.ctx, mock.Anything, true).Return(fmt.Errorf("pop")) @@ -251,7 +251,7 @@ func TestResolveInitGroupNewOk(t *testing.T) { mdm := pm.data.(*datamocks.Manager) mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(b)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) mdi := pm.database.(*databasemocks.Plugin) mdi.On("UpsertGroup", pm.ctx, mock.Anything, true).Return(nil) diff --git a/internal/privatemessaging/message.go b/internal/privatemessaging/message.go index b06ebf6179..1644a108e2 100644 --- a/internal/privatemessaging/message.go +++ b/internal/privatemessaging/message.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -219,5 +219,5 @@ func (s *messageSender) sendUnpinned(ctx context.Context) (err error) { return i18n.WrapError(ctx, err, i18n.MsgSerializationFailed) } - return s.mgr.sendData(ctx, "message", s.msg.Header.ID, s.msg.Header.Group, s.namespace, nodes, payload, nil, data) + return s.mgr.sendData(ctx, "message", s.msg.Header.ID, s.msg.Header.Group, s.namespace, nodes, fftypes.JSONAnyPtrBytes(payload), nil, data) } diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index d60ef5e308..6c76b38de2 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -81,7 +81,7 @@ func TestSendConfirmMessageE2EOk(t *testing.T) { msg, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some": "data"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, Group: &fftypes.InputGroup{ Members: []fftypes.MemberInput{ @@ -116,7 +116,7 @@ func TestSendUnpinnedMessageE2EOk(t *testing.T) { {ID: dataID, Hash: fftypes.NewRandB32()}, }, nil) mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: dataID, Value: fftypes.Byteable(`{"some": "data"}`)}, + {ID: dataID, Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, true, nil).Once() mdi := pm.database.(*databasemocks.Plugin) @@ -149,7 +149,7 @@ func TestSendUnpinnedMessageE2EOk(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some": "data"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, Group: &fftypes.InputGroup{ Members: []fftypes.MemberInput{ @@ -177,7 +177,7 @@ func TestSendMessageBadGroup(t *testing.T) { _, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some": "data"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, Group: &fftypes.InputGroup{}, }, true) @@ -197,7 +197,7 @@ func TestSendMessageBadIdentity(t *testing.T) { _, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some": "data"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, Group: &fftypes.InputGroup{ Members: []fftypes.MemberInput{ @@ -245,7 +245,7 @@ func TestSendMessageFail(t *testing.T) { _, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some": "data"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, Group: &fftypes.InputGroup{ Members: []fftypes.MemberInput{ @@ -401,7 +401,7 @@ func TestSendUnpinnedMessageMarshalFail(t *testing.T) { nodeID2 := fftypes.NewUUID() mdm := pm.data.(*datamocks.Manager) mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: fftypes.NewUUID(), Value: fftypes.Byteable(`!Invalid JSON`)}, + {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`!Invalid JSON`)}, }, true, nil).Once() mdi := pm.database.(*databasemocks.Plugin) @@ -555,7 +555,7 @@ func TestSendUnpinnedMessageInsertFail(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some": "data"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, Group: &fftypes.InputGroup{ Members: []fftypes.MemberInput{ @@ -628,7 +628,7 @@ func TestSendUnpinnedMessageResolveGroupFail(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some": "data"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, Group: 
&fftypes.InputGroup{ Members: []fftypes.MemberInput{ @@ -662,7 +662,7 @@ func TestSendUnpinnedMessageEventFail(t *testing.T) { {ID: dataID, Hash: fftypes.NewRandB32()}, }, nil) mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: dataID, Value: fftypes.Byteable(`{"some": "data"}`)}, + {ID: dataID, Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, true, nil).Once() mdi := pm.database.(*databasemocks.Plugin) @@ -695,7 +695,7 @@ func TestSendUnpinnedMessageEventFail(t *testing.T) { }, }, InlineData: fftypes.InlineData{ - {Value: fftypes.Byteable(`{"some": "data"}`)}, + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, }, Group: &fftypes.InputGroup{ Members: []fftypes.MemberInput{ diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index de68b41576..697b077ee8 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -135,7 +135,7 @@ func (pm *privateMessaging) dispatchBatch(ctx context.Context, batch *fftypes.Ba } return pm.database.RunAsGroup(ctx, func(ctx context.Context) error { - return pm.sendAndSubmitBatch(ctx, batch, nodes, payload, contexts) + return pm.sendAndSubmitBatch(ctx, batch, nodes, fftypes.JSONAnyPtrBytes(payload), contexts) }) } @@ -174,7 +174,7 @@ func (pm *privateMessaging) transferBlobs(ctx context.Context, data []*fftypes.D return nil } -func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fftypes.UUID, group *fftypes.Bytes32, ns string, nodes []*fftypes.Node, payload fftypes.Byteable, txid *fftypes.UUID, data []*fftypes.Data) (err error) { +func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fftypes.UUID, group *fftypes.Bytes32, ns string, nodes []*fftypes.Node, payload *fftypes.JSONAny, txid *fftypes.UUID, data []*fftypes.Data) (err error) { l := log.L(ctx) // TODO: move to using DIDs consistently as the way to reference the node/organization (i.e. 
node.Owner becomes a DID) @@ -199,7 +199,7 @@ func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fft } // Send the payload itself - trackingID, err := pm.exchange.SendMessage(ctx, node.DX.Peer, payload) + trackingID, err := pm.exchange.SendMessage(ctx, node.DX.Peer, payload.Bytes()) if err != nil { return err } @@ -222,7 +222,7 @@ func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fft return nil } -func (pm *privateMessaging) sendAndSubmitBatch(ctx context.Context, batch *fftypes.Batch, nodes []*fftypes.Node, payload fftypes.Byteable, contexts []*fftypes.Bytes32) (err error) { +func (pm *privateMessaging) sendAndSubmitBatch(ctx context.Context, batch *fftypes.Batch, nodes []*fftypes.Node, payload *fftypes.JSONAny, contexts []*fftypes.Bytes32) (err error) { if err = pm.sendData(ctx, "batch", batch.ID, batch.Group, batch.Namespace, nodes, payload, batch.Payload.TX.ID, batch.Payload.Data); err != nil { return err } diff --git a/internal/privatemessaging/privatemessaging_test.go b/internal/privatemessaging/privatemessaging_test.go index 53e6b96297..c1b9072564 100644 --- a/internal/privatemessaging/privatemessaging_test.go +++ b/internal/privatemessaging/privatemessaging_test.go @@ -184,7 +184,7 @@ func TestDispatchBatchBadData(t *testing.T) { err := pm.dispatchBatch(pm.ctx, &fftypes.Batch{ Payload: fftypes.BatchPayload{ Data: []*fftypes.Data{ - {Value: fftypes.Byteable(`{!json}`)}, + {Value: fftypes.JSONAnyPtr(`{!json}`)}, }, }, }, []*fftypes.Bytes32{}) @@ -223,7 +223,7 @@ func TestSendAndSubmitBatchBadID(t *testing.T) { Identity: fftypes.Identity{ Author: "badauthor", }, - }, []*fftypes.Node{}, fftypes.Byteable(`{}`), []*fftypes.Bytes32{}) + }, []*fftypes.Node{}, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } @@ -241,7 +241,7 @@ func TestSendAndSubmitBatchUnregisteredNode(t *testing.T) { Identity: fftypes.Identity{ Author: "badauthor", }, - }, []*fftypes.Node{}, fftypes.Byteable(`{}`), []*fftypes.Bytes32{}) + }, []*fftypes.Node{}, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } @@ -266,7 +266,7 @@ func TestSendImmediateFail(t *testing.T) { Endpoint: fftypes.JSONObject{"url": "https://node1.example.com"}, }, }, - }, fftypes.Byteable(`{}`), []*fftypes.Bytes32{}) + }, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } @@ -299,7 +299,7 @@ func TestSendSubmitInsertOperationFail(t *testing.T) { Endpoint: fftypes.JSONObject{"url": "https://node1.example.com"}, }, }, - }, fftypes.Byteable(`{}`), []*fftypes.Bytes32{}) + }, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } @@ -329,7 +329,7 @@ func TestSendSubmitBlobTransferFail(t *testing.T) { Endpoint: fftypes.JSONObject{"url": "https://node1.example.com"}, }, }, - }, fftypes.Byteable(`{}`), []*fftypes.Bytes32{}) + }, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } diff --git a/internal/privatemessaging/recipients_test.go b/internal/privatemessaging/recipients_test.go index 03a64f956d..c79fd2d78e 100644 --- a/internal/privatemessaging/recipients_test.go +++ b/internal/privatemessaging/recipients_test.go @@ -63,7 +63,7 @@ func TestResolveMemberListNewGroupE2E(t *testing.T) { assert.Equal(t, fftypes.ValidatorTypeSystemDefinition, data.Validator) assert.Equal(t, "ns1", data.Namespace) var group fftypes.Group - err := json.Unmarshal(data.Value, &group) + err := json.Unmarshal(data.Value.Bytes(), &group) assert.NoError(t, err) assert.Len(t, group.Members, 
2) // Group identiy is sorted by group members DIDs so check them in that order diff --git a/internal/syncasync/sync_async_bridge_test.go b/internal/syncasync/sync_async_bridge_test.go index 1f53b55a43..b24d74c8a6 100644 --- a/internal/syncasync/sync_async_bridge_test.go +++ b/internal/syncasync/sync_async_bridge_test.go @@ -69,7 +69,7 @@ func TestRequestReplyOk(t *testing.T) { mdm := sa.data.(*datamocks.Manager) mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: dataID, Value: fftypes.Byteable(`"response data"`)}, + {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) reply, err := sa.WaitForReply(sa.ctx, "ns1", requestID, func(ctx context.Context) error { @@ -87,7 +87,7 @@ func TestRequestReplyOk(t *testing.T) { }) assert.NoError(t, err) assert.Equal(t, *replyID, *reply.Header.ID) - assert.Equal(t, `"response data"`, string(reply.InlineData[0].Value)) + assert.Equal(t, `"response data"`, reply.InlineData[0].Value.String()) } @@ -116,7 +116,7 @@ func TestAwaitConfirmationOk(t *testing.T) { mdm := sa.data.(*datamocks.Manager) mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: dataID, Value: fftypes.Byteable(`"response data"`)}, + {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) reply, err := sa.WaitForMessage(sa.ctx, "ns1", requestID, func(ctx context.Context) error { @@ -162,7 +162,7 @@ func TestAwaitConfirmationRejected(t *testing.T) { mdm := sa.data.(*datamocks.Manager) mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: dataID, Value: fftypes.Byteable(`"response data"`)}, + {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) _, err := sa.WaitForMessage(sa.ctx, "ns1", requestID, func(ctx context.Context) error { diff --git a/mocks/orchestratormocks/orchestrator.go b/mocks/orchestratormocks/orchestrator.go index e950ae5a6d..6c54a71e98 100644 --- a/mocks/orchestratormocks/orchestrator.go +++ b/mocks/orchestratormocks/orchestrator.go @@ -1091,20 +1091,20 @@ func (_m *Orchestrator) PrivateMessaging() privatemessaging.Manager { } // PutConfigRecord provides a mock function with given fields: ctx, key, configRecord -func (_m *Orchestrator) PutConfigRecord(ctx context.Context, key string, configRecord fftypes.Byteable) (fftypes.Byteable, error) { +func (_m *Orchestrator) PutConfigRecord(ctx context.Context, key string, configRecord *fftypes.JSONAny) (*fftypes.JSONAny, error) { ret := _m.Called(ctx, key, configRecord) - var r0 fftypes.Byteable - if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.Byteable) fftypes.Byteable); ok { + var r0 *fftypes.JSONAny + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.JSONAny) *fftypes.JSONAny); ok { r0 = rf(ctx, key, configRecord) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(fftypes.Byteable) + r0 = ret.Get(0).(*fftypes.JSONAny) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, fftypes.Byteable) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, *fftypes.JSONAny) error); ok { r1 = rf(ctx, key, configRecord) } else { r1 = ret.Error(1) diff --git a/pkg/fftypes/config.go b/pkg/fftypes/config.go index ad83565a90..d316da8601 100644 --- a/pkg/fftypes/config.go +++ b/pkg/fftypes/config.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -18,5 +18,5 @@ package fftypes type ConfigRecord struct { Key string `json:"key,omitempty"` - Value Byteable `json:"value,omitempty"` + Value *JSONAny `json:"value,omitempty"` } diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index 7cad9d70d3..59ec9387cd 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -44,7 +44,7 @@ type Data struct { Hash *Bytes32 `json:"hash,omitempty"` Created *FFTime `json:"created,omitempty"` Datatype *DatatypeRef `json:"datatype,omitempty"` - Value Byteable `json:"value"` + Value *JSONAny `json:"value"` Blob *BlobRef `json:"blob,omitempty"` } @@ -84,7 +84,7 @@ func CheckValidatorType(ctx context.Context, validator ValidatorType) error { func (d *Data) CalcHash(ctx context.Context) (*Bytes32, error) { if d.Value == nil { - d.Value = Byteable(NullString) + d.Value = JSONAnyPtr(NullString) } valueIsNull := d.Value.String() == NullString if valueIsNull && (d.Blob == nil || d.Blob.Hash == nil) { diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index bb686f53cc..ce7c7901ed 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -50,7 +50,7 @@ func TestSealNoData(t *testing.T) { func TestSealValueOnly(t *testing.T) { d := &Data{ - Value: []byte("{}"), + Value: JSONAnyPtr("{}"), Blob: &BlobRef{}, } err := d.Seal(context.Background(), nil) @@ -78,7 +78,7 @@ func TestSealBlobExplictlyNamed(t *testing.T) { Blob: &BlobRef{ Hash: blobHash, }, - Value: Byteable(`{ + Value: JSONAnyPtr(`{ "name": "use this", "filename": "ignore this", "path": "ignore this too" @@ -98,7 +98,7 @@ func TestSealBlobPathNamed(t *testing.T) { Blob: &BlobRef{ Hash: blobHash, }, - Value: Byteable(`{ + Value: JSONAnyPtr(`{ "filename": "file.ext", "path": "/path/to" }`), @@ -117,7 +117,7 @@ func TestSealBlobFileNamed(t *testing.T) { Blob: &BlobRef{ Hash: blobHash, }, - Value: Byteable(`{ + Value: JSONAnyPtr(`{ "filename": "file.ext" }`), } @@ -156,7 +156,7 @@ func TestSealBlobAndHashOnly(t *testing.T) { Blob: &BlobRef{ Hash: blobHash, }, - Value: []byte("{}"), + Value: JSONAnyPtr("{}"), } h := sha256.Sum256([]byte(`44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a22440fcf4ee9ac8c1a83de36c3a9ef39f838d960971dc79b274718392f1735f9`)) err := d.Seal(context.Background(), &Blob{ diff --git a/pkg/fftypes/datatype.go b/pkg/fftypes/datatype.go index ccd04585e6..60da473f09 100644 --- a/pkg/fftypes/datatype.go +++ b/pkg/fftypes/datatype.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
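The `CalcHash` change above keeps the old semantics under the new pointer type: a missing value is normalized to the JSON literal `null` before anything is hashed. A simplified sketch of that normalization (assumption: the real method goes on to combine the value hash with the blob hash, which is omitted here):

```go
package example

import "github.com/hyperledger/firefly/pkg/fftypes"

// normalizedValue shows the nil handling from Data.CalcHash in isolation:
// a nil *JSONAny becomes the JSON literal "null", so later calls never dereference nil.
func normalizedValue(v *fftypes.JSONAny) *fftypes.JSONAny {
	if v == nil {
		return fftypes.JSONAnyPtr(fftypes.NullString)
	}
	return v
}

// valueIsNull reproduces the null check made immediately afterwards in CalcHash.
func valueIsNull(v *fftypes.JSONAny) bool {
	return normalizedValue(v).String() == fftypes.NullString
}
```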
// // SPDX-License-Identifier: Apache-2.0 // @@ -43,7 +43,7 @@ type Datatype struct { Version string `json:"version,omitempty"` Hash *Bytes32 `json:"hash,omitempty"` Created *FFTime `json:"created,omitempty"` - Value Byteable `json:"value,omitempty"` + Value *JSONAny `json:"value,omitempty"` } func (dt *Datatype) Validate(ctx context.Context, existing bool) (err error) { @@ -59,7 +59,7 @@ func (dt *Datatype) Validate(ctx context.Context, existing bool) (err error) { if err = ValidateFFNameField(ctx, dt.Version, "version"); err != nil { return err } - if len(dt.Value) == 0 { + if dt.Value == nil || len(*dt.Value) == 0 { return i18n.NewError(ctx, i18n.MsgMissingRequiredField, "value") } if existing { diff --git a/pkg/fftypes/datatype_test.go b/pkg/fftypes/datatype_test.go index e4a238cd3f..f325fcc8fd 100644 --- a/pkg/fftypes/datatype_test.go +++ b/pkg/fftypes/datatype_test.go @@ -64,7 +64,7 @@ func TestDatatypeValidation(t *testing.T) { Namespace: "ok", Name: "ok", Version: "ok", - Value: Byteable(`{}`), + Value: JSONAnyPtr(`{}`), } assert.NoError(t, dt.Validate(context.Background(), false)) diff --git a/pkg/fftypes/byteable.go b/pkg/fftypes/jsonany.go similarity index 68% rename from pkg/fftypes/byteable.go rename to pkg/fftypes/jsonany.go index 6d7112ef6b..cf13c1ad1e 100644 --- a/pkg/fftypes/byteable.go +++ b/pkg/fftypes/jsonany.go @@ -29,41 +29,63 @@ const ( NullString = "null" ) -// Byteable uses raw encode/decode to preserve field order, and can handle any types of field. +// JSONAny uses raw encode/decode to preserve field order, and can handle any types of field. // It validates the JSON can be unmarshalled, but does not change the order. // It does however trim out whitespace -type Byteable []byte +type JSONAny string -func (h *Byteable) UnmarshalJSON(b []byte) error { +func JSONAnyPtr(str string) *JSONAny { + return (*JSONAny)(&str) +} + +func JSONAnyPtrBytes(b []byte) *JSONAny { + if b == nil { + return nil + } + ja := JSONAny(b) + return &ja +} + +func (h *JSONAny) UnmarshalJSON(b []byte) error { var flattener json.RawMessage err := json.Unmarshal(b, &flattener) if err != nil { return err } - *h, err = json.Marshal(flattener) + standardizedBytes, err := json.Marshal(flattener) + if err == nil { + *h = JSONAny(standardizedBytes) + } return err } -func (h Byteable) MarshalJSON() ([]byte, error) { - if h == nil { - return []byte(NullString), nil +func (h JSONAny) MarshalJSON() ([]byte, error) { + if h == "" { + h = NullString } - return h, nil + return []byte(h), nil } -func (h Byteable) Hash() *Bytes32 { +func (h JSONAny) Hash() *Bytes32 { var b32 Bytes32 = sha256.Sum256([]byte(h)) return &b32 } -func (h Byteable) String() string { +func (h JSONAny) String() string { b, _ := h.MarshalJSON() return string(b) } -func (h Byteable) JSONObjectOk(noWarn ...bool) (JSONObject, bool) { +func (h *JSONAny) Bytes() []byte { + if h == nil { + return nil + } + return []byte(*h) +} + +func (h JSONAny) JSONObjectOk(noWarn ...bool) (JSONObject, bool) { var jo JSONObject - err := json.Unmarshal(h, &jo) + err := json.Unmarshal([]byte(h), &jo) if err != nil { if len(noWarn) == 0 || !noWarn[0] { log.L(context.Background()).Warnf("Unable to deserialize as JSON object: %s", string(h)) @@ -76,24 +98,23 @@ func (h Byteable) JSONObjectOk(noWarn ...bool) (JSONObject, bool) { // JSONObject attempts to de-serailize the contained structure as a JSON Object (map) // Safe and will never return nil // Will return an empty object if the type is array, string, bool, number etc. 
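Since most of the remaining hunks are call sites adopting the type defined above, a self-contained sketch of the producing and consuming sides may help while reading them (assuming the `github.com/hyperledger/firefly/pkg/fftypes` import path, which is not shown in these hunks; only constructors and accessors that appear in the diff are used):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hyperledger/firefly/pkg/fftypes"
)

func main() {
	// Literal JSON: replaces the old fftypes.Byteable(`{...}`) construction.
	v := fftypes.JSONAnyPtr(`{"custom": "value1"}`)

	// Pre-marshalled bytes: replaces assigning json.Marshal output directly to a Byteable.
	b, _ := json.Marshal(map[string]string{"filename": "myfile.csv"})
	v2 := fftypes.JSONAnyPtrBytes(b)

	// Consuming side: Bytes() returns the raw JSON for json.Unmarshal,
	// JSONObject() parses it into an fftypes.JSONObject map.
	var decoded map[string]string
	_ = json.Unmarshal(v2.Bytes(), &decoded)

	fmt.Println(v.String())                            // {"custom": "value1"}
	fmt.Println(v.Hash())                              // sha256 of the raw string
	fmt.Println(v2.JSONObject().GetString("filename")) // myfile.csv
	fmt.Println(decoded["filename"])                   // myfile.csv
}
```

`JSONAnyPtr`/`JSONAnyPtrBytes` replace the old `Byteable(...)` casts on the producing side, while `Bytes()`, `String()` and `JSONObject()` cover the consuming side.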
-func (h Byteable) JSONObject() JSONObject { +func (h JSONAny) JSONObject() JSONObject { jo, _ := h.JSONObjectOk() return jo } // JSONObjectNowarn acts the same as JSONObject, but does not warn if the value cannot // be parsed as an object -func (h Byteable) JSONObjectNowarn() JSONObject { +func (h JSONAny) JSONObjectNowarn() JSONObject { jo, _ := h.JSONObjectOk(true) return jo } // Scan implements sql.Scanner -func (h *Byteable) Scan(src interface{}) error { +func (h *JSONAny) Scan(src interface{}) error { switch src := src.(type) { case nil: - nullVal := []byte(NullString) - *h = nullVal + *h = NullString return nil case []byte: return h.UnmarshalJSON(src) diff --git a/pkg/fftypes/byteable_test.go b/pkg/fftypes/jsonany_test.go similarity index 82% rename from pkg/fftypes/byteable_test.go rename to pkg/fftypes/jsonany_test.go index b1f3af56b4..292e23d7a0 100644 --- a/pkg/fftypes/byteable_test.go +++ b/pkg/fftypes/jsonany_test.go @@ -23,11 +23,11 @@ import ( "github.com/stretchr/testify/assert" ) -func TestByteableSerializeNull(t *testing.T) { +func TestJSONAnySerializeNull(t *testing.T) { type testStruct struct { - Prop1 *Byteable `json:"prop1"` - Prop2 *Byteable `json:"prop2,omitempty"` + Prop1 *JSONAny `json:"prop1"` + Prop2 *JSONAny `json:"prop2,omitempty"` } ts := &testStruct{} @@ -41,10 +41,10 @@ func TestByteableSerializeNull(t *testing.T) { } -func TestByteableSerializeObjects(t *testing.T) { +func TestJSONAnySerializeObjects(t *testing.T) { type testStruct struct { - Prop *Byteable `json:"prop,omitempty"` + Prop *JSONAny `json:"prop,omitempty"` } ts := &testStruct{} @@ -72,17 +72,17 @@ func TestByteableSerializeObjects(t *testing.T) { } -func TestByteableMarshalNull(t *testing.T) { +func TestJSONAnyMarshalNull(t *testing.T) { - var pb Byteable + var pb JSONAny b, err := pb.MarshalJSON() assert.NoError(t, err) assert.Equal(t, NullString, string(b)) } -func TestByteableUnmarshalFail(t *testing.T) { +func TestJSONAnyUnmarshalFail(t *testing.T) { - var b Byteable + var b JSONAny err := b.UnmarshalJSON([]byte(`!json`)) assert.Error(t, err) @@ -92,7 +92,7 @@ func TestByteableUnmarshalFail(t *testing.T) { func TestScan(t *testing.T) { - var h Byteable + var h JSONAny assert.NoError(t, h.Scan(nil)) assert.Equal(t, []byte(NullString), []byte(h)) @@ -107,4 +107,10 @@ func TestScan(t *testing.T) { assert.Regexp(t, "FF10125", h.Scan(12345)) + assert.Equal(t, "test", JSONAnyPtrBytes([]byte(`{"val": "test"}`)).JSONObject().GetString("val")) + assert.Nil(t, JSONAnyPtrBytes(nil)) + + assert.Nil(t, JSONAnyPtrBytes(nil).Bytes()) + assert.NotEmpty(t, JSONAnyPtr("{}").Bytes()) + } diff --git a/pkg/fftypes/jsondata.go b/pkg/fftypes/jsonobject.go similarity index 84% rename from pkg/fftypes/jsondata.go rename to pkg/fftypes/jsonobject.go index 489cae0d37..0b8729e06b 100644 --- a/pkg/fftypes/jsondata.go +++ b/pkg/fftypes/jsonobject.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -183,42 +183,3 @@ func (jd JSONObject) Hash(jsonDesc string) (*Bytes32, error) { var b32 Bytes32 = sha256.Sum256(b) return &b32, nil } - -// JSONObjectArray is an array of JSONObject -type JSONObjectArray []JSONObject - -// Scan implements sql.Scanner -func (jd *JSONObjectArray) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string, []byte: - if src == "" { - return nil - } - return json.Unmarshal(src.([]byte), &jd) - - default: - return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, jd) - } - -} - -func (jd JSONObjectArray) Value() (driver.Value, error) { - return json.Marshal(&jd) -} - -func (jd JSONObjectArray) String() string { - b, _ := json.Marshal(&jd) - return string(b) -} - -func (jd JSONObjectArray) Hash(jsonDesc string) (*Bytes32, error) { - b, err := json.Marshal(&jd) - if err != nil { - return nil, i18n.NewError(context.Background(), i18n.MsgJSONObjectParseFailed, jsonDesc) - } - var b32 Bytes32 = sha256.Sum256(b) - return &b32, nil -} diff --git a/pkg/fftypes/jsondata_test.go b/pkg/fftypes/jsonobject_test.go similarity index 88% rename from pkg/fftypes/jsondata_test.go rename to pkg/fftypes/jsonobject_test.go index 8fb50ae6ac..52501cf873 100644 --- a/pkg/fftypes/jsondata_test.go +++ b/pkg/fftypes/jsonobject_test.go @@ -84,26 +84,6 @@ func TestJSONObject(t *testing.T) { assert.Equal(t, "", v) } -func TestJSONObjectArray(t *testing.T) { - - data := Byteable(`{ - "field1": true, - "field2": false, - "field3": "True", - "field4": "not true", - "field5": { "not": "boolable" }, - "field6": null - }`) - dataJSON := data.JSONObject() - assert.True(t, dataJSON.GetBool("field1")) - assert.False(t, dataJSON.GetBool("field2")) - assert.True(t, dataJSON.GetBool("field3")) - assert.False(t, dataJSON.GetBool("field4")) - assert.False(t, dataJSON.GetBool("field5")) - assert.False(t, dataJSON.GetBool("field6")) - assert.False(t, dataJSON.GetBool("field7")) -} - func TestJSONObjectBool(t *testing.T) { data := JSONObjectArray{ @@ -112,7 +92,7 @@ func TestJSONObjectBool(t *testing.T) { b, err := data.Value() assert.NoError(t, err) - assert.IsType(t, []byte{}, b) + assert.Equal(t, "[{\"some\":\"data\"}]", b) var dataRead JSONObjectArray err = dataRead.Scan(b) diff --git a/pkg/fftypes/jsonobjectarray.go b/pkg/fftypes/jsonobjectarray.go new file mode 100644 index 0000000000..c24cf12662 --- /dev/null +++ b/pkg/fftypes/jsonobjectarray.go @@ -0,0 +1,78 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fftypes + +import ( + "context" + "crypto/sha256" + "database/sql/driver" + "encoding/json" + + "github.com/hyperledger/firefly/internal/i18n" +) + +// JSONObjectArray is an array of JSONObject +type JSONObjectArray []JSONObject + +// Scan implements sql.Scanner +func (jd *JSONObjectArray) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + *jd = JSONObjectArray{} + return nil + + case []byte: + if src == nil { + *jd = JSONObjectArray{} + return nil + } + return json.Unmarshal(src, &jd) + + case string: + if src == "" { + *jd = JSONObjectArray{} + return nil + } + return json.Unmarshal([]byte(src), &jd) + + default: + return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, jd) + } + +} + +func (jd JSONObjectArray) Value() (driver.Value, error) { + b, err := json.Marshal(&jd) + if err != nil { + return nil, err + } + return string(b), err +} + +func (jd JSONObjectArray) String() string { + b, _ := json.Marshal(&jd) + return string(b) +} + +func (jd JSONObjectArray) Hash(jsonDesc string) (*Bytes32, error) { + b, err := json.Marshal(&jd) + if err != nil { + return nil, i18n.NewError(context.Background(), i18n.MsgJSONObjectParseFailed, jsonDesc) + } + var b32 Bytes32 = sha256.Sum256(b) + return &b32, nil +} diff --git a/pkg/fftypes/jsonobjectarray_test.go b/pkg/fftypes/jsonobjectarray_test.go new file mode 100644 index 0000000000..2329ff6cf3 --- /dev/null +++ b/pkg/fftypes/jsonobjectarray_test.go @@ -0,0 +1,77 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fftypes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestJSONObjectArray(t *testing.T) { + + data := JSONAnyPtr(`{ + "field1": true, + "field2": false, + "field3": "True", + "field4": "not true", + "field5": { "not": "boolable" }, + "field6": null + }`) + dataJSON := data.JSONObject() + assert.True(t, dataJSON.GetBool("field1")) + assert.False(t, dataJSON.GetBool("field2")) + assert.True(t, dataJSON.GetBool("field3")) + assert.False(t, dataJSON.GetBool("field4")) + assert.False(t, dataJSON.GetBool("field5")) + assert.False(t, dataJSON.GetBool("field6")) + assert.False(t, dataJSON.GetBool("field7")) +} + +func TestJSONObjectArrayScan(t *testing.T) { + + var joa JSONObjectArray + + err := joa.Scan(`[{"test": 1}]`) + assert.NoError(t, err) + assert.Equal(t, "1", joa[0].GetString("test")) + + err = joa.Scan([]byte(`[{"test": 1}]`)) + assert.NoError(t, err) + assert.Equal(t, "1", joa[0].GetString("test")) + + err = joa.Scan(nil) + assert.NoError(t, err) + assert.Empty(t, joa) + + err = joa.Scan("") + assert.NoError(t, err) + assert.Empty(t, joa) + + err = joa.Scan([]byte(nil)) + assert.NoError(t, err) + assert.Empty(t, joa) + + joa = JSONObjectArray([]JSONObject{ + JSONObject(map[string]interface{}{ + "bad": map[bool]bool{false: true}, + }), + }) + _, err = joa.Value() + assert.Error(t, err) + +} diff --git a/pkg/fftypes/message.go b/pkg/fftypes/message.go index bac967494b..28b3f697a8 100644 --- a/pkg/fftypes/message.go +++ b/pkg/fftypes/message.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -118,7 +118,7 @@ type DataRefOrValue struct { Validator ValidatorType `json:"validator,omitempty"` Datatype *DatatypeRef `json:"datatype,omitempty"` - Value Byteable `json:"value,omitempty"` + Value *JSONAny `json:"value,omitempty"` Blob *BlobRef `json:"blob,omitempty"` } diff --git a/pkg/fftypes/message_test.go b/pkg/fftypes/message_test.go index f13132ceb2..63101c4833 100644 --- a/pkg/fftypes/message_test.go +++ b/pkg/fftypes/message_test.go @@ -202,7 +202,7 @@ func TestSealKnownMessage(t *testing.T) { func TestSetInlineData(t *testing.T) { msg := &MessageInOut{} msg.SetInlineData([]*Data{ - {ID: NewUUID(), Value: Byteable(`"some data"`)}, + {ID: NewUUID(), Value: JSONAnyPtr(`"some data"`)}, }) b, err := json.Marshal(&msg) assert.NoError(t, err) diff --git a/test/e2e/onchain_offchain_test.go b/test/e2e/onchain_offchain_test.go index 15d20a66ec..ad30c6b2c4 100644 --- a/test/e2e/onchain_offchain_test.go +++ b/test/e2e/onchain_offchain_test.go @@ -50,7 +50,7 @@ func (suite *OnChainOffChainTestSuite) TestE2EBroadcast() { received2, changes2 := wsReader(suite.T(), suite.testState.ws2) var resp *resty.Response - value := fftypes.Byteable(`"Hello"`) + value := fftypes.JSONAnyPtr(`"Hello"`) data := fftypes.DataRefOrValue{ Value: value, } @@ -78,7 +78,7 @@ func (suite *OnChainOffChainTestSuite) TestStrongDatatypesBroadcast() { received2, changes2 := wsReader(suite.T(), suite.testState.ws2) var resp *resty.Response - value := fftypes.Byteable(`"Hello"`) + value := fftypes.JSONAnyPtr(`"Hello"`) randVer, _ := rand.Int(rand.Reader, big.NewInt(100000000)) version := fmt.Sprintf("0.0.%d", randVer.Int64()) data := fftypes.DataRefOrValue{ @@ -98,7 +98,7 @@ func (suite *OnChainOffChainTestSuite) TestStrongDatatypesBroadcast() { dt := &fftypes.Datatype{ Name: "widget", Version: version, - Value: widgetSchemaJSON, + Value: fftypes.JSONAnyPtrBytes(widgetSchemaJSON), } dt = 
CreateDatatype(suite.T(), suite.testState.client1, dt, true) @@ -107,7 +107,7 @@ func (suite *OnChainOffChainTestSuite) TestStrongDatatypesBroadcast() { assert.Equal(suite.T(), 400, resp.StatusCode()) assert.Contains(suite.T(), resp.String(), "FF10198") // does not conform - data.Value = fftypes.Byteable(`{ + data.Value = fftypes.JSONAnyPtr(`{ "id": "widget12345", "name": "mywidget" }`) @@ -129,7 +129,7 @@ func (suite *OnChainOffChainTestSuite) TestStrongDatatypesPrivate() { received2, changes2 := wsReader(suite.T(), suite.testState.ws2) var resp *resty.Response - value := fftypes.Byteable(`{"foo":"bar"}`) + value := fftypes.JSONAnyPtr(`{"foo":"bar"}`) randVer, _ := rand.Int(rand.Reader, big.NewInt(100000000)) version := fmt.Sprintf("0.0.%d", randVer.Int64()) data := fftypes.DataRefOrValue{ @@ -152,7 +152,7 @@ func (suite *OnChainOffChainTestSuite) TestStrongDatatypesPrivate() { dt := &fftypes.Datatype{ Name: "widget", Version: version, - Value: widgetSchemaJSON, + Value: fftypes.JSONAnyPtrBytes(widgetSchemaJSON), } dt = CreateDatatype(suite.T(), suite.testState.client1, dt, true) @@ -164,7 +164,7 @@ func (suite *OnChainOffChainTestSuite) TestStrongDatatypesPrivate() { assert.Equal(suite.T(), 400, resp.StatusCode()) assert.Contains(suite.T(), resp.String(), "FF10198") // does not conform - data.Value = fftypes.Byteable(`{ + data.Value = fftypes.JSONAnyPtr(`{ "id": "widget12345", "name": "mywidget" }`) @@ -189,7 +189,7 @@ func (suite *OnChainOffChainTestSuite) TestE2EPrivate() { received2, _ := wsReader(suite.T(), suite.testState.ws2) var resp *resty.Response - value := fftypes.Byteable(`"Hello"`) + value := fftypes.JSONAnyPtr(`"Hello"`) data := fftypes.DataRefOrValue{ Value: value, } @@ -224,13 +224,13 @@ func (suite *OnChainOffChainTestSuite) TestE2EBroadcastBlob() { waitForMessageConfirmed(suite.T(), received1, fftypes.MessageTypeBroadcast) val1 := validateReceivedMessages(suite.testState, suite.testState.client1, fftypes.MessageTypeBroadcast, fftypes.TransactionTypeBatchPin, 1, 0) - assert.Regexp(suite.T(), "myfile.txt", string(val1.Value)) + assert.Regexp(suite.T(), "myfile.txt", val1.Value.String()) assert.Equal(suite.T(), "myfile.txt", val1.Blob.Name) assert.Equal(suite.T(), data.Blob.Size, val1.Blob.Size) waitForMessageConfirmed(suite.T(), received2, fftypes.MessageTypeBroadcast) val2 := validateReceivedMessages(suite.testState, suite.testState.client2, fftypes.MessageTypeBroadcast, fftypes.TransactionTypeBatchPin, 1, 0) - assert.Regexp(suite.T(), "myfile.txt", string(val2.Value)) + assert.Regexp(suite.T(), "myfile.txt", val2.Value.String()) assert.Equal(suite.T(), "myfile.txt", val2.Blob.Name) assert.Equal(suite.T(), data.Blob.Size, val2.Blob.Size) @@ -292,7 +292,7 @@ func (suite *OnChainOffChainTestSuite) TestE2EWebhookExchange() { assert.NotNil(suite.T(), sub.ID) data := fftypes.DataRefOrValue{ - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } var resp *resty.Response @@ -345,7 +345,7 @@ func (suite *OnChainOffChainTestSuite) TestE2EWebhookRequestReplyNoTx() { assert.NotNil(suite.T(), sub.ID) data := fftypes.DataRefOrValue{ - Value: fftypes.Byteable(`{}`), + Value: fftypes.JSONAnyPtr(`{}`), } reply := RequestReply(suite.T(), suite.testState.client1, &data, []string{ diff --git a/test/e2e/tokens_test.go b/test/e2e/tokens_test.go index f50566ff95..7e5727547f 100644 --- a/test/e2e/tokens_test.go +++ b/test/e2e/tokens_test.go @@ -107,7 +107,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { Message: &fftypes.MessageInOut{ InlineData: 
fftypes.InlineData{ { - Value: fftypes.Byteable(`"payment for data"`), + Value: fftypes.JSONAnyPtr(`"payment for data"`), }, }, }, @@ -238,7 +238,7 @@ func (suite *TokensTestSuite) TestE2ENonFungibleTokensSync() { Message: &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ { - Value: fftypes.Byteable(`"ownership change"`), + Value: fftypes.JSONAnyPtr(`"ownership change"`), }, }, }, From 4283dc7f0408c0de6d8a61b01a54741b96dccca5 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 7 Jan 2022 16:59:25 -0500 Subject: [PATCH 09/21] Err tweaks Signed-off-by: Peter Broadhurst --- internal/i18n/en_translations.go | 4 ++-- pkg/database/filter.go | 2 +- pkg/database/query_fields.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go index 84ee6b7f77..021347be5a 100644 --- a/internal/i18n/en_translations.go +++ b/internal/i18n/en_translations.go @@ -222,6 +222,6 @@ var ( MsgQueryOpUnsupportedMod = ffm("FF10302", "Operation '%s' on '%s' does not support modifiers", 400) MsgDXBadSize = ffm("FF10303", "Unexpected size returned from data exchange upload. Size=%d Expected=%d") MsgBlobMismatchSealingData = ffm("FF10304", "Blob mismatch when sealing data") - MsgFieldTypeNoStringMatching = ffm("FF10305", "Field '%s' of type '%s' does not support partial or case-insensitive string matching via '%s' operator", 400) - MsgFieldMatchNoNull = ffm("FF10306", "Comparison operator '%s' for field '%s' cannot accept a null value", 400) + MsgFieldTypeNoStringMatching = ffm("FF10305", "Field '%s' of type '%s' does not support partial or case-insensitive string matching", 400) + MsgFieldMatchNoNull = ffm("FF10306", "Comparison operator for field '%s' cannot accept a null value", 400) ) diff --git a/pkg/database/filter.go b/pkg/database/filter.go index cd6ab206f6..57741f4165 100644 --- a/pkg/database/filter.go +++ b/pkg/database/filter.go @@ -376,7 +376,7 @@ func (f *baseFilter) Finalize() (fi *FilterInfo, err error) { case field.filterAsString(): value = &stringField{} case filterOpIsStringMatch(f.op): - return nil, i18n.NewError(f.fb.ctx, i18n.MsgFieldTypeNoStringMatching, name, field.description(), f.op) + return nil, i18n.NewError(f.fb.ctx, i18n.MsgFieldTypeNoStringMatching, name, field.description()) default: value = field.getSerialization() } diff --git a/pkg/database/query_fields.go b/pkg/database/query_fields.go index aa60374e5f..96c768a52d 100644 --- a/pkg/database/query_fields.go +++ b/pkg/database/query_fields.go @@ -294,7 +294,7 @@ func (f *jsonField) Scan(src interface{}) (err error) { func (f *jsonField) Value() (driver.Value, error) { return f.b, nil } func (f *jsonField) String() string { return string(f.b) } func (f *JSONField) getSerialization() FieldSerialization { return &jsonField{} } -func (f *JSONField) filterAsString() bool { return false } +func (f *JSONField) filterAsString() bool { return true } func (f *JSONField) description() string { return "JSON-blob" } type FFNameArrayField struct{} From 7b254c9f6f9a2e604970cbc0348f015b7b9c705d Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 7 Jan 2022 17:20:15 -0500 Subject: [PATCH 10/21] Move to TEXT fields Signed-off-by: Peter Broadhurst --- db/migrations/postgres/000002_create_data_table.up.sql | 2 +- db/migrations/postgres/000004_create_batches_table.up.sql | 2 +- .../postgres/000005_create_transactions_table.up.sql | 2 +- db/migrations/postgres/000006_create_datatypes_table.up.sql | 2 +- db/migrations/postgres/000008_create_operations_table.up.sql 
| 2 +- .../postgres/000010_create_subscriptions_table.up.sql | 2 +- db/migrations/postgres/000013_create_orgs_table.up.sql | 2 +- db/migrations/postgres/000014_create_nodes_table.up.sql | 2 +- db/migrations/postgres/000015_create_config_table.up.sql | 2 +- db/migrations/postgres/000027_add_operations_input.up.sql | 2 +- db/migrations/sqlite/000002_create_data_table.up.sql | 2 +- db/migrations/sqlite/000004_create_batches_table.up.sql | 2 +- db/migrations/sqlite/000005_create_transactions_table.up.sql | 2 +- db/migrations/sqlite/000006_create_datatypes_table.up.sql | 2 +- db/migrations/sqlite/000008_create_operations_table.up.sql | 2 +- db/migrations/sqlite/000010_create_subscriptions_table.up.sql | 2 +- db/migrations/sqlite/000013_create_orgs_table.up.sql | 2 +- db/migrations/sqlite/000014_create_nodes_table.up.sql | 2 +- db/migrations/sqlite/000015_create_config_table.up.sql | 2 +- db/migrations/sqlite/000027_add_operations_input.up.sql | 2 +- pkg/database/filter_test.go | 4 ++-- 21 files changed, 22 insertions(+), 22 deletions(-) diff --git a/db/migrations/postgres/000002_create_data_table.up.sql b/db/migrations/postgres/000002_create_data_table.up.sql index 0cbe156780..d761c23a24 100644 --- a/db/migrations/postgres/000002_create_data_table.up.sql +++ b/db/migrations/postgres/000002_create_data_table.up.sql @@ -8,7 +8,7 @@ CREATE TABLE data ( datatype_version VARCHAR(64) NOT NULL, hash CHAR(64) NOT NULL, created BIGINT NOT NULL, - value BYTEA NOT NULL, + value TEXT NOT NULL, blobstore BOOLEAN NOT NULL ); CREATE UNIQUE INDEX data_id ON data(id); diff --git a/db/migrations/postgres/000004_create_batches_table.up.sql b/db/migrations/postgres/000004_create_batches_table.up.sql index 2f8a06c561..84973cf5f2 100644 --- a/db/migrations/postgres/000004_create_batches_table.up.sql +++ b/db/migrations/postgres/000004_create_batches_table.up.sql @@ -8,7 +8,7 @@ CREATE TABLE batches ( group_hash CHAR(64), hash CHAR(64), created BIGINT NOT NULL, - payload BYTEA NOT NULL, + payload TEXT NOT NULL, payload_ref CHAR(64), confirmed BIGINT, tx_type VARCHAR(64) NOT NULL, diff --git a/db/migrations/postgres/000005_create_transactions_table.up.sql b/db/migrations/postgres/000005_create_transactions_table.up.sql index 79b4b6d144..3d0a40b0d0 100644 --- a/db/migrations/postgres/000005_create_transactions_table.up.sql +++ b/db/migrations/postgres/000005_create_transactions_table.up.sql @@ -10,7 +10,7 @@ CREATE TABLE transactions ( created BIGINT NOT NULL, protocol_id VARCHAR(256), status VARCHAR(64) NOT NULL, - info BYTEA + info TEXT ); CREATE UNIQUE INDEX transactions_id ON data(id); diff --git a/db/migrations/postgres/000006_create_datatypes_table.up.sql b/db/migrations/postgres/000006_create_datatypes_table.up.sql index 0eeadd7458..4d2ca8cba4 100644 --- a/db/migrations/postgres/000006_create_datatypes_table.up.sql +++ b/db/migrations/postgres/000006_create_datatypes_table.up.sql @@ -9,7 +9,7 @@ CREATE TABLE datatypes ( version VARCHAR(64) NOT NULL, hash CHAR(64) NOT NULL, created BIGINT NOT NULL, - value BYTEA + value TEXT ); CREATE UNIQUE INDEX datatypes_id ON data(id); diff --git a/db/migrations/postgres/000008_create_operations_table.up.sql b/db/migrations/postgres/000008_create_operations_table.up.sql index 6f125b8284..14011cd450 100644 --- a/db/migrations/postgres/000008_create_operations_table.up.sql +++ b/db/migrations/postgres/000008_create_operations_table.up.sql @@ -12,7 +12,7 @@ CREATE TABLE operations ( created BIGINT NOT NULL, updated BIGINT, error VARCHAR NOT NULL, - info BYTEA + info TEXT ); 
CREATE UNIQUE INDEX operations_id ON operations(id); diff --git a/db/migrations/postgres/000010_create_subscriptions_table.up.sql b/db/migrations/postgres/000010_create_subscriptions_table.up.sql index cdcc026a46..96af8c8509 100644 --- a/db/migrations/postgres/000010_create_subscriptions_table.up.sql +++ b/db/migrations/postgres/000010_create_subscriptions_table.up.sql @@ -9,7 +9,7 @@ CREATE TABLE subscriptions ( filter_topics VARCHAR(256) NOT NULL, filter_tag VARCHAR(256) NOT NULL, filter_group VARCHAR(256) NOT NULL, - options BYTEA NOT NULL, + options TEXT NOT NULL, created BIGINT NOT NULL ); diff --git a/db/migrations/postgres/000013_create_orgs_table.up.sql b/db/migrations/postgres/000013_create_orgs_table.up.sql index 98144d2dcf..edf34ec6b4 100644 --- a/db/migrations/postgres/000013_create_orgs_table.up.sql +++ b/db/migrations/postgres/000013_create_orgs_table.up.sql @@ -7,7 +7,7 @@ CREATE TABLE orgs ( parent VARCHAR(1024), identity VARCHAR(1024) NOT NULL, description VARCHAR(4096) NOT NULL, - profile BYTEA, + profile TEXT, created BIGINT NOT NULL ); diff --git a/db/migrations/postgres/000014_create_nodes_table.up.sql b/db/migrations/postgres/000014_create_nodes_table.up.sql index 89ac79ccda..4e382ef1f2 100644 --- a/db/migrations/postgres/000014_create_nodes_table.up.sql +++ b/db/migrations/postgres/000014_create_nodes_table.up.sql @@ -7,7 +7,7 @@ CREATE TABLE nodes ( name VARCHAR(64) NOT NULL, description VARCHAR(4096) NOT NULL, dx_peer VARCHAR(256), - dx_endpoint BYTEA, + dx_endpoint TEXT, created BIGINT NOT NULL ); diff --git a/db/migrations/postgres/000015_create_config_table.up.sql b/db/migrations/postgres/000015_create_config_table.up.sql index 7b25511af7..663928c435 100644 --- a/db/migrations/postgres/000015_create_config_table.up.sql +++ b/db/migrations/postgres/000015_create_config_table.up.sql @@ -2,7 +2,7 @@ BEGIN; CREATE TABLE config ( seq SERIAL PRIMARY KEY, config_key VARCHAR(512) NOT NULL, - config_value BYTEA NOT NULL + config_value TEXT NOT NULL ); CREATE UNIQUE INDEX config_sequence ON config(seq); CREATE UNIQUE INDEX config_config_key ON config(config_key); diff --git a/db/migrations/postgres/000027_add_operations_input.up.sql b/db/migrations/postgres/000027_add_operations_input.up.sql index 1b1d3bf160..3ae8dd62d3 100644 --- a/db/migrations/postgres/000027_add_operations_input.up.sql +++ b/db/migrations/postgres/000027_add_operations_input.up.sql @@ -1,4 +1,4 @@ BEGIN; ALTER TABLE operations RENAME COLUMN info TO output; -ALTER TABLE operations ADD COLUMN input BYTEA; +ALTER TABLE operations ADD COLUMN input TEXT; COMMIT; diff --git a/db/migrations/sqlite/000002_create_data_table.up.sql b/db/migrations/sqlite/000002_create_data_table.up.sql index bf91dc743c..f420b15d4c 100644 --- a/db/migrations/sqlite/000002_create_data_table.up.sql +++ b/db/migrations/sqlite/000002_create_data_table.up.sql @@ -7,7 +7,7 @@ CREATE TABLE data ( datatype_version VARCHAR(64) NOT NULL, hash CHAR(64) NOT NULL, created BIGINT NOT NULL, - value BYTEA NOT NULL, + value TEXT NOT NULL, blob_hash CHAR(64), blob_public VARCHAR(1024) ); diff --git a/db/migrations/sqlite/000004_create_batches_table.up.sql b/db/migrations/sqlite/000004_create_batches_table.up.sql index 6bce877bed..c5aab6f9e0 100644 --- a/db/migrations/sqlite/000004_create_batches_table.up.sql +++ b/db/migrations/sqlite/000004_create_batches_table.up.sql @@ -7,7 +7,7 @@ CREATE TABLE batches ( group_hash CHAR(64), hash CHAR(64), created BIGINT NOT NULL, - payload BYTEA NOT NULL, + payload TEXT NOT NULL, payload_ref VARCHAR(256), 
confirmed BIGINT, tx_type VARCHAR(64) NOT NULL, diff --git a/db/migrations/sqlite/000005_create_transactions_table.up.sql b/db/migrations/sqlite/000005_create_transactions_table.up.sql index 2867f7f581..96ce69af3f 100644 --- a/db/migrations/sqlite/000005_create_transactions_table.up.sql +++ b/db/migrations/sqlite/000005_create_transactions_table.up.sql @@ -9,7 +9,7 @@ CREATE TABLE transactions ( created BIGINT NOT NULL, protocol_id VARCHAR(256), status VARCHAR(64) NOT NULL, - info BYTEA + info TEXT ); CREATE UNIQUE INDEX transactions_id ON data(id); diff --git a/db/migrations/sqlite/000006_create_datatypes_table.up.sql b/db/migrations/sqlite/000006_create_datatypes_table.up.sql index 3127e3340b..26570f5d5d 100644 --- a/db/migrations/sqlite/000006_create_datatypes_table.up.sql +++ b/db/migrations/sqlite/000006_create_datatypes_table.up.sql @@ -8,7 +8,7 @@ CREATE TABLE datatypes ( version VARCHAR(64) NOT NULL, hash CHAR(64) NOT NULL, created BIGINT NOT NULL, - value BYTEA + value TEXT ); CREATE UNIQUE INDEX datatypes_id ON data(id); diff --git a/db/migrations/sqlite/000008_create_operations_table.up.sql b/db/migrations/sqlite/000008_create_operations_table.up.sql index e7a60aac0b..ce67be59ce 100644 --- a/db/migrations/sqlite/000008_create_operations_table.up.sql +++ b/db/migrations/sqlite/000008_create_operations_table.up.sql @@ -11,7 +11,7 @@ CREATE TABLE operations ( created BIGINT NOT NULL, updated BIGINT, error VARCHAR NOT NULL, - info BYTEA + info TEXT ); CREATE UNIQUE INDEX operations_id ON operations(id); diff --git a/db/migrations/sqlite/000010_create_subscriptions_table.up.sql b/db/migrations/sqlite/000010_create_subscriptions_table.up.sql index 3d41e99f62..e017bbd4d4 100644 --- a/db/migrations/sqlite/000010_create_subscriptions_table.up.sql +++ b/db/migrations/sqlite/000010_create_subscriptions_table.up.sql @@ -8,7 +8,7 @@ CREATE TABLE subscriptions ( filter_topics VARCHAR(256) NOT NULL, filter_tag VARCHAR(256) NOT NULL, filter_group VARCHAR(256) NOT NULL, - options BYTEA NOT NULL, + options TEXT NOT NULL, created BIGINT NOT NULL ); diff --git a/db/migrations/sqlite/000013_create_orgs_table.up.sql b/db/migrations/sqlite/000013_create_orgs_table.up.sql index 01424d4b1b..1b9711db33 100644 --- a/db/migrations/sqlite/000013_create_orgs_table.up.sql +++ b/db/migrations/sqlite/000013_create_orgs_table.up.sql @@ -6,7 +6,7 @@ CREATE TABLE orgs ( parent VARCHAR(1024), identity VARCHAR(1024) NOT NULL, description VARCHAR(4096) NOT NULL, - profile BYTEA, + profile TEXT, created BIGINT NOT NULL ); diff --git a/db/migrations/sqlite/000014_create_nodes_table.up.sql b/db/migrations/sqlite/000014_create_nodes_table.up.sql index 7680618114..a802abac1e 100644 --- a/db/migrations/sqlite/000014_create_nodes_table.up.sql +++ b/db/migrations/sqlite/000014_create_nodes_table.up.sql @@ -6,7 +6,7 @@ CREATE TABLE nodes ( name VARCHAR(64) NOT NULL, description VARCHAR(4096) NOT NULL, dx_peer VARCHAR(256), - dx_endpoint BYTEA, + dx_endpoint TEXT, created BIGINT NOT NULL ); diff --git a/db/migrations/sqlite/000015_create_config_table.up.sql b/db/migrations/sqlite/000015_create_config_table.up.sql index fed78211eb..1037cba812 100644 --- a/db/migrations/sqlite/000015_create_config_table.up.sql +++ b/db/migrations/sqlite/000015_create_config_table.up.sql @@ -1,7 +1,7 @@ CREATE TABLE config ( seq INTEGER PRIMARY KEY AUTOINCREMENT, config_key VARCHAR(512) NOT NULL, - config_value BYTEA NOT NULL + config_value TEXT NOT NULL ); CREATE UNIQUE INDEX config_sequence ON config(seq); CREATE UNIQUE INDEX config_config_key 
ON config(config_key); diff --git a/db/migrations/sqlite/000027_add_operations_input.up.sql b/db/migrations/sqlite/000027_add_operations_input.up.sql index d372ba0312..54c76c5dc5 100644 --- a/db/migrations/sqlite/000027_add_operations_input.up.sql +++ b/db/migrations/sqlite/000027_add_operations_input.up.sql @@ -1,2 +1,2 @@ ALTER TABLE operations RENAME COLUMN info TO output; -ALTER TABLE operations ADD COLUMN input BYTEA; +ALTER TABLE operations ADD COLUMN input TEXT; diff --git a/pkg/database/filter_test.go b/pkg/database/filter_test.go index d8bdf27097..fb9d3f9a12 100644 --- a/pkg/database/filter_test.go +++ b/pkg/database/filter_test.go @@ -298,9 +298,9 @@ func TestQueryFactoryBadNestedValue(t *testing.T) { } func TestQueryFactoryStringMatchNonString(t *testing.T) { - fb := DataQueryFactory.NewFilter(context.Background()) + fb := MessageQueryFactory.NewFilter(context.Background()) _, err := fb.And( - fb.Contains("value", "stuff"), + fb.Contains("sequence", "stuff"), ).Finalize() assert.Regexp(t, "FF10305", err) } From f4663c0aeaf19546b5103cd3a12788dc6eacbff4 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Mon, 10 Jan 2022 22:28:02 -0500 Subject: [PATCH 11/21] Byte limit on batch size Signed-off-by: Peter Broadhurst --- internal/batch/batch_manager.go | 3 +- internal/batch/batch_processor.go | 77 +++++++++++++------ internal/batch/batch_processor_test.go | 62 ++++++++++++++- internal/broadcast/manager.go | 1 + internal/config/config.go | 8 +- internal/privatemessaging/privatemessaging.go | 3 +- pkg/fftypes/data.go | 8 ++ pkg/fftypes/data_test.go | 9 +++ pkg/fftypes/message.go | 9 ++- pkg/fftypes/message_test.go | 5 ++ 10 files changed, 153 insertions(+), 32 deletions(-) diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 79f1c8db4c..1b3b44df3c 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -92,6 +92,7 @@ type DispatchHandler func(context.Context, *fftypes.Batch, []*fftypes.Bytes32) e type Options struct { BatchMaxSize uint + BatchMaxBytes int64 BatchTimeout time.Duration DisposeTimeout time.Duration } diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index b9dae1c72b..43a943e818 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -67,6 +67,8 @@ type batchProcessor struct { conf *batchProcessorConf } +const batchSizeEstimateBase = int64(512) + func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, conf *batchProcessorConf, retry *retry.Retry) *batchProcessor { pCtx := log.WithLogField(ctx, "role", fmt.Sprintf("batchproc-%s:%s:%s", conf.namespace, conf.identity.Author, conf.identity.Key)) pCtx, cancelCtx := context.WithCancel(pCtx) @@ -88,6 +90,14 @@ func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di da return bp } +func (bw *batchWork) estimateSize() int64 { + sizeEstimate := bw.msg.EstimateSize() + for _, d := range bw.data { + sizeEstimate += d.EstimateSize() + } + return sizeEstimate +} + // The assemblyLoop accepts work into the pipe as quickly as possible. 
// It dispatches work asynchronously to the persistenceLoop, which is responsible for // calling back each piece of work once persisted into a batch @@ -98,37 +108,55 @@ func (bp *batchProcessor) assemblyLoop() { defer close(bp.sealBatch) // close persitenceLoop when we exit l := log.L(bp.ctx) var batchSize uint + var batchPayloadEstimate = batchSizeEstimateBase var lastBatchSealed = time.Now() var quiescing bool + var overflowedWork *batchWork for { - // We timeout waiting at the point we think we're ready for disposal, - // unless we've started a batch in which case we wait for what's left - // of the batch timeout - timeToWait := bp.conf.DisposeTimeout - if quiescing { - timeToWait = 100 * time.Millisecond - } else if batchSize > 0 { - timeToWait = bp.conf.BatchTimeout - time.Since(lastBatchSealed) - } - timeout := time.NewTimer(timeToWait) - - // Wait for work, the timeout, or close var timedOut, closed bool - select { - case <-timeout.C: - timedOut = true - case work, ok := <-bp.newWork: - if ok && !work.abandoned { - batchSize++ - bp.persistWork <- work - } else { - closed = true + if overflowedWork != nil { + // We overflowed the size cap when we took this message out the newWork + // queue last time round the lop + bp.persistWork <- overflowedWork + batchSize++ + batchPayloadEstimate += overflowedWork.estimateSize() + overflowedWork = nil + } else { + // We timeout waiting at the point we think we're ready for disposal, + // unless we've started a batch in which case we wait for what's left + // of the batch timeout + timeToWait := bp.conf.DisposeTimeout + if quiescing { + timeToWait = 100 * time.Millisecond + } else if batchSize > 0 { + timeToWait = bp.conf.BatchTimeout - time.Since(lastBatchSealed) } + timeout := time.NewTimer(timeToWait) + + // Wait for work, the timeout, or close + select { + case <-timeout.C: + timedOut = true + case work, ok := <-bp.newWork: + if ok && !work.abandoned { + workSize := work.estimateSize() + if batchSize > 0 && batchPayloadEstimate+workSize > bp.conf.BatchMaxBytes { + overflowedWork = work + } else { + batchSize++ + batchPayloadEstimate += workSize + bp.persistWork <- work + } + } else { + closed = true + } + } + } // Don't include the sealing time in the duration - batchFull := batchSize >= bp.conf.BatchMaxSize - l.Debugf("Assembly batch loop: Size=%d Full=%t", batchSize, batchFull) + batchFull := overflowedWork != nil || batchSize >= bp.conf.BatchMaxSize + l.Debugf("Assembly batch loop: Size=%d Full=%t Bytes=%.2fkb (est) Overflow=%t", batchSize, batchFull, float64(batchPayloadEstimate)/1024, overflowedWork != nil) batchDuration := time.Since(lastBatchSealed) if quiescing && batchSize == 0 { @@ -147,6 +175,7 @@ func (bp *batchProcessor) assemblyLoop() { l.Debugf("Assembly batch sealed") lastBatchSealed = time.Now() batchSize = 0 + batchPayloadEstimate = batchSizeEstimateBase } } diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index e7010a116f..1fdc95aac1 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -41,6 +41,7 @@ func newTestBatchProcessor(dispatch DispatchHandler) (*databasemocks.Plugin, *ba processorQuiescing: func() {}, Options: Options{ BatchMaxSize: 10, + BatchMaxBytes: 1024 * 1024, BatchTimeout: 10 * time.Millisecond, DisposeTimeout: 20 * time.Millisecond, }, @@ -78,7 +79,7 @@ func TestUnfilledBatch(t *testing.T) { // Generate the work the work work := make([]*batchWork, 5) - for i := 0; i < 5; i++ { + for i := 0; i < len(work); i++ { msgid 
:= fftypes.NewUUID() work[i] = &batchWork{ msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}}, @@ -88,14 +89,14 @@ func TestUnfilledBatch(t *testing.T) { // Kick off a go routine to consume the confirmations go func() { - for i := 0; i < 5; i++ { + for i := 0; i < len(work); i++ { <-work[i].dispatched } wg.Done() }() // Dispatch the work - for i := 0; i < 5; i++ { + for i := 0; i < len(work); i++ { bp.newWork <- work[i] } @@ -103,7 +104,60 @@ func TestUnfilledBatch(t *testing.T) { wg.Wait() // Check we got all the messages in a single batch - assert.Equal(t, len(dispatched[0].Payload.Messages), 5) + assert.Equal(t, len(dispatched[0].Payload.Messages), len(work)) + + bp.close() + bp.waitClosed() + +} + +func TestBatchSizeOverflow(t *testing.T) { + log.SetLevel("debug") + + wg := sync.WaitGroup{} + wg.Add(3) + + dispatched := []*fftypes.Batch{} + mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + dispatched = append(dispatched, b) + wg.Done() + return nil + }) + bp.conf.BatchMaxBytes = 1 + mockRunAsGroupPassthrough(mdi) + mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil) + mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) + mdi.On("UpdateBatch", mock.Anything, mock.Anything).Return(nil) + + // Generate the work the work + work := make([]*batchWork, 2) + for i := 0; i < 2; i++ { + msgid := fftypes.NewUUID() + work[i] = &batchWork{ + msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}}, + dispatched: make(chan *batchDispatch), + } + } + + // Kick off a go routine to consume the confirmations + go func() { + for i := 0; i < len(work); i++ { + <-work[i].dispatched + } + wg.Done() + }() + + // Dispatch the work + for i := 0; i < len(work); i++ { + bp.newWork <- work[i] + } + + // Wait for the confirmations, and the dispatch + wg.Wait() + + // Check we got all messages across two batches + assert.Equal(t, len(dispatched[0].Payload.Messages), 1) + assert.Equal(t, len(dispatched[1].Payload.Messages), 1) bp.close() bp.waitClosed() diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go index 96378380f1..6a74c50c84 100644 --- a/internal/broadcast/manager.go +++ b/internal/broadcast/manager.go @@ -81,6 +81,7 @@ func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Ma } bo := batch.Options{ BatchMaxSize: config.GetUint(config.BroadcastBatchSize), + BatchMaxBytes: config.GetByteSize(config.BroadcastBatchSize), BatchTimeout: config.GetDuration(config.BroadcastBatchTimeout), DisposeTimeout: config.GetDuration(config.BroadcastBatchAgentTimeout), } diff --git a/internal/config/config.go b/internal/config/config.go index 089c7b83c6..b830d23a5a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -68,14 +68,18 @@ var ( BlockchainType = rootKey("blockchain.type") // BroadcastBatchAgentTimeout how long to keep around a batching agent for a sending identity before disposal BroadcastBatchAgentTimeout = rootKey("broadcast.batch.agentTimeout") - // BroadcastBatchSize is the maximum size of a batch for broadcast messages + // BroadcastBatchSize is the maximum number of messages that can be packed into a batch BroadcastBatchSize = rootKey("broadcast.batch.size") + // BroadcastBatchPayloadLimit is the maximum payload size of a batch for broadcast messages + BroadcastBatchPayloadLimit = rootKey("broadcast.batch.payloadLimit") // BroadcastBatchTimeout is the timeout to wait for a batch to fill, before sending 
BroadcastBatchTimeout = rootKey("broadcast.batch.timeout") // PrivateMessagingBatchAgentTimeout how long to keep around a batching agent for a sending identity before disposal PrivateMessagingBatchAgentTimeout = rootKey("privatemessaging.batch.agentTimeout") // PrivateMessagingBatchSize is the maximum size of a batch for broadcast messages PrivateMessagingBatchSize = rootKey("privatemessaging.batch.size") + // PrivateMessagingBatchPayloadLimit is the maximum payload size of a batch for broadcast messages + PrivateMessagingBatchPayloadLimit = rootKey("privatemessaging.batch.payloadLimit") // PrivateMessagingBatchTimeout is the timeout to wait for a batch to fill, before sending PrivateMessagingBatchTimeout = rootKey("privatemessaging.batch.timeout") // PrivateMessagingOpCorrelationRetries how many times to correlate an event for an operation (such as tx submission) back to an operation. @@ -293,6 +297,7 @@ func Reset() { viper.SetDefault(string(BatchRetryMaxDelay), "30s") viper.SetDefault(string(BroadcastBatchAgentTimeout), "2m") viper.SetDefault(string(BroadcastBatchSize), 200) + viper.SetDefault(string(BroadcastBatchPayloadLimit), "800Kb") viper.SetDefault(string(BroadcastBatchTimeout), "1s") viper.SetDefault(string(CorsAllowCredentials), true) viper.SetDefault(string(CorsAllowedHeaders), []string{"*"}) @@ -337,6 +342,7 @@ func Reset() { viper.SetDefault(string(PrivateMessagingBatchAgentTimeout), "2m") viper.SetDefault(string(PrivateMessagingBatchSize), 200) viper.SetDefault(string(PrivateMessagingBatchTimeout), "1s") + viper.SetDefault(string(PrivateMessagingBatchPayloadLimit), "800Kb") viper.SetDefault(string(SubscriptionDefaultsReadAhead), 0) viper.SetDefault(string(SubscriptionMax), 500) viper.SetDefault(string(SubscriptionsRetryInitialDelay), "250ms") diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index de68b41576..f5186980d1 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -100,6 +100,7 @@ func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Ma bo := batch.Options{ BatchMaxSize: config.GetUint(config.PrivateMessagingBatchSize), + BatchMaxBytes: config.GetByteSize(config.PrivateMessagingBatchPayloadLimit), BatchTimeout: config.GetDuration(config.PrivateMessagingBatchTimeout), DisposeTimeout: config.GetDuration(config.PrivateMessagingBatchAgentTimeout), } diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index e65d978894..51c512e52a 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -82,6 +82,14 @@ func CheckValidatorType(ctx context.Context, validator ValidatorType) error { } } +const dataSizeEstimateBase = int64(256) + +func (d *Data) EstimateSize() int64 { + // For now we have a static estimate for the size of the serialized outer structure, + // plus the byte-length of the string + return dataSizeEstimateBase + int64(len(d.Value)) +} + func (d *Data) CalcHash(ctx context.Context) (*Bytes32, error) { if d.Value == nil { d.Value = Byteable(nullString) diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index 4a2f487c17..02718265f8 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -25,6 +25,15 @@ import ( "github.com/stretchr/testify/assert" ) +func TestEstimateDataSize(t *testing.T) { + d := Data{} + assert.Equal(t, dataSizeEstimateBase, d.EstimateSize()) + d = Data{ + Value: []byte("Test"), + } + assert.Equal(t, dataSizeEstimateBase+int64(4), d.EstimateSize()) +} + func TestDatatypeReference(t *testing.T) { var dr *DatatypeRef diff --git a/pkg/fftypes/message.go b/pkg/fftypes/message.go index bac967494b..90f6d263bf 100644 --- a/pkg/fftypes/message.go +++ b/pkg/fftypes/message.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -150,6 +150,13 @@ func (m *MessageInOut) SetInlineData(data []*Data) { } } +const messageSizeEstimateBase = int64(1024) + +func (m *Message) EstimateSize() int64 { + // For now we have a static estimate for the size of the serialized header structure. 
+ return messageSizeEstimateBase +} + func (m *Message) Seal(ctx context.Context) (err error) { if len(m.Header.Topics) == 0 { m.Header.Topics = []string{DefaultTopic} diff --git a/pkg/fftypes/message_test.go b/pkg/fftypes/message_test.go index f13132ceb2..b32a49a345 100644 --- a/pkg/fftypes/message_test.go +++ b/pkg/fftypes/message_test.go @@ -25,6 +25,11 @@ import ( "github.com/stretchr/testify/assert" ) +func TestEstimateMessageSize(t *testing.T) { + msg := Message{} + assert.Equal(t, messageSizeEstimateBase, msg.EstimateSize()) +} + func TestSealBareMessage(t *testing.T) { msg := Message{} err := msg.Seal(context.Background()) From 76a5e0eb1701cba372d4fa43c210e1ccde332959 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Mon, 10 Jan 2022 23:23:34 -0500 Subject: [PATCH 12/21] Add stored size for JSON values as well as blobs and reject early Signed-off-by: Peter Broadhurst --- .../000049_add_blobs_size_and_name.down.sql | 1 + .../000049_add_blobs_size_and_name.up.sql | 3 +- .../000049_add_blobs_size_and_name.down.sql | 1 + .../000049_add_blobs_size_and_name.up.sql | 3 +- internal/batch/batch_processor.go | 2 +- internal/broadcast/manager.go | 44 ++++++++++--------- internal/broadcast/message.go | 4 ++ internal/broadcast/message_test.go | 38 ++++++++++++++++ internal/data/data_manager.go | 10 +++-- internal/database/sqlcommon/data_sql.go | 6 +++ internal/database/sqlcommon/data_sql_test.go | 2 + internal/i18n/en_translations.go | 2 + internal/privatemessaging/message.go | 4 ++ internal/privatemessaging/message_test.go | 43 ++++++++++++++++++ internal/privatemessaging/privatemessaging.go | 32 +++++++------- pkg/fftypes/data.go | 10 +++-- pkg/fftypes/data_test.go | 3 +- pkg/fftypes/jsonany.go | 7 +++ pkg/fftypes/message.go | 12 ++++- pkg/fftypes/message_test.go | 8 +++- 20 files changed, 185 insertions(+), 50 deletions(-) diff --git a/db/migrations/postgres/000049_add_blobs_size_and_name.down.sql b/db/migrations/postgres/000049_add_blobs_size_and_name.down.sql index 3c1c2c06ac..296e1ad704 100644 --- a/db/migrations/postgres/000049_add_blobs_size_and_name.down.sql +++ b/db/migrations/postgres/000049_add_blobs_size_and_name.down.sql @@ -5,4 +5,5 @@ DROP INDEX data_blob_size; ALTER TABLE blobs DROP COLUMN size; ALTER TABLE data DROP COLUMN blob_name; ALTER TABLE data DROP COLUMN blob_size; +ALTER TABLE data DROP COLUMN value_size; COMMIT; diff --git a/db/migrations/postgres/000049_add_blobs_size_and_name.up.sql b/db/migrations/postgres/000049_add_blobs_size_and_name.up.sql index 57c17708c4..bcd8305b93 100644 --- a/db/migrations/postgres/000049_add_blobs_size_and_name.up.sql +++ b/db/migrations/postgres/000049_add_blobs_size_and_name.up.sql @@ -3,9 +3,10 @@ ALTER TABLE blobs ADD COLUMN size BIGINT; ALTER TABLE data ADD COLUMN blob_name VARCHAR(1024); ALTER TABLE data ADD COLUMN blob_size BIGINT; +ALTER TABLE data ADD COLUMN value_size BIGINT; UPDATE blobs SET size = 0; -UPDATE data SET blob_size = 0, blob_name = ''; +UPDATE data SET blob_size = 0, value_size = 0, blob_name = ''; CREATE INDEX data_blob_name ON data(blob_name); CREATE INDEX data_blob_size ON data(blob_size); diff --git a/db/migrations/sqlite/000049_add_blobs_size_and_name.down.sql b/db/migrations/sqlite/000049_add_blobs_size_and_name.down.sql index b81e5d14e4..6c0af05307 100644 --- a/db/migrations/sqlite/000049_add_blobs_size_and_name.down.sql +++ b/db/migrations/sqlite/000049_add_blobs_size_and_name.down.sql @@ -4,3 +4,4 @@ DROP INDEX data_blob_size; ALTER TABLE blobs DROP COLUMN "size"; ALTER TABLE data DROP COLUMN 
"blob_name"; ALTER TABLE data DROP COLUMN "blob_size"; +ALTER TABLE data DROP COLUMN "value_size"; diff --git a/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql b/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql index db5f02e2e1..f5677012ec 100644 --- a/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql +++ b/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql @@ -2,9 +2,10 @@ ALTER TABLE blobs ADD size BIGINT; ALTER TABLE data ADD blob_name VARCHAR(1024); ALTER TABLE data ADD blob_size BIGINT; +ALTER TABLE data ADD COLUMN value_size BIGINT; UPDATE blobs SET size = 0; -UPDATE data SET blob_size = 0, blob_name = ''; +UPDATE data SET blob_size = 0, value_size = 0, blob_name = ''; CREATE INDEX data_blob_name ON data(blob_name); CREATE INDEX data_blob_size ON data(blob_size); diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 43a943e818..54d6f738dd 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -91,7 +91,7 @@ func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di da } func (bw *batchWork) estimateSize() int64 { - sizeEstimate := bw.msg.EstimateSize() + sizeEstimate := bw.msg.EstimateSize(false /* we calculate data size separately, as we have the full data objects */) for _, d := range bw.data { sizeEstimate += d.EstimateSize() } diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go index 6a74c50c84..292d331159 100644 --- a/internal/broadcast/manager.go +++ b/internal/broadcast/manager.go @@ -51,16 +51,17 @@ type Manager interface { } type broadcastManager struct { - ctx context.Context - database database.Plugin - identity identity.Manager - data data.Manager - blockchain blockchain.Plugin - exchange dataexchange.Plugin - publicstorage publicstorage.Plugin - batch batch.Manager - syncasync syncasync.Bridge - batchpin batchpin.Submitter + ctx context.Context + database database.Plugin + identity identity.Manager + data data.Manager + blockchain blockchain.Plugin + exchange dataexchange.Plugin + publicstorage publicstorage.Plugin + batch batch.Manager + syncasync syncasync.Bridge + batchpin batchpin.Submitter + maxBatchPayloadLength int64 } func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, bi blockchain.Plugin, dx dataexchange.Plugin, pi publicstorage.Plugin, ba batch.Manager, sa syncasync.Bridge, bp batchpin.Submitter) (Manager, error) { @@ -68,20 +69,21 @@ func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Ma return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } bm := &broadcastManager{ - ctx: ctx, - database: di, - identity: im, - data: dm, - blockchain: bi, - exchange: dx, - publicstorage: pi, - batch: ba, - syncasync: sa, - batchpin: bp, + ctx: ctx, + database: di, + identity: im, + data: dm, + blockchain: bi, + exchange: dx, + publicstorage: pi, + batch: ba, + syncasync: sa, + batchpin: bp, + maxBatchPayloadLength: config.GetByteSize(config.BroadcastBatchPayloadLimit), } bo := batch.Options{ BatchMaxSize: config.GetUint(config.BroadcastBatchSize), - BatchMaxBytes: config.GetByteSize(config.BroadcastBatchSize), + BatchMaxBytes: bm.maxBatchPayloadLength, BatchTimeout: config.GetDuration(config.BroadcastBatchTimeout), DisposeTimeout: config.GetDuration(config.BroadcastBatchAgentTimeout), } diff --git a/internal/broadcast/message.go b/internal/broadcast/message.go index 0f38f8b5a3..23a14a46ee 100644 --- a/internal/broadcast/message.go +++ 
b/internal/broadcast/message.go @@ -100,6 +100,10 @@ func (s *broadcastSender) resolveAndSend(ctx context.Context, method sendMethod) if dataToPublish, err = s.resolve(ctx); err != nil { return err } + msgSizeEstimate := s.msg.EstimateSize(true) + if msgSizeEstimate > s.mgr.maxBatchPayloadLength { + return i18n.NewError(ctx, i18n.MsgTooLargeBroadcast, float64(msgSizeEstimate)/1024, float64(s.mgr.maxBatchPayloadLength)/1024) + } s.resolved = true } diff --git a/internal/broadcast/message_test.go b/internal/broadcast/message_test.go index 92a0a04030..1bcc2a8d3a 100644 --- a/internal/broadcast/message_test.go +++ b/internal/broadcast/message_test.go @@ -306,6 +306,44 @@ func TestBroadcastMessageWithBlobsOk(t *testing.T) { mdm.AssertExpectations(t) } +func TestBroadcastMessageTooLarge(t *testing.T) { + bm, cancel := newTestBroadcast(t) + bm.maxBatchPayloadLength = 1000000 + defer cancel() + mdi := bm.database.(*databasemocks.Plugin) + mdm := bm.data.(*datamocks.Manager) + mim := bm.identity.(*identitymanagermocks.Manager) + + ctx := context.Background() + rag := mdi.On("RunAsGroup", ctx, mock.Anything) + rag.RunFn = func(a mock.Arguments) { + var fn = a[1].(func(context.Context) error) + rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} + } + mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), ValueSize: 1000001}, + }, []*fftypes.DataAndBlob{}, nil) + mim.On("ResolveInputIdentity", ctx, mock.Anything).Return(nil) + + _, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ + Message: fftypes.Message{ + Header: fftypes.MessageHeader{ + Identity: fftypes.Identity{ + Author: "did:firefly:org/abcd", + Key: "0x12345", + }, + }, + }, + InlineData: fftypes.InlineData{ + {Value: fftypes.JSONAnyPtr(`{"hello": "world"}`)}, + }, + }, true) + assert.Regexp(t, "FF10307", err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + func TestBroadcastMessageBadInput(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index 4036b33852..360e8918c6 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -276,8 +276,9 @@ func (dm *dataManager) validateAndStoreInlined(ctx context.Context, ns string, v // Return a ref to the newly saved data return data, blob, &fftypes.DataRef{ - ID: data.ID, - Hash: data.Hash, + ID: data.ID, + Hash: data.Hash, + ValueSize: data.ValueSize, }, nil } @@ -319,8 +320,9 @@ func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData return nil, nil, i18n.NewError(ctx, i18n.MsgDataReferenceUnresolvable, i) } refs[i] = &fftypes.DataRef{ - ID: data.ID, - Hash: data.Hash, + ID: data.ID, + Hash: data.Hash, + ValueSize: data.ValueSize, } if blob, err = dm.resolveBlob(ctx, data.Blob); err != nil { return nil, nil, err diff --git a/internal/database/sqlcommon/data_sql.go b/internal/database/sqlcommon/data_sql.go index 9e49647ab0..a57b5ea9e3 100644 --- a/internal/database/sqlcommon/data_sql.go +++ b/internal/database/sqlcommon/data_sql.go @@ -40,6 +40,7 @@ var ( "blob_public", "blob_name", "blob_size", + "value_size", } dataColumnsWithValue = append(append([]string{}, dataColumnsNoValue...), "value") dataFilterFieldMap = map[string]string{ @@ -54,6 +55,7 @@ var ( ) func (s *SQLCommon) attemptDataUpdate(ctx context.Context, tx *txWrapper, data *fftypes.Data, datatype *fftypes.DatatypeRef, blob *fftypes.BlobRef) (int64, error) { + data.ValueSize = 
data.Value.Length() return s.updateTx(ctx, tx, sq.Update("data"). Set("validator", string(data.Validator)). @@ -66,6 +68,7 @@ func (s *SQLCommon) attemptDataUpdate(ctx context.Context, tx *txWrapper, data * Set("blob_public", blob.Public). Set("blob_name", blob.Name). Set("blob_size", blob.Size). + Set("value_size", data.ValueSize). Set("value", data.Value). Where(sq.Eq{ "id": data.ID, @@ -77,6 +80,7 @@ func (s *SQLCommon) attemptDataUpdate(ctx context.Context, tx *txWrapper, data * } func (s *SQLCommon) attemptDataInsert(ctx context.Context, tx *txWrapper, data *fftypes.Data, datatype *fftypes.DatatypeRef, blob *fftypes.BlobRef) (int64, error) { + data.ValueSize = data.Value.Length() return s.insertTx(ctx, tx, sq.Insert("data"). Columns(dataColumnsWithValue...). @@ -92,6 +96,7 @@ func (s *SQLCommon) attemptDataInsert(ctx context.Context, tx *txWrapper, data * blob.Public, blob.Name, blob.Size, + data.ValueSize, data.Value, ), func() { @@ -183,6 +188,7 @@ func (s *SQLCommon) dataResult(ctx context.Context, row *sql.Rows, withValue boo &data.Blob.Public, &data.Blob.Name, &data.Blob.Size, + &data.ValueSize, } if withValue { results = append(results, &data.Value) diff --git a/internal/database/sqlcommon/data_sql_test.go b/internal/database/sqlcommon/data_sql_test.go index 71e7bbbf92..82c1e45b16 100644 --- a/internal/database/sqlcommon/data_sql_test.go +++ b/internal/database/sqlcommon/data_sql_test.go @@ -72,6 +72,7 @@ func TestDataE2EWithDB(t *testing.T) { dataJson, _ := json.Marshal(&data) dataReadJson, _ := json.Marshal(&dataRead) assert.Equal(t, string(dataJson), string(dataReadJson)) + assert.Equal(t, int64(data.Value.Length()), dataRead.ValueSize) // Update the data (this is testing what's possible at the database layer, // and does not account for the verification that happens at the higher level) @@ -117,6 +118,7 @@ func TestDataE2EWithDB(t *testing.T) { dataJson, _ = json.Marshal(&dataUpdated) dataReadJson, _ = json.Marshal(&dataRead) assert.Equal(t, string(dataJson), string(dataReadJson)) + assert.Equal(t, int64(dataUpdated.Value.Length()), dataRead.ValueSize) valRestored, ok := dataRead.Value.JSONObjectOk() assert.True(t, ok) diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go index 021347be5a..c95863d72f 100644 --- a/internal/i18n/en_translations.go +++ b/internal/i18n/en_translations.go @@ -224,4 +224,6 @@ var ( MsgBlobMismatchSealingData = ffm("FF10304", "Blob mismatch when sealing data") MsgFieldTypeNoStringMatching = ffm("FF10305", "Field '%s' of type '%s' does not support partial or case-insensitive string matching", 400) MsgFieldMatchNoNull = ffm("FF10306", "Comparison operator for field '%s' cannot accept a null value", 400) + MsgTooLargeBroadcast = ffm("FF10307", "Message size %.2fkb is too large for the max broadcast batch size of %.2fkb", 400) + MsgTooLargePrivate = ffm("FF10308", "Message size %.2fkb is too large for the max private message size of %.2fkb", 400) ) diff --git a/internal/privatemessaging/message.go b/internal/privatemessaging/message.go index 1644a108e2..1603fe6503 100644 --- a/internal/privatemessaging/message.go +++ b/internal/privatemessaging/message.go @@ -118,6 +118,10 @@ func (s *messageSender) resolveAndSend(ctx context.Context, method sendMethod) e if err := s.resolve(ctx); err != nil { return err } + msgSizeEstimate := s.msg.EstimateSize(true) + if msgSizeEstimate > s.mgr.maxBatchPayloadLength { + return i18n.NewError(ctx, i18n.MsgTooLargePrivate, float64(msgSizeEstimate)/1024, 
float64(s.mgr.maxBatchPayloadLength)/1024) + } s.resolved = true } diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index 6c76b38de2..5815fdde4e 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -312,6 +312,49 @@ func TestResolveAndSendBadInlineData(t *testing.T) { } +func TestSendUnpinnedMessageTooLarge(t *testing.T) { + + pm, cancel := newTestPrivateMessaging(t) + pm.maxBatchPayloadLength = 100000 + defer cancel() + + mim := pm.identity.(*identitymanagermocks.Manager) + mim.On("ResolveInputIdentity", pm.ctx, mock.Anything).Run(func(args mock.Arguments) { + identity := args[1].(*fftypes.Identity) + identity.Author = "localorg" + identity.Key = "localkey" + }).Return(nil) + + dataID := fftypes.NewUUID() + groupID := fftypes.NewRandB32() + mdm := pm.data.(*datamocks.Manager) + mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + {ID: dataID, Hash: fftypes.NewRandB32(), ValueSize: 100001}, + }, nil) + + _, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ + Message: fftypes.Message{ + Header: fftypes.MessageHeader{ + TxType: fftypes.TransactionTypeNone, + Group: groupID, + }, + }, + InlineData: fftypes.InlineData{ + {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, + }, + Group: &fftypes.InputGroup{ + Members: []fftypes.MemberInput{ + {Identity: "org1"}, + }, + }, + }, false) + assert.Regexp(t, "FF10308", err) + + mdm.AssertExpectations(t) + mim.AssertExpectations(t) + +} + func TestSealFail(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index 7fdc151aa2..e2fe97a62e 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -49,19 +49,20 @@ type Manager interface { type privateMessaging struct { groupManager - ctx context.Context - database database.Plugin - identity identity.Manager - exchange dataexchange.Plugin - blockchain blockchain.Plugin - batch batch.Manager - data data.Manager - syncasync syncasync.Bridge - batchpin batchpin.Submitter - retry retry.Retry - localNodeName string - localNodeID *fftypes.UUID // lookup and cached on first use, as might not be registered at startup - opCorrelationRetries int + ctx context.Context + database database.Plugin + identity identity.Manager + exchange dataexchange.Plugin + blockchain blockchain.Plugin + batch batch.Manager + data data.Manager + syncasync syncasync.Bridge + batchpin batchpin.Submitter + retry retry.Retry + localNodeName string + localNodeID *fftypes.UUID // lookup and cached on first use, as might not be registered at startup + opCorrelationRetries int + maxBatchPayloadLength int64 } func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Manager, dx dataexchange.Plugin, bi blockchain.Plugin, ba batch.Manager, dm data.Manager, sa syncasync.Bridge, bp batchpin.Submitter) (Manager, error) { @@ -90,7 +91,8 @@ func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Ma MaximumDelay: config.GetDuration(config.PrivateMessagingRetryMaxDelay), Factor: config.GetFloat64(config.PrivateMessagingRetryFactor), }, - opCorrelationRetries: config.GetInt(config.PrivateMessagingOpCorrelationRetries), + opCorrelationRetries: config.GetInt(config.PrivateMessagingOpCorrelationRetries), + maxBatchPayloadLength: config.GetByteSize(config.PrivateMessagingBatchPayloadLimit), } 
pm.groupManager.groupCache = ccache.New( // We use a LRU cache with a size-aware max @@ -100,7 +102,7 @@ func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Ma bo := batch.Options{ BatchMaxSize: config.GetUint(config.PrivateMessagingBatchSize), - BatchMaxBytes: config.GetByteSize(config.PrivateMessagingBatchPayloadLimit), + BatchMaxBytes: pm.maxBatchPayloadLength, BatchTimeout: config.GetDuration(config.PrivateMessagingBatchTimeout), DisposeTimeout: config.GetDuration(config.PrivateMessagingBatchAgentTimeout), } diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index ddb48f0b35..7834a285d7 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -28,6 +28,8 @@ import ( type DataRef struct { ID *UUID `json:"id,omitempty"` Hash *Bytes32 `json:"hash,omitempty"` + + ValueSize int64 `json:"-"` // used internally for message size calculation, without full payload retrieval } type BlobRef struct { @@ -46,6 +48,8 @@ type Data struct { Datatype *DatatypeRef `json:"datatype,omitempty"` Value *JSONAny `json:"value"` Blob *BlobRef `json:"blob,omitempty"` + + ValueSize int64 `json:"-"` // Used internally for message size calculation, without full payload retrieval } type DataAndBlob struct { @@ -85,9 +89,9 @@ func CheckValidatorType(ctx context.Context, validator ValidatorType) error { const dataSizeEstimateBase = int64(256) func (d *Data) EstimateSize() int64 { - // For now we have a static estimate for the size of the serialized outer structure, - // plus the byte-length of the string - return dataSizeEstimateBase + int64(len(d.Value)) + // For now we have a static estimate for the size of the serialized outer structure. + // As long as this has been persisted, the value size will represent the length of the value + return dataSizeEstimateBase + d.ValueSize } func (d *Data) CalcHash(ctx context.Context) (*Bytes32, error) { diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index e44405b2fb..4fc7d49b41 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -29,7 +29,8 @@ func TestEstimateDataSize(t *testing.T) { d := Data{} assert.Equal(t, dataSizeEstimateBase, d.EstimateSize()) d = Data{ - Value: []byte("Test"), + Value: JSONAnyPtr("Test"), + ValueSize: 4, } assert.Equal(t, dataSizeEstimateBase+int64(4), d.EstimateSize()) } diff --git a/pkg/fftypes/jsonany.go b/pkg/fftypes/jsonany.go index cf13c1ad1e..55d62a34b0 100644 --- a/pkg/fftypes/jsonany.go +++ b/pkg/fftypes/jsonany.go @@ -76,6 +76,13 @@ func (h JSONAny) String() string { return string(b) } +func (h *JSONAny) Length() int64 { + if h == nil { + return 0 + } + return int64(len(*h)) +} + func (h *JSONAny) Bytes() []byte { if h == nil { return nil diff --git a/pkg/fftypes/message.go b/pkg/fftypes/message.go index 391f090a90..4396b7d4ae 100644 --- a/pkg/fftypes/message.go +++ b/pkg/fftypes/message.go @@ -152,9 +152,17 @@ func (m *MessageInOut) SetInlineData(data []*Data) { const messageSizeEstimateBase = int64(1024) -func (m *Message) EstimateSize() int64 { +func (m *Message) EstimateSize(includeDataRefs bool) int64 { // For now we have a static estimate for the size of the serialized header structure. - return messageSizeEstimateBase + // + // includeDataRefs should only be set when the data has been resolved from the database.
+ size := messageSizeEstimateBase + if includeDataRefs { + for _, dr := range m.Data { + size += dr.ValueSize + } + } + return size } func (m *Message) Seal(ctx context.Context) (err error) { diff --git a/pkg/fftypes/message_test.go b/pkg/fftypes/message_test.go index 5689f97d23..b9ec461f67 100644 --- a/pkg/fftypes/message_test.go +++ b/pkg/fftypes/message_test.go @@ -27,7 +27,13 @@ import ( func TestEstimateMessageSize(t *testing.T) { msg := Message{} - assert.Equal(t, messageSizeEstimateBase, msg.EstimateSize()) + assert.Equal(t, messageSizeEstimateBase, msg.EstimateSize(false)) + assert.Equal(t, messageSizeEstimateBase, msg.EstimateSize(true)) + msg.Data = DataRefs{ + {ID: NewUUID(), Hash: NewRandB32(), ValueSize: 1000}, + } + assert.Equal(t, messageSizeEstimateBase, msg.EstimateSize(false)) + assert.Equal(t, messageSizeEstimateBase+int64(1000), msg.EstimateSize(true)) } func TestSealBareMessage(t *testing.T) { From 0bec7e38d9660c7f9413e155df82c2a93f8e5114 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 11 Jan 2022 07:56:04 -0500 Subject: [PATCH 13/21] Move to separate DB migration Signed-off-by: Peter Broadhurst --- .../postgres/000049_add_blobs_size_and_name.down.sql | 1 - .../postgres/000049_add_blobs_size_and_name.up.sql | 7 +++++-- db/migrations/postgres/000050_add_value_size.down.sql | 3 +++ db/migrations/postgres/000050_add_value_size.up.sql | 7 +++++++ .../sqlite/000049_add_blobs_size_and_name.down.sql | 1 - db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql | 3 +-- db/migrations/sqlite/000050_add_value_size.down.sql | 1 + db/migrations/sqlite/000050_add_value_size.up.sql | 3 +++ 8 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 db/migrations/postgres/000050_add_value_size.down.sql create mode 100644 db/migrations/postgres/000050_add_value_size.up.sql create mode 100644 db/migrations/sqlite/000050_add_value_size.down.sql create mode 100644 db/migrations/sqlite/000050_add_value_size.up.sql diff --git a/db/migrations/postgres/000049_add_blobs_size_and_name.down.sql b/db/migrations/postgres/000049_add_blobs_size_and_name.down.sql index 296e1ad704..3c1c2c06ac 100644 --- a/db/migrations/postgres/000049_add_blobs_size_and_name.down.sql +++ b/db/migrations/postgres/000049_add_blobs_size_and_name.down.sql @@ -5,5 +5,4 @@ DROP INDEX data_blob_size; ALTER TABLE blobs DROP COLUMN size; ALTER TABLE data DROP COLUMN blob_name; ALTER TABLE data DROP COLUMN blob_size; -ALTER TABLE data DROP COLUMN value_size; COMMIT; diff --git a/db/migrations/postgres/000049_add_blobs_size_and_name.up.sql b/db/migrations/postgres/000049_add_blobs_size_and_name.up.sql index bcd8305b93..2cb63c3bad 100644 --- a/db/migrations/postgres/000049_add_blobs_size_and_name.up.sql +++ b/db/migrations/postgres/000049_add_blobs_size_and_name.up.sql @@ -3,11 +3,14 @@ ALTER TABLE blobs ADD COLUMN size BIGINT; ALTER TABLE data ADD COLUMN blob_name VARCHAR(1024); ALTER TABLE data ADD COLUMN blob_size BIGINT; -ALTER TABLE data ADD COLUMN value_size BIGINT; UPDATE blobs SET size = 0; -UPDATE data SET blob_size = 0, value_size = 0, blob_name = ''; +UPDATE data SET blob_size = 0, blob_name = ''; CREATE INDEX data_blob_name ON data(blob_name); CREATE INDEX data_blob_size ON data(blob_size); + +ALTER TABLE data ALTER COLUMN blob_name SET NOT NULL; +ALTER TABLE data ALTER COLUMN blob_size SET NOT NULL; + COMMIT; diff --git a/db/migrations/postgres/000050_add_value_size.down.sql b/db/migrations/postgres/000050_add_value_size.down.sql new file mode 100644 index 0000000000..dac690ca10 --- /dev/null 
+++ b/db/migrations/postgres/000050_add_value_size.down.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE data DROP COLUMN value_size; +COMMIT; diff --git a/db/migrations/postgres/000050_add_value_size.up.sql b/db/migrations/postgres/000050_add_value_size.up.sql new file mode 100644 index 0000000000..f2d5548d54 --- /dev/null +++ b/db/migrations/postgres/000050_add_value_size.up.sql @@ -0,0 +1,7 @@ +BEGIN; +ALTER TABLE data ADD COLUMN value_size BIGINT; + +UPDATE data SET value_size = 0; + +ALTER TABLE data ALTER COLUMN value_size SET NOT NULL; +COMMIT; diff --git a/db/migrations/sqlite/000049_add_blobs_size_and_name.down.sql b/db/migrations/sqlite/000049_add_blobs_size_and_name.down.sql index 6c0af05307..b81e5d14e4 100644 --- a/db/migrations/sqlite/000049_add_blobs_size_and_name.down.sql +++ b/db/migrations/sqlite/000049_add_blobs_size_and_name.down.sql @@ -4,4 +4,3 @@ DROP INDEX data_blob_size; ALTER TABLE blobs DROP COLUMN "size"; ALTER TABLE data DROP COLUMN "blob_name"; ALTER TABLE data DROP COLUMN "blob_size"; -ALTER TABLE data DROP COLUMN "value_size"; diff --git a/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql b/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql index f5677012ec..db5f02e2e1 100644 --- a/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql +++ b/db/migrations/sqlite/000049_add_blobs_size_and_name.up.sql @@ -2,10 +2,9 @@ ALTER TABLE blobs ADD size BIGINT; ALTER TABLE data ADD blob_name VARCHAR(1024); ALTER TABLE data ADD blob_size BIGINT; -ALTER TABLE data ADD COLUMN value_size BIGINT; UPDATE blobs SET size = 0; -UPDATE data SET blob_size = 0, value_size = 0, blob_name = ''; +UPDATE data SET blob_size = 0, blob_name = ''; CREATE INDEX data_blob_name ON data(blob_name); CREATE INDEX data_blob_size ON data(blob_size); diff --git a/db/migrations/sqlite/000050_add_value_size.down.sql b/db/migrations/sqlite/000050_add_value_size.down.sql new file mode 100644 index 0000000000..6d5622ed08 --- /dev/null +++ b/db/migrations/sqlite/000050_add_value_size.down.sql @@ -0,0 +1 @@ +ALTER TABLE data DROP COLUMN value_size; diff --git a/db/migrations/sqlite/000050_add_value_size.up.sql b/db/migrations/sqlite/000050_add_value_size.up.sql new file mode 100644 index 0000000000..b649f43125 --- /dev/null +++ b/db/migrations/sqlite/000050_add_value_size.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE data ADD COLUMN value_size BIGINT; + +UPDATE data SET value_size = 0; From da1c58bcd693ea3017db8c56967235d22ff41452 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 11 Jan 2022 07:57:44 -0500 Subject: [PATCH 14/21] Take into account onchain logic migrations in numbering Signed-off-by: Peter Broadhurst --- ...050_add_value_size.down.sql => 000055_add_value_size.down.sql} | 0 ...{000050_add_value_size.up.sql => 000055_add_value_size.up.sql} | 0 ...050_add_value_size.down.sql => 000055_add_value_size.down.sql} | 0 ...{000050_add_value_size.up.sql => 000055_add_value_size.up.sql} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename db/migrations/postgres/{000050_add_value_size.down.sql => 000055_add_value_size.down.sql} (100%) rename db/migrations/postgres/{000050_add_value_size.up.sql => 000055_add_value_size.up.sql} (100%) rename db/migrations/sqlite/{000050_add_value_size.down.sql => 000055_add_value_size.down.sql} (100%) rename db/migrations/sqlite/{000050_add_value_size.up.sql => 000055_add_value_size.up.sql} (100%) diff --git a/db/migrations/postgres/000050_add_value_size.down.sql b/db/migrations/postgres/000055_add_value_size.down.sql similarity index 100% rename from 
db/migrations/postgres/000050_add_value_size.down.sql rename to db/migrations/postgres/000055_add_value_size.down.sql diff --git a/db/migrations/postgres/000050_add_value_size.up.sql b/db/migrations/postgres/000055_add_value_size.up.sql similarity index 100% rename from db/migrations/postgres/000050_add_value_size.up.sql rename to db/migrations/postgres/000055_add_value_size.up.sql diff --git a/db/migrations/sqlite/000050_add_value_size.down.sql b/db/migrations/sqlite/000055_add_value_size.down.sql similarity index 100% rename from db/migrations/sqlite/000050_add_value_size.down.sql rename to db/migrations/sqlite/000055_add_value_size.down.sql diff --git a/db/migrations/sqlite/000050_add_value_size.up.sql b/db/migrations/sqlite/000055_add_value_size.up.sql similarity index 100% rename from db/migrations/sqlite/000050_add_value_size.up.sql rename to db/migrations/sqlite/000055_add_value_size.up.sql From d216ebf0208441c2abae24fee9d009623573801f Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 11 Jan 2022 21:39:39 -0500 Subject: [PATCH 15/21] FIR-7 implementation Signed-off-by: Peter Broadhurst --- .golangci.yml | 2 + internal/database/sqlcommon/message_sql.go | 24 +--- .../database/sqlcommon/message_sql_test.go | 34 ----- internal/dataexchange/dxhttps/config.go | 6 + internal/dataexchange/dxhttps/dxhttps.go | 32 +++-- internal/dataexchange/dxhttps/dxhttps_test.go | 20 ++- internal/events/dx_callbacks.go | 84 ++++++++---- internal/events/dx_callbacks_test.go | 129 ++++++++++++++---- internal/events/event_manager.go | 4 +- internal/i18n/en_translations.go | 1 + internal/orchestrator/bound_callbacks.go | 6 +- internal/orchestrator/bound_callbacks_test.go | 8 +- internal/privatemessaging/message.go | 8 +- internal/privatemessaging/privatemessaging.go | 23 ++-- .../privatemessaging/privatemessaging_test.go | 15 +- mocks/databasemocks/plugin.go | 32 ----- mocks/dataexchangemocks/callbacks.go | 27 ++-- mocks/eventmocks/event_manager.go | 27 ++-- pkg/database/plugin.go | 3 - pkg/dataexchange/plugin.go | 8 +- pkg/fftypes/batch.go | 21 ++- pkg/fftypes/bytetypes.go | 10 +- pkg/fftypes/bytetypes_test.go | 3 + pkg/fftypes/jsonany_test.go | 3 + pkg/fftypes/manifest.go | 30 ++++ pkg/fftypes/manifest_test.go | 36 +++++ pkg/fftypes/message.go | 5 +- pkg/fftypes/transport_wrapper.go | 35 ++++- pkg/fftypes/transport_wrapper_test.go | 92 +++++++++++++ 29 files changed, 504 insertions(+), 224 deletions(-) create mode 100644 pkg/fftypes/manifest.go create mode 100644 pkg/fftypes/manifest_test.go create mode 100644 pkg/fftypes/transport_wrapper_test.go diff --git a/.golangci.yml b/.golangci.yml index d967b40a09..6c6e82f6d4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,7 @@ run: tests: false + skip-dirs: + - "mocks" linters-settings: golint: {} gocritic: diff --git a/internal/database/sqlcommon/message_sql.go b/internal/database/sqlcommon/message_sql.go index 2c62326d67..d2df09270f 100644 --- a/internal/database/sqlcommon/message_sql.go +++ b/internal/database/sqlcommon/message_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -407,28 +407,6 @@ func (s *SQLCommon) GetMessagesForData(ctx context.Context, dataID *fftypes.UUID return s.getMessagesQuery(ctx, query, fop, fi, false) } -func (s *SQLCommon) GetMessageRefs(ctx context.Context, filter database.Filter) ([]*fftypes.MessageRef, *database.FilterResult, error) { - query, fop, fi, err := s.filterSelect(ctx, "", sq.Select("id", sequenceColumn, "hash").From("messages"), filter, msgFilterFieldMap, []interface{}{"sequence"}) - if err != nil { - return nil, nil, err - } - rows, tx, err := s.query(ctx, query) - if err != nil { - return nil, nil, err - } - defer rows.Close() - - msgRefs := []*fftypes.MessageRef{} - for rows.Next() { - var msgRef fftypes.MessageRef - if err = rows.Scan(&msgRef.ID, &msgRef.Sequence, &msgRef.Hash); err != nil { - return nil, nil, i18n.WrapError(ctx, err, i18n.MsgDBReadErr, "messages") - } - msgRefs = append(msgRefs, &msgRef) - } - return msgRefs, s.queryRes(ctx, tx, "messages", fop, fi), nil -} - func (s *SQLCommon) UpdateMessage(ctx context.Context, msgid *fftypes.UUID, update database.Update) (err error) { return s.UpdateMessages(ctx, database.MessageQueryFactory.NewFilter(ctx).Eq("id", msgid), update) } diff --git a/internal/database/sqlcommon/message_sql_test.go b/internal/database/sqlcommon/message_sql_test.go index af258ef29b..0ff43f77ce 100644 --- a/internal/database/sqlcommon/message_sql_test.go +++ b/internal/database/sqlcommon/message_sql_test.go @@ -159,15 +159,6 @@ func TestUpsertE2EWithDB(t *testing.T) { msgReadJson, _ = json.Marshal(msgs[0]) assert.Equal(t, string(msgJson), string(msgReadJson)) - // Check just getting hte refs - msgRefs, res, err := s.GetMessageRefs(ctx, filter.Count(true)) - assert.NoError(t, err) - assert.Equal(t, 1, len(msgs)) - assert.Equal(t, int64(1), *res.TotalCount) - assert.Equal(t, msgUpdated.Header.ID, msgRefs[0].ID) - assert.Equal(t, msgUpdated.Hash, msgRefs[0].Hash) - assert.Equal(t, msgUpdated.Sequence, msgRefs[0].Sequence) - // Check we can get it with a filter on only mesasges with a particular data ref msgs, _, err = s.GetMessagesForData(ctx, dataID2, filter.Count(true)) assert.Regexp(t, "FF10267", err) // The left join means it will take non-trivial extra work to support this. 
So not supported for now @@ -458,31 +449,6 @@ func TestGetMessagesLoadRefsFail(t *testing.T) { assert.NoError(t, mock.ExpectationsWereMet()) } -func TestGetMessageRefsBuildQueryFail(t *testing.T) { - s, _ := newMockProvider().init() - f := database.MessageQueryFactory.NewFilter(context.Background()).Eq("id", map[bool]bool{true: false}) - _, _, err := s.GetMessageRefs(context.Background(), f) - assert.Regexp(t, "FF10149.*id", err) -} - -func TestGetMessageRefsQueryFail(t *testing.T) { - s, mock := newMockProvider().init() - mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) - f := database.MessageQueryFactory.NewFilter(context.Background()).Eq("id", "") - _, _, err := s.GetMessageRefs(context.Background(), f) - assert.Regexp(t, "FF10115", err) - assert.NoError(t, mock.ExpectationsWereMet()) -} - -func TestGetMessageRefsReadMessageFail(t *testing.T) { - s, mock := newMockProvider().init() - mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow("only one")) - f := database.MessageQueryFactory.NewFilter(context.Background()).Eq("id", "") - _, _, err := s.GetMessageRefs(context.Background(), f) - assert.Regexp(t, "FF10121", err) - assert.NoError(t, mock.ExpectationsWereMet()) -} - func TestMessageUpdateBeginFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) diff --git a/internal/dataexchange/dxhttps/config.go b/internal/dataexchange/dxhttps/config.go index ba9917d175..a0971dbb56 100644 --- a/internal/dataexchange/dxhttps/config.go +++ b/internal/dataexchange/dxhttps/config.go @@ -21,6 +21,12 @@ import ( "github.com/hyperledger/firefly/internal/config/wsconfig" ) +const ( + // DataExchangeManifestEnabled determines whether to require+validate a manifest from other DX instances in the network. Must be supported by the connector + DataExchangeManifestEnabled = "manifestEnabled" +) + func (h *HTTPS) InitPrefix(prefix config.Prefix) { wsconfig.InitPrefix(prefix) + prefix.AddKnownKey(DataExchangeManifestEnabled, false) } diff --git a/internal/dataexchange/dxhttps/dxhttps.go b/internal/dataexchange/dxhttps/dxhttps.go index cc9812dc71..a37f6f72f3 100644 --- a/internal/dataexchange/dxhttps/dxhttps.go +++ b/internal/dataexchange/dxhttps/dxhttps.go @@ -53,6 +53,8 @@ type wsEvent struct { Hash string `json:"hash"` Size int64 `json:"size"` Error string `json:"error"` + Manifest string `json:"manifest"` + Info string `json:"info"` } const ( @@ -91,6 +93,11 @@ type transferBlob struct { Recipient string `json:"recipient"` } +type wsAck struct { + Action string `json:"action"` + Manifest string `json:"manifest,omitempty"` // Determined by FireFly core; DX should propagate this opaquely through to the sender's TransferResult, if this DX supports delivery acknowledgements.
+} + func (h *HTTPS) Name() string { return "https" } @@ -104,7 +111,9 @@ func (h *HTTPS) Init(ctx context.Context, prefix config.Prefix, callbacks dataex } h.client = restclient.New(h.ctx, prefix) - h.capabilities = &dataexchange.Capabilities{} + h.capabilities = &dataexchange.Capabilities{ + Manifest: prefix.GetBool(DataExchangeManifestEnabled), + } wsConfig := wsconfig.GenerateConfigFromPrefix(prefix) @@ -232,7 +241,6 @@ func (h *HTTPS) eventLoop() { defer h.wsconn.Close() l := log.L(h.ctx).WithField("role", "event-loop") ctx := log.WithLogger(h.ctx, l) - ack, _ := json.Marshal(map[string]string{"action": "commit"}) for { select { case <-ctx.Done(): @@ -252,17 +260,21 @@ func (h *HTTPS) eventLoop() { continue // Swallow this and move on } l.Debugf("Received %s event from DX sender=%s", msg.Type, msg.Sender) + var manifest string switch msg.Type { case messageFailed: - err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusFailed, msg.Error, nil) + err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{Error: msg.Error}) case messageDelivered: - err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusSucceeded, "", nil) + err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{ + Manifest: msg.Manifest, + Info: msg.Info, + }) case messageReceived: - err = h.callbacks.MessageReceived(msg.Sender, []byte(msg.Message)) + manifest, err = h.callbacks.MessageReceived(msg.Sender, []byte(msg.Message)) case blobFailed: - err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusFailed, msg.Error, nil) + err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{Error: msg.Error}) case blobDelivered: - err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusSucceeded, "", nil) + err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{}) case blobReceived: var hash *fftypes.Bytes32 hash, err = fftypes.ParseBytes32(ctx, msg.Hash) @@ -279,7 +291,11 @@ func (h *HTTPS) eventLoop() { // Send the ack - as long as we didn't fail processing (which should only happen in core // if core itself is shutting down) if err == nil { - err = h.wsconn.Send(ctx, ack) + ackBytes, _ := json.Marshal(&wsAck{ + Action: "commit", + Manifest: manifest, + }) + err = h.wsconn.Send(ctx, ackBytes) } if err != nil { l.Errorf("Event loop exiting: %s", err) diff --git a/internal/dataexchange/dxhttps/dxhttps_test.go b/internal/dataexchange/dxhttps/dxhttps_test.go index 1d81a5b2f4..7caf43e2f4 100644 --- a/internal/dataexchange/dxhttps/dxhttps_test.go +++ b/internal/dataexchange/dxhttps/dxhttps_test.go @@ -408,27 +408,33 @@ func TestEvents(t *testing.T) { mcb := h.callbacks.(*dataexchangemocks.Callbacks) - mcb.On("TransferResult", "tx12345", fftypes.OpStatusFailed, "pop", mock.Anything).Return(nil) + mcb.On("TransferResult", "tx12345", fftypes.OpStatusFailed, mock.MatchedBy(func(ts fftypes.TransportStatusUpdate) bool { + return "pop" == ts.Error + })).Return(nil) fromServer <- `{"type":"message-failed","requestID":"tx12345","error":"pop"}` msg = <-toServer assert.Equal(t, `{"action":"commit"}`, string(msg)) - mcb.On("TransferResult", "tx12345", fftypes.OpStatusSucceeded, "", mock.Anything).Return(nil) - fromServer <- `{"type":"message-delivered","requestID":"tx12345"}` + mcb.On("TransferResult", "tx12345", fftypes.OpStatusSucceeded, mock.MatchedBy(func(ts fftypes.TransportStatusUpdate) bool { + return ts.Manifest == 
`{"manifest":true}` && ts.Info == `{"signatures":"and stuff"}` + })).Return(nil) + fromServer <- `{"type":"message-delivered","requestID":"tx12345","info":"{\"signatures\":\"and stuff\"}","manifest":"{\"manifest\":true}"}` msg = <-toServer assert.Equal(t, `{"action":"commit"}`, string(msg)) - mcb.On("MessageReceived", "peer1", []byte("message1")).Return(nil) + mcb.On("MessageReceived", "peer1", []byte("message1")).Return(`{"manifest":true}`, nil) fromServer <- `{"type":"message-received","sender":"peer1","message":"message1"}` msg = <-toServer - assert.Equal(t, `{"action":"commit"}`, string(msg)) + assert.Equal(t, `{"action":"commit","manifest":"{\"manifest\":true}"}`, string(msg)) - mcb.On("TransferResult", "tx12345", fftypes.OpStatusFailed, "pop", mock.Anything).Return(nil) + mcb.On("TransferResult", "tx12345", fftypes.OpStatusFailed, mock.MatchedBy(func(ts fftypes.TransportStatusUpdate) bool { + return "pop" == ts.Error + })).Return(nil) fromServer <- `{"type":"blob-failed","requestID":"tx12345","error":"pop"}` msg = <-toServer assert.Equal(t, `{"action":"commit"}`, string(msg)) - mcb.On("TransferResult", "tx12345", fftypes.OpStatusSucceeded, "", mock.Anything).Return(nil) + mcb.On("TransferResult", "tx12345", fftypes.OpStatusSucceeded, mock.Anything).Return(nil) fromServer <- `{"type":"blob-delivered","requestID":"tx12345"}` msg = <-toServer assert.Equal(t, `{"action":"commit"}`, string(msg)) diff --git a/internal/events/dx_callbacks.go b/internal/events/dx_callbacks.go index 88f95d398d..aac7a7600b 100644 --- a/internal/events/dx_callbacks.go +++ b/internal/events/dx_callbacks.go @@ -27,42 +27,47 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (em *eventManager) MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) error { +func (em *eventManager) MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) (manifest string, err error) { l := log.L(em.ctx) // De-serializae the transport wrapper - var wrapper fftypes.TransportWrapper - err := json.Unmarshal(data, &wrapper) + var wrapper *fftypes.TransportWrapper + err = json.Unmarshal(data, &wrapper) if err != nil { l.Errorf("Invalid transmission from '%s': %s", peerID, err) - return nil + return "", nil } l.Infof("%s received from '%s' (len=%d)", wrapper.Type, peerID, len(data)) + var mf *fftypes.Manifest switch wrapper.Type { case fftypes.TransportPayloadTypeBatch: if wrapper.Batch == nil { l.Errorf("Invalid transmission: nil batch") - return nil + return "", nil } - return em.pinedBatchReceived(peerID, wrapper.Batch) + mf, err = em.pinedBatchReceived(peerID, wrapper.Batch) case fftypes.TransportPayloadTypeMessage: if wrapper.Message == nil { l.Errorf("Invalid transmission: nil message") - return nil + return "", nil } if wrapper.Group == nil { l.Errorf("Invalid transmission: nil group") - return nil + return "", nil } - return em.unpinnedMessageReceived(peerID, wrapper.Message, wrapper.Group, wrapper.Data) + mf, err = em.unpinnedMessageReceived(peerID, wrapper) default: l.Errorf("Invalid transmission: unknonwn type '%s'", wrapper.Type) - return nil + return "", nil } - + manifestBytes := []byte{} + if err == nil && mf != nil { + manifestBytes, err = json.Marshal(&mf) + } + return string(manifestBytes), err } func (em *eventManager) checkReceivedIdentity(ctx context.Context, peerID, author, signingKey string) (node *fftypes.Node, err error) { @@ -116,10 +121,10 @@ func (em *eventManager) checkReceivedIdentity(ctx context.Context, peerID, autho return node, nil } -func (em *eventManager) 
pinedBatchReceived(peerID string, batch *fftypes.Batch) error { +func (em *eventManager) pinedBatchReceived(peerID string, batch *fftypes.Batch) (manifest *fftypes.Manifest, err error) { // Retry for persistence errors (not validation errors) - return em.retry.Do(em.ctx, "private batch received", func(attempt int) (bool, error) { + err = em.retry.Do(em.ctx, "private batch received", func(attempt int) (bool, error) { return true, em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { l := log.L(ctx) @@ -134,16 +139,18 @@ func (em *eventManager) pinedBatchReceived(peerID string, batch *fftypes.Batch) valid, err := em.persistBatch(ctx, batch) if err != nil { - l.Errorf("Batch received from %s/%s invalid: %s", node.Owner, node.Name, err) + l.Errorf("Batch received from %s/%s processing failed valid=%t: %s", node.Owner, node.Name, valid, err) return err // retry - persistBatch only returns retryable errors } if valid { em.aggregator.offchainBatches <- batch.ID + manifest = batch.Manifest() } return nil }) }) + return manifest, err } @@ -219,12 +226,12 @@ func (em *eventManager) BLOBReceived(dx dataexchange.Plugin, peerID string, hash }) } -func (em *eventManager) TransferResult(dx dataexchange.Plugin, trackingID string, status fftypes.OpStatus, info string, opOutput fftypes.JSONObject) error { - log.L(em.ctx).Infof("Transfer result %s=%s info='%s'", trackingID, status, info) +func (em *eventManager) TransferResult(dx dataexchange.Plugin, trackingID string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error { + log.L(em.ctx).Infof("Transfer result %s=%s error='%s' manifest='%s' info='%s'", trackingID, status, update.Error, update.Manifest, update.Info) // We process the event in a retry loop (which will break only if the context is closed), so that // we only confirm consumption of the event to the plugin once we've processed it. - return em.retry.Do(em.ctx, "blob reference insert", func(attempt int) (retry bool, err error) { + return em.retry.Do(em.ctx, "operation update", func(attempt int) (retry bool, err error) { // Find a matching operation, for this plugin, with the specified ID. // We retry a few times, as there's an outside possibility of the event arriving before we're finished persisting the operation itself @@ -238,7 +245,7 @@ func (em *eventManager) TransferResult(dx dataexchange.Plugin, trackingID string if err != nil { return true, err } - if len(operations) == 0 { + if len(operations) != 1 { // we have a limit on how long we wait to correlate an operation if we don't have a DB erro, // as it should only be a short window where the DB transaction to insert the operation is still // outstanding @@ -249,34 +256,47 @@ func (em *eventManager) TransferResult(dx dataexchange.Plugin, trackingID string return true, i18n.NewError(em.ctx, i18n.Msg404NotFound) } + // The manifest should exactly match the one stored in the operation input, if supported + op := operations[0] + if status == fftypes.OpStatusSucceeded && dx.Capabilities().Manifest { + expectedManifest := op.Input.GetString("manifest") + if update.Manifest != expectedManifest { + // Log and map to failure for the user to see that the receiver did not provide a matching acknowledgement + mismatchErr := i18n.NewError(em.ctx, i18n.MsgManifestMismatch, status, update.Manifest) + log.L(em.ctx).Errorf("%s transfer %s: %s", dx.Name(), trackingID, mismatchErr.Error()) + update.Error = mismatchErr.Error() + status = fftypes.OpStatusFailed + } + } + + update := database.OperationQueryFactory.NewUpdate(em.ctx).
Set("status", status). - Set("error", info). - Set("output", opOutput) - for _, op := range operations { - if err := em.database.UpdateOperation(em.ctx, op.ID, update); err != nil { - return true, err // this is always retryable - } + Set("error", update.Error). + Set("output", update.Info) // We don't need the manifest to be kept here, as it's already in the input + if err := em.database.UpdateOperation(em.ctx, op.ID, update); err != nil { + return true, err // this is always retryable } return false, nil }) } -func (em *eventManager) unpinnedMessageReceived(peerID string, message *fftypes.Message, group *fftypes.Group, data []*fftypes.Data) error { - if message.Header.TxType != fftypes.TransactionTypeNone { +func (em *eventManager) unpinnedMessageReceived(peerID string, tw *fftypes.TransportWrapper) (manifest *fftypes.Manifest, err error) { + message := tw.Message + + if message == nil || message.Header.TxType != fftypes.TransactionTypeNone { log.L(em.ctx).Errorf("Unpinned message '%s' transaction type must be 'none'. TxType=%s", message.Header.ID, message.Header.TxType) - return nil + return nil, nil } // Because we received this off chain, it's entirely possible the group init has not made it // to us yet. So we need to go through the same processing as if we had initiated the group. // This might result in both sides broadcasting a group-init message, but that's fine. - return em.retry.Do(em.ctx, "unpinned message received", func(attempt int) (bool, error) { + err = em.retry.Do(em.ctx, "unpinned message received", func(attempt int) (bool, error) { err := em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { - if valid, err := em.definitions.EnsureLocalGroup(ctx, group); err != nil || !valid { + if valid, err := em.definitions.EnsureLocalGroup(ctx, tw.Group); err != nil || !valid { return err } @@ -290,7 +310,7 @@ func (em *eventManager) unpinnedMessageReceived(peerID string, message *fftypes. } // Persist the data - for i, d := range data { + for i, d := range tw.Data { if ok, err := em.persistReceivedData(ctx, i, d, "message", message.Header.ID, database.UpsertOptimizationSkip); err != nil || !ok { return err } @@ -302,11 +322,15 @@ func (em *eventManager) unpinnedMessageReceived(peerID string, message *fftypes. 
return err } + // Generate a manifest, as we received it ok + manifest = tw.Manifest() + // Assuming all was good, we event := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, message.Header.Namespace, message.Header.ID) return em.database.InsertEvent(ctx, event) }) return err != nil, err }) + return manifest, err } diff --git a/internal/events/dx_callbacks_test.go b/internal/events/dx_callbacks_test.go index cb1497ee92..6d440fdc61 100644 --- a/internal/events/dx_callbacks_test.go +++ b/internal/events/dx_callbacks_test.go @@ -25,6 +25,7 @@ import ( "github.com/hyperledger/firefly/mocks/dataexchangemocks" "github.com/hyperledger/firefly/mocks/definitionsmocks" "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/dataexchange" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -64,8 +65,9 @@ func TestMessageReceiveOK(t *testing.T) { Identity: "parentOrg", }, nil) mdi.On("UpsertBatch", em.ctx, mock.Anything, false).Return(nil, nil) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) + assert.NotNil(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -98,8 +100,9 @@ func TestMessageReceiveOkBadBatchIgnored(t *testing.T) { mdi.On("GetOrganizationByIdentity", em.ctx, "parentOrg").Return(&fftypes.Organization{ Identity: "parentOrg", }, nil) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -139,8 +142,9 @@ func TestMessageReceivePersistBatchError(t *testing.T) { Identity: "parentOrg", }, nil) mdi.On("UpsertBatch", em.ctx, mock.Anything, false).Return(fmt.Errorf("pop")) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -151,8 +155,9 @@ func TestMessageReceivedBadData(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} - err := em.MessageReceived(mdx, "peer1", []byte(`!{}`)) + m, err := em.MessageReceived(mdx, "peer1", []byte(`!{}`)) assert.NoError(t, err) + assert.Empty(t, m) } @@ -161,10 +166,11 @@ func TestMessageReceivedUnknownType(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} - err := em.MessageReceived(mdx, "peer1", []byte(`{ + m, err := em.MessageReceived(mdx, "peer1", []byte(`{ "type": "unknown" }`)) assert.NoError(t, err) + assert.Empty(t, m) } @@ -173,10 +179,11 @@ func TestMessageReceivedNilBatch(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} - err := em.MessageReceived(mdx, "peer1", []byte(`{ + m, err := em.MessageReceived(mdx, "peer1", []byte(`{ "type": "batch" }`)) assert.NoError(t, err) + assert.Empty(t, m) } @@ -185,10 +192,11 @@ func TestMessageReceivedNilMessage(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} - err := em.MessageReceived(mdx, "peer1", []byte(`{ + m, err := em.MessageReceived(mdx, "peer1", []byte(`{ "type": "message" }`)) assert.NoError(t, err) + assert.Empty(t, m) } @@ -197,11 +205,12 @@ func TestMessageReceivedNilGroup(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} - err := em.MessageReceived(mdx, "peer1", []byte(`{ + m, err := em.MessageReceived(mdx, "peer1", []byte(`{ "type": "message", "message": {} }`)) assert.NoError(t, err) + assert.Empty(t, m) } func TestMessageReceiveNodeLookupError(t *testing.T) { @@ 
-217,8 +226,9 @@ func TestMessageReceiveNodeLookupError(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} mdi.On("GetNodes", em.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) } func TestMessageReceiveNodeNotFound(t *testing.T) { @@ -234,8 +244,9 @@ func TestMessageReceiveNodeNotFound(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} mdi.On("GetNodes", em.ctx, mock.Anything).Return(nil, nil, nil) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) + assert.Empty(t, m) } func TestMessageReceiveAuthorLookupError(t *testing.T) { @@ -254,8 +265,9 @@ func TestMessageReceiveAuthorLookupError(t *testing.T) { {Name: "node1", Owner: "org1"}, }, nil, nil) mdi.On("GetOrganizationByIdentity", em.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) } func TestMessageReceiveAuthorNotFound(t *testing.T) { @@ -274,8 +286,9 @@ func TestMessageReceiveAuthorNotFound(t *testing.T) { {Name: "node1", Owner: "org1"}, }, nil, nil) mdi.On("GetOrganizationByIdentity", em.ctx, mock.Anything).Return(nil, nil) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) + assert.Empty(t, m) } func TestMessageReceiveGetCandidateOrgFail(t *testing.T) { @@ -303,8 +316,9 @@ func TestMessageReceiveGetCandidateOrgFail(t *testing.T) { Identity: "0x12345", Parent: "parentOrg", }, nil) mdi.On("GetOrganizationByIdentity", em.ctx, "parentOrg").Return(nil, fmt.Errorf("pop")) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -335,8 +349,9 @@ func TestMessageReceiveGetCandidateOrgNotFound(t *testing.T) { Identity: "0x12345", Parent: "parentOrg", }, nil) mdi.On("GetOrganizationByIdentity", em.ctx, "parentOrg").Return(nil, nil) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -369,8 +384,9 @@ func TestMessageReceiveGetCandidateOrgNotMatch(t *testing.T) { mdi.On("GetOrganizationByIdentity", em.ctx, "parentOrg").Return(&fftypes.Organization{ Identity: "parentOrg", }, nil) - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -481,7 +497,40 @@ func TestTransferResultOk(t *testing.T) { mdx := &dataexchangemocks.Plugin{} mdx.On("Name").Return("utdx") - err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, "error info", fftypes.JSONObject{"extra": "info"}) + err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{ + Error: "error info", + Info: `{"extra": "info"}`, + }) + assert.NoError(t, err) + +} + +func TestTransferResultManifestMismatch(t *testing.T) { + em, cancel := newTestEventManager(t) + defer cancel() + + mdi := em.database.(*databasemocks.Plugin) + id := fftypes.NewUUID() + mdi.On("GetOperations", mock.Anything, 
mock.Anything).Return([]*fftypes.Operation{ + { + ID: id, + BackendID: "tracking12345", + Input: fftypes.JSONObject{ + "manifest": "Bob", + }, + }, + }, nil, nil) + mdi.On("UpdateOperation", mock.Anything, id, mock.Anything).Return(nil) + + mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") + mdx.On("Capabilities").Return(&dataexchange.Capabilities{ + Manifest: true, + }) + err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{ + Info: `{"extra": "info"}`, + Manifest: "Sally", + }) assert.NoError(t, err) } @@ -497,22 +546,28 @@ func TestTransferResultNotCorrelated(t *testing.T) { mdx := &dataexchangemocks.Plugin{} mdx.On("Name").Return("utdx") - err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, "error info", fftypes.JSONObject{"extra": "info"}) + err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{ + Error: "error info", + Info: `{"extra": "info"}`, + }) assert.NoError(t, err) } func TestTransferResultNotFound(t *testing.T) { em, cancel := newTestEventManager(t) - cancel() // avoid retries + defer cancel() // we want to retry until the correlation retry count is exhausted mdi := em.database.(*databasemocks.Plugin) mdi.On("GetOperations", mock.Anything, mock.Anything).Return([]*fftypes.Operation{}, nil, nil) mdx := &dataexchangemocks.Plugin{} mdx.On("Name").Return("utdx") - err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, "error info", fftypes.JSONObject{"extra": "info"}) - assert.Regexp(t, "FF10158", err) + err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{ + Error: "error info", + Info: `{"extra": "info"}`, + }) + assert.NoError(t, err) } @@ -525,7 +580,10 @@ func TestTransferGetOpFail(t *testing.T) { mdx := &dataexchangemocks.Plugin{} mdx.On("Name").Return("utdx") - err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, "error info", fftypes.JSONObject{"extra": "info"}) + err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{ + Error: "error info", + Info: `{"extra": "info"}`, + }) assert.Regexp(t, "FF10158", err) } @@ -546,7 +604,10 @@ func TestTransferUpdateFail(t *testing.T) { mdx := &dataexchangemocks.Plugin{} mdx.On("Name").Return("utdx") - err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, "error info", fftypes.JSONObject{"extra": "info"}) + err := em.TransferResult(mdx, "tracking12345", fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{ + Error: "error info", + Info: `{"extra": "info"}`, + }) assert.Regexp(t, "FF10158", err) } @@ -568,8 +629,9 @@ func TestMessageReceiveMessageWrongType(t *testing.T) { }) mdx := &dataexchangemocks.Plugin{} - err := em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) + assert.Empty(t, m) mdx.AssertExpectations(t) } @@ -604,8 +666,9 @@ func TestMessageReceiveMessageIdentityFail(t *testing.T) { mdi.On("GetNodes", em.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) - err = em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -641,8 +704,9 @@ func TestMessageReceiveMessageIdentityIncorrect(t *testing.T) { mdi.On("GetNodes", em.ctx, mock.Anything).Return([]*fftypes.Node{}, nil, nil) - err = em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) +
assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -684,8 +748,9 @@ func TestMessageReceiveMessagePersistMessageFail(t *testing.T) { }, nil) mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationSkip).Return(fmt.Errorf("pop")) - err = em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -734,8 +799,9 @@ func TestMessageReceiveMessagePersistDataFail(t *testing.T) { }, nil) mdi.On("UpsertData", em.ctx, mock.Anything, database.UpsertOptimizationSkip).Return(fmt.Errorf("pop")) - err = em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -786,8 +852,9 @@ func TestMessageReceiveMessagePersistEventFail(t *testing.T) { mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationSkip).Return(nil) mdi.On("InsertEvent", em.ctx, mock.Anything).Return(fmt.Errorf("pop")) - err = em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -828,8 +895,9 @@ func TestMessageReceiveMessageEnsureLocalGroupFail(t *testing.T) { msh := em.definitions.(*definitionsmocks.DefinitionHandlers) msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(false, fmt.Errorf("pop")) - err = em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) @@ -870,8 +938,9 @@ func TestMessageReceiveMessageEnsureLocalGroupReject(t *testing.T) { msh := em.definitions.(*definitionsmocks.DefinitionHandlers) msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(false, nil) - err = em.MessageReceived(mdx, "peer1", b) + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) + assert.Empty(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) diff --git a/internal/events/event_manager.go b/internal/events/event_manager.go index c50d310b4d..f7e7e939f9 100644 --- a/internal/events/event_manager.go +++ b/internal/events/event_manager.go @@ -61,9 +61,9 @@ type EventManager interface { BatchPinComplete(bi blockchain.Plugin, batch *blockchain.BatchPin, author string, protocolTxID string, additionalInfo fftypes.JSONObject) error // Bound dataexchange callbacks - TransferResult(dx dataexchange.Plugin, trackingID string, status fftypes.OpStatus, info string, opOutput fftypes.JSONObject) error + TransferResult(dx dataexchange.Plugin, trackingID string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error BLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error - MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) error + MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) (manifest string, err error) // Bound token callbacks TokenPoolCreated(ti tokens.Plugin, pool *tokens.TokenPool, protocolTxID string, additionalInfo fftypes.JSONObject) error diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go index c95863d72f..fc2b8da5de 100644 --- a/internal/i18n/en_translations.go +++ b/internal/i18n/en_translations.go @@ -226,4 +226,5 @@ var ( MsgFieldMatchNoNull = ffm("FF10306", "Comparison operator for field '%s' cannot 
accept a null value", 400) MsgTooLargeBroadcast = ffm("FF10307", "Message size %.2fkb is too large for the max broadcast batch size of %.2fkb", 400) MsgTooLargePrivate = ffm("FF10308", "Message size %.2fkb is too large for the max private message size of %.2fkb", 400) + MsgManifestMismatch = ffm("FF10309", "Manifest mismatch overriding '%s' status as failure: '%s'", 400) ) diff --git a/internal/orchestrator/bound_callbacks.go b/internal/orchestrator/bound_callbacks.go index d29a54156d..3989d76dbe 100644 --- a/internal/orchestrator/bound_callbacks.go +++ b/internal/orchestrator/bound_callbacks.go @@ -42,15 +42,15 @@ func (bc *boundCallbacks) BatchPinComplete(batch *blockchain.BatchPin, author st return bc.ei.BatchPinComplete(bc.bi, batch, author, protocolTxID, additionalInfo) } -func (bc *boundCallbacks) TransferResult(trackingID string, status fftypes.OpStatus, info string, opOutput fftypes.JSONObject) error { - return bc.ei.TransferResult(bc.dx, trackingID, status, info, opOutput) +func (bc *boundCallbacks) TransferResult(trackingID string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error { + return bc.ei.TransferResult(bc.dx, trackingID, status, update) } func (bc *boundCallbacks) BLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error { return bc.ei.BLOBReceived(bc.dx, peerID, hash, size, payloadRef) } -func (bc *boundCallbacks) MessageReceived(peerID string, data []byte) error { +func (bc *boundCallbacks) MessageReceived(peerID string, data []byte) (manifest string, err error) { return bc.ei.MessageReceived(bc.dx, peerID, data) } diff --git a/internal/orchestrator/bound_callbacks_test.go b/internal/orchestrator/bound_callbacks_test.go index a5263e0561..666b4c14bc 100644 --- a/internal/orchestrator/bound_callbacks_test.go +++ b/internal/orchestrator/bound_callbacks_test.go @@ -57,15 +57,17 @@ func TestBoundCallbacks(t *testing.T) { assert.EqualError(t, err, "pop") mei.On("TransferResult", mdx, "tracking12345", fftypes.OpStatusFailed, "error info", info).Return(fmt.Errorf("pop")) - err = bc.TransferResult("tracking12345", fftypes.OpStatusFailed, "error info", info) + err = bc.TransferResult("tracking12345", fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{ + Error: "error info", Info: info.String(), + }) assert.EqualError(t, err, "pop") mei.On("BLOBReceived", mdx, "peer1", *hash, int64(12345), "ns1/id1").Return(fmt.Errorf("pop")) err = bc.BLOBReceived("peer1", *hash, 12345, "ns1/id1") assert.EqualError(t, err, "pop") - mei.On("MessageReceived", mdx, "peer1", []byte{}).Return(fmt.Errorf("pop")) - err = bc.MessageReceived("peer1", []byte{}) + mei.On("MessageReceived", mdx, "peer1", []byte{}).Return(nil, fmt.Errorf("pop")) + _, err = bc.MessageReceived("peer1", []byte{}) assert.EqualError(t, err, "pop") mei.On("TokenPoolCreated", mti, pool, "tx12345", info).Return(fmt.Errorf("pop")) diff --git a/internal/privatemessaging/message.go b/internal/privatemessaging/message.go index 1603fe6503..bbe9ac1ab1 100644 --- a/internal/privatemessaging/message.go +++ b/internal/privatemessaging/message.go @@ -18,7 +18,6 @@ package privatemessaging import ( "context" - "encoding/json" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/sysmessaging" @@ -213,15 +212,12 @@ func (s *messageSender) sendUnpinned(ctx context.Context) (err error) { return err } - payload, err := json.Marshal(&fftypes.TransportWrapper{ + tw := &fftypes.TransportWrapper{ Type: fftypes.TransportPayloadTypeMessage, Message: &s.msg.Message, 
Data: data, Group: group, - }) - if err != nil { - return i18n.WrapError(ctx, err, i18n.MsgSerializationFailed) } - return s.mgr.sendData(ctx, "message", s.msg.Header.ID, s.msg.Header.Group, s.namespace, nodes, fftypes.JSONAnyPtrBytes(payload), nil, data) + return s.mgr.sendData(ctx, "message", s.msg.Header.ID, s.msg.Header.Group, s.namespace, nodes, tw, nil, data) } diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index e2fe97a62e..dde0e4deb4 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -123,12 +123,9 @@ func (pm *privateMessaging) Start() error { func (pm *privateMessaging) dispatchBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { // Serialize the full payload, which has already been sealed for us by the BatchManager - payload, err := json.Marshal(&fftypes.TransportWrapper{ + tw := &fftypes.TransportWrapper{ Type: fftypes.TransportPayloadTypeBatch, Batch: batch, - }) - if err != nil { - return i18n.WrapError(ctx, err, i18n.MsgSerializationFailed) } // Retrieve the group @@ -138,7 +135,7 @@ func (pm *privateMessaging) dispatchBatch(ctx context.Context, batch *fftypes.Ba } return pm.database.RunAsGroup(ctx, func(ctx context.Context) error { - return pm.sendAndSubmitBatch(ctx, batch, nodes, fftypes.JSONAnyPtrBytes(payload), contexts) + return pm.sendAndSubmitBatch(ctx, batch, nodes, tw, contexts) }) } @@ -177,9 +174,14 @@ func (pm *privateMessaging) transferBlobs(ctx context.Context, data []*fftypes.D return nil } -func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fftypes.UUID, group *fftypes.Bytes32, ns string, nodes []*fftypes.Node, payload *fftypes.JSONAny, txid *fftypes.UUID, data []*fftypes.Data) (err error) { +func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fftypes.UUID, group *fftypes.Bytes32, ns string, nodes []*fftypes.Node, tw *fftypes.TransportWrapper, txid *fftypes.UUID, data []*fftypes.Data) (err error) { l := log.L(ctx) + payload, err := json.Marshal(tw) + if err != nil { + return i18n.WrapError(ctx, err, i18n.MsgSerializationFailed) + } + // TODO: move to using DIDs consistently as the way to reference the node/organization (i.e. 
node.Owner becomes a DID) localOrgSigingKey, err := pm.identity.GetLocalOrgKey(ctx) if err != nil { @@ -202,7 +204,7 @@ func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fft } // Send the payload itself - trackingID, err := pm.exchange.SendMessage(ctx, node.DX.Peer, payload.Bytes()) + trackingID, err := pm.exchange.SendMessage(ctx, node.DX.Peer, payload) if err != nil { return err } @@ -215,6 +217,9 @@ func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fft trackingID, fftypes.OpTypeDataExchangeBatchSend, fftypes.OpStatusPending) + op.Input = fftypes.JSONObject{ + "manifest": tw.Manifest().String(), + } if err = pm.database.InsertOperation(ctx, op); err != nil { return err } @@ -225,8 +230,8 @@ func (pm *privateMessaging) sendData(ctx context.Context, mType string, mID *fft return nil } -func (pm *privateMessaging) sendAndSubmitBatch(ctx context.Context, batch *fftypes.Batch, nodes []*fftypes.Node, payload *fftypes.JSONAny, contexts []*fftypes.Bytes32) (err error) { - if err = pm.sendData(ctx, "batch", batch.ID, batch.Group, batch.Namespace, nodes, payload, batch.Payload.TX.ID, batch.Payload.Data); err != nil { +func (pm *privateMessaging) sendAndSubmitBatch(ctx context.Context, batch *fftypes.Batch, nodes []*fftypes.Node, tw *fftypes.TransportWrapper, contexts []*fftypes.Bytes32) (err error) { + if err = pm.sendData(ctx, "batch", batch.ID, batch.Group, batch.Namespace, nodes, tw, batch.Payload.TX.ID, batch.Payload.Data); err != nil { return err } return pm.writeTransaction(ctx, batch, contexts) diff --git a/internal/privatemessaging/privatemessaging_test.go b/internal/privatemessaging/privatemessaging_test.go index c1b9072564..776794fc57 100644 --- a/internal/privatemessaging/privatemessaging_test.go +++ b/internal/privatemessaging/privatemessaging_test.go @@ -181,7 +181,12 @@ func TestDispatchBatchBadData(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) defer cancel() + groupID := fftypes.NewRandB32() + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{}, nil) + err := pm.dispatchBatch(pm.ctx, &fftypes.Batch{ + Group: groupID, Payload: fftypes.BatchPayload{ Data: []*fftypes.Data{ {Value: fftypes.JSONAnyPtr(`{!json}`)}, @@ -223,7 +228,7 @@ func TestSendAndSubmitBatchBadID(t *testing.T) { Identity: fftypes.Identity{ Author: "badauthor", }, - }, []*fftypes.Node{}, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) + }, []*fftypes.Node{}, &fftypes.TransportWrapper{}, []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } @@ -241,7 +246,7 @@ func TestSendAndSubmitBatchUnregisteredNode(t *testing.T) { Identity: fftypes.Identity{ Author: "badauthor", }, - }, []*fftypes.Node{}, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) + }, []*fftypes.Node{}, &fftypes.TransportWrapper{}, []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } @@ -266,7 +271,7 @@ func TestSendImmediateFail(t *testing.T) { Endpoint: fftypes.JSONObject{"url": "https://node1.example.com"}, }, }, - }, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) + }, &fftypes.TransportWrapper{}, []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } @@ -299,7 +304,7 @@ func TestSendSubmitInsertOperationFail(t *testing.T) { Endpoint: fftypes.JSONObject{"url": "https://node1.example.com"}, }, }, - }, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) + }, &fftypes.TransportWrapper{}, []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } @@ -329,7 +334,7 @@ func TestSendSubmitBlobTransferFail(t *testing.T) { Endpoint: 
fftypes.JSONObject{"url": "https://node1.example.com"}, }, }, - }, fftypes.JSONAnyPtr(`{}`), []*fftypes.Bytes32{}) + }, &fftypes.TransportWrapper{}, []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) } diff --git a/mocks/databasemocks/plugin.go b/mocks/databasemocks/plugin.go index 120eef8975..aa0dc05ad7 100644 --- a/mocks/databasemocks/plugin.go +++ b/mocks/databasemocks/plugin.go @@ -633,38 +633,6 @@ func (_m *Plugin) GetMessageByID(ctx context.Context, id *fftypes.UUID) (*fftype return r0, r1 } -// GetMessageRefs provides a mock function with given fields: ctx, filter -func (_m *Plugin) GetMessageRefs(ctx context.Context, filter database.Filter) ([]*fftypes.MessageRef, *database.FilterResult, error) { - ret := _m.Called(ctx, filter) - - var r0 []*fftypes.MessageRef - if rf, ok := ret.Get(0).(func(context.Context, database.Filter) []*fftypes.MessageRef); ok { - r0 = rf(ctx, filter) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.MessageRef) - } - } - - var r1 *database.FilterResult - if rf, ok := ret.Get(1).(func(context.Context, database.Filter) *database.FilterResult); ok { - r1 = rf(ctx, filter) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*database.FilterResult) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(context.Context, database.Filter) error); ok { - r2 = rf(ctx, filter) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - // GetMessages provides a mock function with given fields: ctx, filter func (_m *Plugin) GetMessages(ctx context.Context, filter database.Filter) ([]*fftypes.Message, *database.FilterResult, error) { ret := _m.Called(ctx, filter) diff --git a/mocks/dataexchangemocks/callbacks.go b/mocks/dataexchangemocks/callbacks.go index aecaf3efdd..0e598d1421 100644 --- a/mocks/dataexchangemocks/callbacks.go +++ b/mocks/dataexchangemocks/callbacks.go @@ -27,26 +27,33 @@ func (_m *Callbacks) BLOBReceived(peerID string, hash fftypes.Bytes32, size int6 } // MessageReceived provides a mock function with given fields: peerID, data -func (_m *Callbacks) MessageReceived(peerID string, data []byte) error { +func (_m *Callbacks) MessageReceived(peerID string, data []byte) (string, error) { ret := _m.Called(peerID, data) - var r0 error - if rf, ok := ret.Get(0).(func(string, []byte) error); ok { + var r0 string + if rf, ok := ret.Get(0).(func(string, []byte) string); ok { r0 = rf(peerID, data) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(string) } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(string, []byte) error); ok { + r1 = rf(peerID, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// TransferResult provides a mock function with given fields: trackingID, status, info, opOutput -func (_m *Callbacks) TransferResult(trackingID string, status fftypes.OpStatus, info string, opOutput fftypes.JSONObject) error { - ret := _m.Called(trackingID, status, info, opOutput) +// TransferResult provides a mock function with given fields: trackingID, status, info +func (_m *Callbacks) TransferResult(trackingID string, status fftypes.OpStatus, info fftypes.TransportStatusUpdate) error { + ret := _m.Called(trackingID, status, info) var r0 error - if rf, ok := ret.Get(0).(func(string, fftypes.OpStatus, string, fftypes.JSONObject) error); ok { - r0 = rf(trackingID, status, info, opOutput) + if rf, ok := ret.Get(0).(func(string, fftypes.OpStatus, fftypes.TransportStatusUpdate) error); ok { + r0 = rf(trackingID, status, info) } else { r0 = ret.Error(0) } diff --git a/mocks/eventmocks/event_manager.go 
b/mocks/eventmocks/event_manager.go index 3fef854e93..a7620d1cb0 100644 --- a/mocks/eventmocks/event_manager.go +++ b/mocks/eventmocks/event_manager.go @@ -126,17 +126,24 @@ func (_m *EventManager) DeletedSubscriptions() chan<- *fftypes.UUID { } // MessageReceived provides a mock function with given fields: dx, peerID, data -func (_m *EventManager) MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) error { +func (_m *EventManager) MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) (string, error) { ret := _m.Called(dx, peerID, data) - var r0 error - if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, []byte) error); ok { + var r0 string + if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, []byte) string); ok { r0 = rf(dx, peerID, data) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(string) } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(dataexchange.Plugin, string, []byte) error); ok { + r1 = rf(dx, peerID, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // NewEvents provides a mock function with given fields: @@ -259,13 +266,13 @@ func (_m *EventManager) TokensTransferred(ti tokens.Plugin, poolProtocolID strin return r0 } -// TransferResult provides a mock function with given fields: dx, trackingID, status, info, opOutput -func (_m *EventManager) TransferResult(dx dataexchange.Plugin, trackingID string, status fftypes.OpStatus, info string, opOutput fftypes.JSONObject) error { - ret := _m.Called(dx, trackingID, status, info, opOutput) +// TransferResult provides a mock function with given fields: dx, trackingID, status, update +func (_m *EventManager) TransferResult(dx dataexchange.Plugin, trackingID string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error { + ret := _m.Called(dx, trackingID, status, update) var r0 error - if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, fftypes.OpStatus, string, fftypes.JSONObject) error); ok { - r0 = rf(dx, trackingID, status, info, opOutput) + if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, fftypes.OpStatus, fftypes.TransportStatusUpdate) error); ok { + r0 = rf(dx, trackingID, status, update) } else { r0 = ret.Error(0) } diff --git a/pkg/database/plugin.go b/pkg/database/plugin.go index cfe0479f40..c2bd364d10 100644 --- a/pkg/database/plugin.go +++ b/pkg/database/plugin.go @@ -89,9 +89,6 @@ type iMessageCollection interface { // GetMessages - List messages, reverse sorted (newest first) by Confirmed then Created, with pagination, and simple must filters GetMessages(ctx context.Context, filter Filter) (message []*fftypes.Message, res *FilterResult, err error) - // GetMessageRefs - Lighter weight query to just get the reference info of messages - GetMessageRefs(ctx context.Context, filter Filter) ([]*fftypes.MessageRef, *FilterResult, error) - // GetMessagesForData - List messages where there is a data reference to the specified ID GetMessagesForData(ctx context.Context, dataID *fftypes.UUID, filter Filter) (message []*fftypes.Message, res *FilterResult, err error) } diff --git a/pkg/dataexchange/plugin.go b/pkg/dataexchange/plugin.go index 0d9d45487f..fe50d3f0f6 100644 --- a/pkg/dataexchange/plugin.go +++ b/pkg/dataexchange/plugin.go @@ -96,16 +96,18 @@ type Plugin interface { type Callbacks interface { // MessageReceived notifies of a message received from another node in the network - MessageReceived(peerID string, data []byte) error + MessageReceived(peerID string, data []byte) (manifest string, err error) // BLOBReceived notifies of 
the ID of a BLOB that has been stored by DX after being received from another node in the network BLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error - // TransferResult notifies of a status update of a transfer - TransferResult(trackingID string, status fftypes.OpStatus, info string, opOutput fftypes.JSONObject) error + // TransferResult notifies of a status update of a transfer (can have multiple status updates). + TransferResult(trackingID string, status fftypes.OpStatus, info fftypes.TransportStatusUpdate) error } // Capabilities the supported featureset of the data exchange // interface implemented by the plugin, with the specified config type Capabilities struct { + // Manifest - whether TransferResult events contain the manifest generated by the receiving FireFly + Manifest bool } diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index 5325cdbfd0..5b1fa810b6 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -74,3 +74,22 @@ func (ma *BatchPayload) Scan(src interface{}) error { } } + +func (b *Batch) Manifest() *Manifest { + if b == nil { + return nil + } + tm := &Manifest{ + Messages: make([]MessageRef, len(b.Payload.Messages)), + Data: make([]DataRef, len(b.Payload.Data)), + } + for i, m := range b.Payload.Messages { + tm.Messages[i].ID = m.Header.ID + tm.Messages[i].Hash = m.Hash + } + for i, d := range b.Payload.Data { + tm.Data[i].ID = d.ID + tm.Data[i].Hash = d.Hash + } + return tm +} diff --git a/pkg/fftypes/bytetypes.go b/pkg/fftypes/bytetypes.go index e6e461f80b..0e12e43243 100644 --- a/pkg/fftypes/bytetypes.go +++ b/pkg/fftypes/bytetypes.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -69,6 +69,14 @@ func ParseBytes32(ctx context.Context, hexStr string) (*Bytes32, error) { return &b32, nil } +func MustParseBytes32(hexStr string) *Bytes32 { + b32, err := ParseBytes32(context.Background(), hexStr) + if err != nil { + panic(err) + } + return b32 +} + // Scan implements sql.Scanner func (b32 *Bytes32) Scan(src interface{}) error { switch src := src.(type) { diff --git a/pkg/fftypes/bytetypes_test.go b/pkg/fftypes/bytetypes_test.go index 660b15800f..3a296c53f0 100644 --- a/pkg/fftypes/bytetypes_test.go +++ b/pkg/fftypes/bytetypes_test.go @@ -151,10 +151,13 @@ func TestParseBytes32(t *testing.T) { b32, err := ParseBytes32(context.Background(), "0xd907ee03ecbcfb416ce89d957682e8ef41ac548b0b571f65cb196f2b0ab4da05") assert.NoError(t, err) assert.Equal(t, "d907ee03ecbcfb416ce89d957682e8ef41ac548b0b571f65cb196f2b0ab4da05", b32.String()) + assert.Equal(t, "d907ee03ecbcfb416ce89d957682e8ef41ac548b0b571f65cb196f2b0ab4da05", MustParseBytes32("0xd907ee03ecbcfb416ce89d957682e8ef41ac548b0b571f65cb196f2b0ab4da05").String()) _, err = ParseBytes32(context.Background(), "") assert.Regexp(t, "FF10232", err) _, err = ParseBytes32(context.Background(), "!!!!d907ee03ecbcfb416ce89d957682e8ef41ac548b0b571f65cb196f2b0ab4") assert.Regexp(t, "FF10231", err) + + assert.Panics(t, func() { MustParseBytes32("!!!!stuff") }) } diff --git a/pkg/fftypes/jsonany_test.go b/pkg/fftypes/jsonany_test.go index 292e23d7a0..0326c46167 100644 --- a/pkg/fftypes/jsonany_test.go +++ b/pkg/fftypes/jsonany_test.go @@ -93,6 +93,7 @@ func TestJSONAnyUnmarshalFail(t *testing.T) { func TestScan(t *testing.T) { var h JSONAny + assert.Equal(t, int64(0), h.Length()) assert.NoError(t, h.Scan(nil)) assert.Equal(t, []byte(NullString), []byte(h)) @@ -109,8 +110,10 @@ func TestScan(t *testing.T) { assert.Equal(t, "test", JSONAnyPtrBytes([]byte(`{"val": "test"}`)).JSONObject().GetString("val")) assert.Nil(t, JSONAnyPtrBytes(nil)) + assert.Equal(t, int64(0), JSONAnyPtrBytes(nil).Length()) assert.Nil(t, JSONAnyPtrBytes(nil).Bytes()) assert.NotEmpty(t, JSONAnyPtr("{}").Bytes()) + assert.Equal(t, int64(2), JSONAnyPtr("{}").Length()) } diff --git a/pkg/fftypes/manifest.go b/pkg/fftypes/manifest.go new file mode 100644 index 0000000000..8410c5b0c8 --- /dev/null +++ b/pkg/fftypes/manifest.go @@ -0,0 +1,30 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fftypes + +import "encoding/json" + +// Manifest is a list of references to messages and data +type Manifest struct { + Messages []MessageRef `json:"messages"` + Data []DataRef `json:"data"` +} + +func (mf *Manifest) String() string { + b, _ := json.Marshal(&mf) + return string(b) +} diff --git a/pkg/fftypes/manifest_test.go b/pkg/fftypes/manifest_test.go new file mode 100644 index 0000000000..c22d77eb82 --- /dev/null +++ b/pkg/fftypes/manifest_test.go @@ -0,0 +1,36 @@ +// Copyright © 2021 Kaleido, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fftypes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestManifestToString(t *testing.T) { + + tw := &TransportWrapper{ + Type: TransportPayloadTypeMessage, + Message: &Message{Header: MessageHeader{ID: MustParseUUID("c38e76ec-92a6-4659-805d-8ae3b7437c40")}, Hash: MustParseBytes32("169ef5233cf44df3d71df59f25928743e9a76378bb1375e06539b732b1fc57e5")}, + Data: []*Data{ + {ID: MustParseUUID("7bc49647-cd1c-4633-98fa-ddbb208d61bd"), Hash: MustParseBytes32("2b849d47e44a291cd83bee4e7ace66178a5245a151d3bbd02011312ec2604ed6")}, + {ID: MustParseUUID("5b80eec3-04b5-4557-bced-6a458ecb9ef2"), Hash: MustParseBytes32("2bcddd992d17e89a5aafbe99c59d954018ddadf4e533a164808ae2389bbf33dc")}, + }, + } + assert.Equal(t, "{\"messages\":[{\"id\":\"c38e76ec-92a6-4659-805d-8ae3b7437c40\",\"hash\":\"169ef5233cf44df3d71df59f25928743e9a76378bb1375e06539b732b1fc57e5\"}],\"data\":[{\"id\":\"7bc49647-cd1c-4633-98fa-ddbb208d61bd\",\"hash\":\"2b849d47e44a291cd83bee4e7ace66178a5245a151d3bbd02011312ec2604ed6\"},{\"id\":\"5b80eec3-04b5-4557-bced-6a458ecb9ef2\",\"hash\":\"2bcddd992d17e89a5aafbe99c59d954018ddadf4e533a164808ae2389bbf33dc\"}]}", tw.Manifest().String()) +} diff --git a/pkg/fftypes/message.go b/pkg/fftypes/message.go index 4396b7d4ae..515009df5b 100644 --- a/pkg/fftypes/message.go +++ b/pkg/fftypes/message.go @@ -124,9 +124,8 @@ type DataRefOrValue struct { // MessageRef is a lightweight data structure that can be used to refer to a message type MessageRef struct { - ID *UUID `json:"id,omitempty"` - Sequence int64 `json:"sequence,omitempty"` - Hash *Bytes32 `json:"hash,omitempty"` + ID *UUID `json:"id,omitempty"` + Hash *Bytes32 `json:"hash,omitempty"` } func (h *MessageHeader) Hash() *Bytes32 { diff --git a/pkg/fftypes/transport_wrapper.go b/pkg/fftypes/transport_wrapper.go index 5146b05013..3d989d0c71 100644 --- a/pkg/fftypes/transport_wrapper.go +++ b/pkg/fftypes/transport_wrapper.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -31,3 +31,36 @@ type TransportWrapper struct { Batch *Batch `json:"batch,omitempty"` Group *Group `json:"group,omitempty"` } + +// Manifest lists the contents of the transmission in a Manifest, which can be compared with +// a signed receipt provided back by the DX plugin +func (tw *TransportWrapper) Manifest() *Manifest { + if tw.Type == TransportPayloadTypeBatch { + return tw.Batch.Manifest() + } else if tw.Type == TransportPayloadTypeMessage { + tm := &Manifest{ + Messages: []MessageRef{}, + Data: make([]DataRef, len(tw.Data)), + } + if tw.Message != nil { + tm.Messages = []MessageRef{ + { + ID: tw.Message.Header.ID, + Hash: tw.Message.Hash, + }, + } + } + for i, d := range tw.Data { + tm.Data[i].ID = d.ID + tm.Data[i].Hash = d.Hash + } + return tm + } + return nil +} + +type TransportStatusUpdate struct { + Error string `json:"error,omitempty"` + Manifest string `json:"manifest,omitempty"` + Info string `json:"info,omitempty"` +} diff --git a/pkg/fftypes/transport_wrapper_test.go b/pkg/fftypes/transport_wrapper_test.go new file mode 100644 index 0000000000..1520c5a662 --- /dev/null +++ b/pkg/fftypes/transport_wrapper_test.go @@ -0,0 +1,92 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fftypes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBatchManifest(t *testing.T) { + + tw := TransportWrapper{ + Type: TransportPayloadTypeBatch, + Batch: &Batch{ + Payload: BatchPayload{ + Messages: []*Message{ + {Header: MessageHeader{ID: NewUUID()}, Hash: NewRandB32()}, + {Header: MessageHeader{ID: NewUUID()}, Hash: NewRandB32()}, + }, + Data: []*Data{ + {ID: NewUUID(), Hash: NewRandB32()}, + {ID: NewUUID(), Hash: NewRandB32()}, + }, + }, + }, + } + tm := tw.Manifest() + assert.Equal(t, 2, len(tm.Messages)) + assert.Equal(t, tw.Batch.Payload.Messages[0].Header.ID.String(), tm.Messages[0].ID.String()) + assert.Equal(t, tw.Batch.Payload.Messages[1].Header.ID.String(), tm.Messages[1].ID.String()) + assert.Equal(t, tw.Batch.Payload.Messages[0].Hash.String(), tm.Messages[0].Hash.String()) + assert.Equal(t, tw.Batch.Payload.Messages[1].Hash.String(), tm.Messages[1].Hash.String()) + assert.Equal(t, 2, len(tm.Data)) + assert.Equal(t, tw.Batch.Payload.Data[0].ID.String(), tm.Data[0].ID.String()) + assert.Equal(t, tw.Batch.Payload.Data[1].ID.String(), tm.Data[1].ID.String()) + assert.Equal(t, tw.Batch.Payload.Data[0].Hash.String(), tm.Data[0].Hash.String()) + assert.Equal(t, tw.Batch.Payload.Data[1].Hash.String(), tm.Data[1].Hash.String()) + +} + +func TestSingleMessageManifest(t *testing.T) { + + tw := TransportWrapper{ + Type: TransportPayloadTypeMessage, + Message: &Message{Header: MessageHeader{ID: NewUUID()}, Hash: NewRandB32()}, + Data: []*Data{ + {ID: NewUUID(), Hash: NewRandB32()}, + {ID: NewUUID(), Hash: NewRandB32()}, + }, + } + tm := tw.Manifest() + assert.Equal(t, 1, len(tm.Messages)) + assert.Equal(t, tw.Message.Header.ID.String(), tm.Messages[0].ID.String()) + assert.Equal(t, tw.Message.Hash.String(), tm.Messages[0].Hash.String()) + assert.Equal(t, 2, len(tm.Data)) + assert.Equal(t, tw.Data[0].ID.String(), tm.Data[0].ID.String()) + assert.Equal(t, tw.Data[1].ID.String(), tm.Data[1].ID.String()) + assert.Equal(t, tw.Data[0].Hash.String(), tm.Data[0].Hash.String()) + assert.Equal(t, tw.Data[1].Hash.String(), tm.Data[1].Hash.String()) + +} + +func TestUnknownManifest(t *testing.T) { + + tw := TransportWrapper{} + assert.Nil(t, tw.Manifest()) + +} + +func TestNillBatchManifest(t *testing.T) { + + tw := TransportWrapper{ + Type: TransportPayloadTypeBatch, + } + assert.Nil(t, tw.Manifest()) + +} From e08696a388256c13d694bdd4b2b7e83c7589f7ba Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 11 Jan 2022 21:49:35 -0500 Subject: [PATCH 16/21] Tidy up tests Signed-off-by: Peter Broadhurst --- internal/dataexchange/dxhttps/config.go | 2 +- internal/orchestrator/bound_callbacks_test.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/dataexchange/dxhttps/config.go b/internal/dataexchange/dxhttps/config.go index a0971dbb56..6d200122ea 100644 --- a/internal/dataexchange/dxhttps/config.go +++ b/internal/dataexchange/dxhttps/config.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // diff --git a/internal/orchestrator/bound_callbacks_test.go b/internal/orchestrator/bound_callbacks_test.go index 666b4c14bc..e066d57c06 100644 --- a/internal/orchestrator/bound_callbacks_test.go +++ b/internal/orchestrator/bound_callbacks_test.go @@ -28,6 +28,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" "github.com/hyperledger/firefly/pkg/tokens" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" ) func TestBoundCallbacks(t *testing.T) { @@ -56,7 +57,7 @@ func TestBoundCallbacks(t *testing.T) { err = bc.TokenOpUpdate(mti, opID, fftypes.OpStatusFailed, "error info", info) assert.EqualError(t, err, "pop") - mei.On("TransferResult", mdx, "tracking12345", fftypes.OpStatusFailed, "error info", info).Return(fmt.Errorf("pop")) + mei.On("TransferResult", mdx, "tracking12345", fftypes.OpStatusFailed, mock.Anything).Return(fmt.Errorf("pop")) err = bc.TransferResult("tracking12345", fftypes.OpStatusFailed, fftypes.TransportStatusUpdate{ Error: "error info", Info: info.String(), }) @@ -66,7 +67,7 @@ func TestBoundCallbacks(t *testing.T) { err = bc.BLOBReceived("peer1", *hash, 12345, "ns1/id1") assert.EqualError(t, err, "pop") - mei.On("MessageReceived", mdx, "peer1", []byte{}).Return(nil, fmt.Errorf("pop")) + mei.On("MessageReceived", mdx, "peer1", []byte{}).Return("manifest data", fmt.Errorf("pop")) _, err = bc.MessageReceived("peer1", []byte{}) assert.EqualError(t, err, "pop") From f53d90442dce3e157110d15c588e80a997f14f0c Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 11 Jan 2022 23:31:01 -0500 Subject: [PATCH 17/21] Move new JSONAny columns to TEXT too Signed-off-by: Peter Broadhurst --- db/migrations/postgres/000050_create_ffi_methods_table.up.sql | 4 ++-- db/migrations/postgres/000051_create_ffi_events_table.up.sql | 2 +- .../postgres/000052_create_contractapis_table.up.sql | 4 ++-- .../postgres/000053_create_contractsubscriptions_table.up.sql | 4 ++-- .../postgres/000054_create_contractevents_table.up.sql | 4 ++-- db/migrations/sqlite/000050_create_ffi_methods_table.up.sql | 4 ++-- db/migrations/sqlite/000051_create_ffi_events_table.up.sql | 2 +- db/migrations/sqlite/000052_create_contractapis_table.up.sql | 4 ++-- .../sqlite/000053_create_contractsubscriptions_table.up.sql | 4 ++-- .../sqlite/000054_create_contractevents_table.up.sql | 4 ++-- 10 files changed, 18 insertions(+), 18 deletions(-) diff --git a/db/migrations/postgres/000050_create_ffi_methods_table.up.sql b/db/migrations/postgres/000050_create_ffi_methods_table.up.sql index 13f5cd646b..55bd577311 100644 --- a/db/migrations/postgres/000050_create_ffi_methods_table.up.sql +++ b/db/migrations/postgres/000050_create_ffi_methods_table.up.sql @@ -7,8 +7,8 @@ CREATE TABLE ffimethods ( name VARCHAR(1024) NOT NULL, pathname VARCHAR(1024) NOT NULL, description TEXT NOT NULL, - params BYTEA NOT NULL, - returns BYTEA NOT NULL + params TEXT NOT NULL, + returns TEXT NOT NULL ); CREATE UNIQUE INDEX ffimethods_pathname ON ffimethods(interface_id,pathname); diff --git a/db/migrations/postgres/000051_create_ffi_events_table.up.sql b/db/migrations/postgres/000051_create_ffi_events_table.up.sql index f0d1dd1328..88ce5b8597 100644 --- a/db/migrations/postgres/000051_create_ffi_events_table.up.sql +++ b/db/migrations/postgres/000051_create_ffi_events_table.up.sql @@ -7,7 +7,7 @@ CREATE TABLE ffievents ( name VARCHAR(1024) NOT NULL, pathname VARCHAR(1024) NOT NULL, description TEXT NOT NULL, - params BYTEA NOT NULL + params TEXT NOT NULL ); CREATE UNIQUE INDEX 
ffievents_pathname ON ffievents(interface_id,pathname); diff --git a/db/migrations/postgres/000052_create_contractapis_table.up.sql b/db/migrations/postgres/000052_create_contractapis_table.up.sql index 1153b1c5b9..c5ecd8e218 100644 --- a/db/migrations/postgres/000052_create_contractapis_table.up.sql +++ b/db/migrations/postgres/000052_create_contractapis_table.up.sql @@ -3,8 +3,8 @@ CREATE TABLE contractapis ( seq SERIAL PRIMARY KEY, id UUID NOT NULL, interface_id UUID NOT NULL, - ledger BYTEA, - location BYTEA, + ledger TEXT, + location TEXT, name VARCHAR(64) NOT NULL, namespace VARCHAR(64) NOT NULL ); diff --git a/db/migrations/postgres/000053_create_contractsubscriptions_table.up.sql b/db/migrations/postgres/000053_create_contractsubscriptions_table.up.sql index a17d94b376..d8e0e7c391 100644 --- a/db/migrations/postgres/000053_create_contractsubscriptions_table.up.sql +++ b/db/migrations/postgres/000053_create_contractsubscriptions_table.up.sql @@ -3,11 +3,11 @@ CREATE TABLE contractsubscriptions ( seq SERIAL PRIMARY KEY, id UUID NOT NULL, interface_id UUID NULL, - event BYTEA NOT NULL, + event TEXT NOT NULL, namespace VARCHAR(64) NOT NULL, name VARCHAR(64) NULL, protocol_id VARCHAR(1024) NOT NULL, - location BYTEA NOT NULL, + location TEXT NOT NULL, created BIGINT NOT NULL ); diff --git a/db/migrations/postgres/000054_create_contractevents_table.up.sql b/db/migrations/postgres/000054_create_contractevents_table.up.sql index c329a570c0..c5a1f4f4ae 100644 --- a/db/migrations/postgres/000054_create_contractevents_table.up.sql +++ b/db/migrations/postgres/000054_create_contractevents_table.up.sql @@ -5,8 +5,8 @@ CREATE TABLE contractevents ( namespace VARCHAR(64) NOT NULL, name VARCHAR(1024) NOT NULL, subscription_id UUID NOT NULL, - outputs BYTEA, - info BYTEA, + outputs TEXT, + info TEXT, timestamp BIGINT NOT NULL ); CREATE UNIQUE INDEX contractevents_name ON contractevents(namespace,name); diff --git a/db/migrations/sqlite/000050_create_ffi_methods_table.up.sql b/db/migrations/sqlite/000050_create_ffi_methods_table.up.sql index bc1b3e3a5a..461c8c768b 100644 --- a/db/migrations/sqlite/000050_create_ffi_methods_table.up.sql +++ b/db/migrations/sqlite/000050_create_ffi_methods_table.up.sql @@ -6,8 +6,8 @@ CREATE TABLE ffimethods ( name VARCHAR(1024) NOT NULL, pathname VARCHAR(1024) NOT NULL, description TEXT NOT NULL, - params BYTEA NOT NULL, - returns BYTEA NOT NULL + params TEXT NOT NULL, + returns TEXT NOT NULL ); CREATE UNIQUE INDEX ffimethods_pathname ON ffimethods(interface_id,pathname); diff --git a/db/migrations/sqlite/000051_create_ffi_events_table.up.sql b/db/migrations/sqlite/000051_create_ffi_events_table.up.sql index 2d1bbfb4ca..51bdc8805f 100644 --- a/db/migrations/sqlite/000051_create_ffi_events_table.up.sql +++ b/db/migrations/sqlite/000051_create_ffi_events_table.up.sql @@ -6,7 +6,7 @@ CREATE TABLE ffievents ( name VARCHAR(1024) NOT NULL, pathname VARCHAR(1024) NOT NULL, description TEXT NOT NULL, - params BYTEA NOT NULL + params TEXT NOT NULL ); CREATE UNIQUE INDEX ffievents_pathname ON ffievents(interface_id,pathname); diff --git a/db/migrations/sqlite/000052_create_contractapis_table.up.sql b/db/migrations/sqlite/000052_create_contractapis_table.up.sql index 95a0b501cd..17edc25fa6 100644 --- a/db/migrations/sqlite/000052_create_contractapis_table.up.sql +++ b/db/migrations/sqlite/000052_create_contractapis_table.up.sql @@ -2,8 +2,8 @@ CREATE TABLE contractapis ( seq INTEGER PRIMARY KEY AUTOINCREMENT, id UUID NOT NULL, interface_id UUID NOT NULL, - ledger BYTEA, - 
location BYTEA, + ledger TEXT, + location TEXT, name VARCHAR(64) NOT NULL, namespace VARCHAR(64) NOT NULL, message_id UUID NOT NULL diff --git a/db/migrations/sqlite/000053_create_contractsubscriptions_table.up.sql b/db/migrations/sqlite/000053_create_contractsubscriptions_table.up.sql index cf40ce7060..e0c6448c75 100644 --- a/db/migrations/sqlite/000053_create_contractsubscriptions_table.up.sql +++ b/db/migrations/sqlite/000053_create_contractsubscriptions_table.up.sql @@ -2,11 +2,11 @@ CREATE TABLE contractsubscriptions ( seq INTEGER PRIMARY KEY AUTOINCREMENT, id UUID NOT NULL, interface_id UUID NULL, - event BYTEA NOT NULL, + event TEXT NOT NULL, namespace VARCHAR(64) NOT NULL, name VARCHAR(64) NULL, protocol_id VARCHAR(1024) NOT NULL, - location BYTEA NOT NULL, + location TEXT NOT NULL, created BIGINT NOT NULL ); diff --git a/db/migrations/sqlite/000054_create_contractevents_table.up.sql b/db/migrations/sqlite/000054_create_contractevents_table.up.sql index 97ead7ea56..d08726c24e 100644 --- a/db/migrations/sqlite/000054_create_contractevents_table.up.sql +++ b/db/migrations/sqlite/000054_create_contractevents_table.up.sql @@ -4,8 +4,8 @@ CREATE TABLE contractevents ( namespace VARCHAR(64) NOT NULL, name VARCHAR(1024) NOT NULL, subscription_id UUID NOT NULL, - outputs BYTEA, - info BYTEA, + outputs TEXT, + info TEXT, timestamp BIGINT NOT NULL ); From 87d0ddeb48637581b4ef0bb0369e8a23c2a38eed Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 11 Jan 2022 23:39:17 -0500 Subject: [PATCH 18/21] Fix gap in coverage Signed-off-by: Peter Broadhurst --- internal/apiserver/route_put_contract_api.go | 8 +++++++- internal/apiserver/route_put_contract_api_test.go | 4 ++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/internal/apiserver/route_put_contract_api.go b/internal/apiserver/route_put_contract_api.go index a27f9e872f..1d74921cb4 100644 --- a/internal/apiserver/route_put_contract_api.go +++ b/internal/apiserver/route_put_contract_api.go @@ -46,6 +46,12 @@ var putContractAPI = &oapispec.Route{ JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { waitConfirm := strings.EqualFold(r.QP["confirm"], "true") r.SuccessStatus = syncRetcode(waitConfirm) - return getOr(r.Ctx).Contracts().BroadcastContractAPI(r.Ctx, r.PP["ns"], r.Input.(*fftypes.ContractAPI), waitConfirm) + api := r.Input.(*fftypes.ContractAPI) + api.ID, err = fftypes.ParseUUID(r.Ctx, r.PP["id"]) + var res interface{} + if err == nil { + res, err = getOr(r.Ctx).Contracts().BroadcastContractAPI(r.Ctx, r.PP["ns"], api, waitConfirm) + } + return res, err }, } diff --git a/internal/apiserver/route_put_contract_api_test.go b/internal/apiserver/route_put_contract_api_test.go index a7cf267981..5809cf6fbe 100644 --- a/internal/apiserver/route_put_contract_api_test.go +++ b/internal/apiserver/route_put_contract_api_test.go @@ -35,7 +35,7 @@ func TestPutContractAPI(t *testing.T) { input := fftypes.Datatype{} var buf bytes.Buffer json.NewEncoder(&buf).Encode(&input) - req := httptest.NewRequest("POST", "/api/v1/namespaces/ns1/apis", &buf) + req := httptest.NewRequest("PUT", "/api/v1/namespaces/ns1/apis/99EEE458-037C-4C78-B66B-31E52F93D2E9", &buf) req.Header.Set("Content-Type", "application/json; charset=utf-8") res := httptest.NewRecorder() @@ -53,7 +53,7 @@ func TestPutContractAPISync(t *testing.T) { input := fftypes.Datatype{} var buf bytes.Buffer json.NewEncoder(&buf).Encode(&input) - req := httptest.NewRequest("POST", "/api/v1/namespaces/ns1/apis?confirm", &buf) + req := httptest.NewRequest("PUT", 
"/api/v1/namespaces/ns1/apis/99EEE458-037C-4C78-B66B-31E52F93D2E9?confirm", &buf) req.Header.Set("Content-Type", "application/json; charset=utf-8") res := httptest.NewRecorder() From 4478b71a89376a918e5caa7f16c99dc9e5b67a7d Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Wed, 12 Jan 2022 17:28:14 -0500 Subject: [PATCH 19/21] Restore normality to the space time continuum Signed-off-by: Peter Broadhurst --- db/migrations/postgres/000002_create_data_table.up.sql | 2 +- db/migrations/postgres/000004_create_batches_table.up.sql | 2 +- .../postgres/000010_create_subscriptions_table.up.sql | 2 +- db/migrations/postgres/000015_create_config_table.up.sql | 2 +- db/migrations/postgres/000050_create_ffi_methods_table.up.sql | 4 ++-- db/migrations/postgres/000051_create_ffi_events_table.up.sql | 2 +- .../postgres/000053_create_contractsubscriptions_table.up.sql | 4 ++-- db/migrations/sqlite/000002_create_data_table.up.sql | 2 +- db/migrations/sqlite/000010_create_subscriptions_table.up.sql | 2 +- db/migrations/sqlite/000015_create_config_table.up.sql | 2 +- db/migrations/sqlite/000050_create_ffi_methods_table.up.sql | 4 ++-- db/migrations/sqlite/000051_create_ffi_events_table.up.sql | 2 +- .../sqlite/000053_create_contractsubscriptions_table.up.sql | 4 ++-- 13 files changed, 17 insertions(+), 17 deletions(-) diff --git a/db/migrations/postgres/000002_create_data_table.up.sql b/db/migrations/postgres/000002_create_data_table.up.sql index d761c23a24..a6782e9e67 100644 --- a/db/migrations/postgres/000002_create_data_table.up.sql +++ b/db/migrations/postgres/000002_create_data_table.up.sql @@ -8,7 +8,7 @@ CREATE TABLE data ( datatype_version VARCHAR(64) NOT NULL, hash CHAR(64) NOT NULL, created BIGINT NOT NULL, - value TEXT NOT NULL, + value TEXT NOT NULL, blobstore BOOLEAN NOT NULL ); CREATE UNIQUE INDEX data_id ON data(id); diff --git a/db/migrations/postgres/000004_create_batches_table.up.sql b/db/migrations/postgres/000004_create_batches_table.up.sql index 84973cf5f2..b18c52f2f5 100644 --- a/db/migrations/postgres/000004_create_batches_table.up.sql +++ b/db/migrations/postgres/000004_create_batches_table.up.sql @@ -8,7 +8,7 @@ CREATE TABLE batches ( group_hash CHAR(64), hash CHAR(64), created BIGINT NOT NULL, - payload TEXT NOT NULL, + payload TEXT NOT NULL, payload_ref CHAR(64), confirmed BIGINT, tx_type VARCHAR(64) NOT NULL, diff --git a/db/migrations/postgres/000010_create_subscriptions_table.up.sql b/db/migrations/postgres/000010_create_subscriptions_table.up.sql index 96af8c8509..a6f7f65caf 100644 --- a/db/migrations/postgres/000010_create_subscriptions_table.up.sql +++ b/db/migrations/postgres/000010_create_subscriptions_table.up.sql @@ -9,7 +9,7 @@ CREATE TABLE subscriptions ( filter_topics VARCHAR(256) NOT NULL, filter_tag VARCHAR(256) NOT NULL, filter_group VARCHAR(256) NOT NULL, - options TEXT NOT NULL, + options TEXT NOT NULL, created BIGINT NOT NULL ); diff --git a/db/migrations/postgres/000015_create_config_table.up.sql b/db/migrations/postgres/000015_create_config_table.up.sql index 663928c435..9a39470550 100644 --- a/db/migrations/postgres/000015_create_config_table.up.sql +++ b/db/migrations/postgres/000015_create_config_table.up.sql @@ -2,7 +2,7 @@ BEGIN; CREATE TABLE config ( seq SERIAL PRIMARY KEY, config_key VARCHAR(512) NOT NULL, - config_value TEXT NOT NULL + config_value TEXT NOT NULL ); CREATE UNIQUE INDEX config_sequence ON config(seq); CREATE UNIQUE INDEX config_config_key ON config(config_key); diff --git a/db/migrations/postgres/000050_create_ffi_methods_table.up.sql 
b/db/migrations/postgres/000050_create_ffi_methods_table.up.sql index 55bd577311..37079afc81 100644 --- a/db/migrations/postgres/000050_create_ffi_methods_table.up.sql +++ b/db/migrations/postgres/000050_create_ffi_methods_table.up.sql @@ -7,8 +7,8 @@ CREATE TABLE ffimethods ( name VARCHAR(1024) NOT NULL, pathname VARCHAR(1024) NOT NULL, description TEXT NOT NULL, - params TEXT NOT NULL, - returns TEXT NOT NULL + params TEXT NOT NULL, + returns TEXT NOT NULL ); CREATE UNIQUE INDEX ffimethods_pathname ON ffimethods(interface_id,pathname); diff --git a/db/migrations/postgres/000051_create_ffi_events_table.up.sql b/db/migrations/postgres/000051_create_ffi_events_table.up.sql index 88ce5b8597..06ddbeea13 100644 --- a/db/migrations/postgres/000051_create_ffi_events_table.up.sql +++ b/db/migrations/postgres/000051_create_ffi_events_table.up.sql @@ -7,7 +7,7 @@ CREATE TABLE ffievents ( name VARCHAR(1024) NOT NULL, pathname VARCHAR(1024) NOT NULL, description TEXT NOT NULL, - params TEXT NOT NULL + params TEXT NOT NULL ); CREATE UNIQUE INDEX ffievents_pathname ON ffievents(interface_id,pathname); diff --git a/db/migrations/postgres/000053_create_contractsubscriptions_table.up.sql b/db/migrations/postgres/000053_create_contractsubscriptions_table.up.sql index d8e0e7c391..d719bebbc6 100644 --- a/db/migrations/postgres/000053_create_contractsubscriptions_table.up.sql +++ b/db/migrations/postgres/000053_create_contractsubscriptions_table.up.sql @@ -3,11 +3,11 @@ CREATE TABLE contractsubscriptions ( seq SERIAL PRIMARY KEY, id UUID NOT NULL, interface_id UUID NULL, - event TEXT NOT NULL, + event TEXT NOT NULL, namespace VARCHAR(64) NOT NULL, name VARCHAR(64) NULL, protocol_id VARCHAR(1024) NOT NULL, - location TEXT NOT NULL, + location TEXT NOT NULL, created BIGINT NOT NULL ); diff --git a/db/migrations/sqlite/000002_create_data_table.up.sql b/db/migrations/sqlite/000002_create_data_table.up.sql index f420b15d4c..d3c0066256 100644 --- a/db/migrations/sqlite/000002_create_data_table.up.sql +++ b/db/migrations/sqlite/000002_create_data_table.up.sql @@ -7,7 +7,7 @@ CREATE TABLE data ( datatype_version VARCHAR(64) NOT NULL, hash CHAR(64) NOT NULL, created BIGINT NOT NULL, - value TEXT NOT NULL, + value TEXT NOT NULL, blob_hash CHAR(64), blob_public VARCHAR(1024) ); diff --git a/db/migrations/sqlite/000010_create_subscriptions_table.up.sql b/db/migrations/sqlite/000010_create_subscriptions_table.up.sql index e017bbd4d4..bfef71a662 100644 --- a/db/migrations/sqlite/000010_create_subscriptions_table.up.sql +++ b/db/migrations/sqlite/000010_create_subscriptions_table.up.sql @@ -8,7 +8,7 @@ CREATE TABLE subscriptions ( filter_topics VARCHAR(256) NOT NULL, filter_tag VARCHAR(256) NOT NULL, filter_group VARCHAR(256) NOT NULL, - options TEXT NOT NULL, + options TEXT NOT NULL, created BIGINT NOT NULL ); diff --git a/db/migrations/sqlite/000015_create_config_table.up.sql b/db/migrations/sqlite/000015_create_config_table.up.sql index 1037cba812..3e55ea938e 100644 --- a/db/migrations/sqlite/000015_create_config_table.up.sql +++ b/db/migrations/sqlite/000015_create_config_table.up.sql @@ -1,7 +1,7 @@ CREATE TABLE config ( seq INTEGER PRIMARY KEY AUTOINCREMENT, config_key VARCHAR(512) NOT NULL, - config_value TEXT NOT NULL + config_value TEXT NOT NULL ); CREATE UNIQUE INDEX config_sequence ON config(seq); CREATE UNIQUE INDEX config_config_key ON config(config_key); diff --git a/db/migrations/sqlite/000050_create_ffi_methods_table.up.sql b/db/migrations/sqlite/000050_create_ffi_methods_table.up.sql index 
461c8c768b..3d435ced02 100644 --- a/db/migrations/sqlite/000050_create_ffi_methods_table.up.sql +++ b/db/migrations/sqlite/000050_create_ffi_methods_table.up.sql @@ -6,8 +6,8 @@ CREATE TABLE ffimethods ( name VARCHAR(1024) NOT NULL, pathname VARCHAR(1024) NOT NULL, description TEXT NOT NULL, - params TEXT NOT NULL, - returns TEXT NOT NULL + params TEXT NOT NULL, + returns TEXT NOT NULL ); CREATE UNIQUE INDEX ffimethods_pathname ON ffimethods(interface_id,pathname); diff --git a/db/migrations/sqlite/000051_create_ffi_events_table.up.sql b/db/migrations/sqlite/000051_create_ffi_events_table.up.sql index 51bdc8805f..c48bb1c35d 100644 --- a/db/migrations/sqlite/000051_create_ffi_events_table.up.sql +++ b/db/migrations/sqlite/000051_create_ffi_events_table.up.sql @@ -6,7 +6,7 @@ CREATE TABLE ffievents ( name VARCHAR(1024) NOT NULL, pathname VARCHAR(1024) NOT NULL, description TEXT NOT NULL, - params TEXT NOT NULL + params TEXT NOT NULL ); CREATE UNIQUE INDEX ffievents_pathname ON ffievents(interface_id,pathname); diff --git a/db/migrations/sqlite/000053_create_contractsubscriptions_table.up.sql b/db/migrations/sqlite/000053_create_contractsubscriptions_table.up.sql index e0c6448c75..d4e425a3a4 100644 --- a/db/migrations/sqlite/000053_create_contractsubscriptions_table.up.sql +++ b/db/migrations/sqlite/000053_create_contractsubscriptions_table.up.sql @@ -2,11 +2,11 @@ CREATE TABLE contractsubscriptions ( seq INTEGER PRIMARY KEY AUTOINCREMENT, id UUID NOT NULL, interface_id UUID NULL, - event TEXT NOT NULL, + event TEXT NOT NULL, namespace VARCHAR(64) NOT NULL, name VARCHAR(64) NULL, protocol_id VARCHAR(1024) NOT NULL, - location TEXT NOT NULL, + location TEXT NOT NULL, created BIGINT NOT NULL ); From 0942cd94ec24beb09946c66a44ed804bd500ca8e Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 13 Jan 2022 14:26:38 -0500 Subject: [PATCH 20/21] Fix comments Signed-off-by: Peter Broadhurst --- internal/batch/batch_processor_test.go | 8 ++++---- internal/config/config.go | 2 +- internal/database/sqlcommon/provider.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index 1fdc95aac1..181c8cf524 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -77,7 +77,7 @@ func TestUnfilledBatch(t *testing.T) { mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateBatch", mock.Anything, mock.Anything).Return(nil) - // Generate the work the work + // Generate the work work := make([]*batchWork, 5) for i := 0; i < len(work); i++ { msgid := fftypes.NewUUID() @@ -129,7 +129,7 @@ func TestBatchSizeOverflow(t *testing.T) { mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateBatch", mock.Anything, mock.Anything).Return(nil) - // Generate the work the work + // Generate the work work := make([]*batchWork, 2) for i := 0; i < 2; i++ { msgid := fftypes.NewUUID() @@ -185,7 +185,7 @@ func TestFilledBatchSlowPersistence(t *testing.T) { mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - // Generate the work the work + // Generate the work work := make([]*batchWork, 10) for i := 0; i < 10; i++ { msgid := fftypes.NewUUID() @@ -259,7 +259,7 @@ func TestCloseToUnblockUpsertBatch(t *testing.T) { <-waitForCall } - // Generate the work the work + // Generate the work msgid := fftypes.NewUUID() 
work := &batchWork{ msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}}, diff --git a/internal/config/config.go b/internal/config/config.go index 05d1bc0cf0..c660d8529c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -78,7 +78,7 @@ var ( PrivateMessagingBatchAgentTimeout = rootKey("privatemessaging.batch.agentTimeout") // PrivateMessagingBatchSize is the maximum size of a batch for broadcast messages PrivateMessagingBatchSize = rootKey("privatemessaging.batch.size") - // PrivateMessagingBatchPayloadLimit is the maximum payload size of a batch for broadcast messages + // PrivateMessagingBatchPayloadLimit is the maximum payload size of a private message data exchange payload PrivateMessagingBatchPayloadLimit = rootKey("privatemessaging.batch.payloadLimit") // PrivateMessagingBatchTimeout is the timeout to wait for a batch to fill, before sending PrivateMessagingBatchTimeout = rootKey("privatemessaging.batch.timeout") diff --git a/internal/database/sqlcommon/provider.go b/internal/database/sqlcommon/provider.go index ac6e14b05a..092afc4eaa 100644 --- a/internal/database/sqlcommon/provider.go +++ b/internal/database/sqlcommon/provider.go @@ -54,7 +54,7 @@ type Provider interface { // GetDriver returns the driver implementation GetMigrationDriver(*sql.DB) (migratedb.Driver, error) - // Features returns fields + // Features returns database specific configuration switches Features() SQLFeatures // UpdateInsertForSequenceReturn updates the INSERT query for returning the Sequence, and returns whether it needs to be run as a query to return the Sequence field From 87ab9adf0bd4662292b56573cdda07e08ab66953 Mon Sep 17 00:00:00 2001 From: Andrew Richardson Date: Thu, 13 Jan 2022 17:19:17 -0500 Subject: [PATCH 21/21] Fix typo in dx callbacks unit test Signed-off-by: Andrew Richardson --- internal/events/dx_callbacks_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/events/dx_callbacks_test.go b/internal/events/dx_callbacks_test.go index 6d440fdc61..db2d2c3070 100644 --- a/internal/events/dx_callbacks_test.go +++ b/internal/events/dx_callbacks_test.go @@ -516,7 +516,7 @@ func TestTransferResultManifestMismatch(t *testing.T) { ID: id, BackendID: "tracking12345", Input: fftypes.JSONObject{ - "maniest": "Bob", + "manifest": "Bob", }, }, }, nil, nil)
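
For reference, the patches above introduce the Manifest type, Batch.Manifest() and TransportWrapper.Manifest(), record the serialized manifest under the sending operation's "manifest" input, and extend TransferResult to carry a TransportStatusUpdate that can include the manifest computed by the receiving FireFly. A minimal sketch of how those pieces can fit together is shown below. The compareManifests helper and the main() wiring are illustrative assumptions only, not code from these patches; the fftypes types and constructors it calls are the ones added or used in the changes above.

// Illustrative sketch, assuming the pkg/fftypes package at this revision.
// compareManifests and the sample wiring are hypothetical, for illustration only.
package main

import (
	"fmt"

	"github.com/hyperledger/firefly/pkg/fftypes"
)

// compareManifests is a hypothetical receipt check: the sender records
// tw.Manifest().String() on its operation, and the receiving FireFly can
// report the manifest it computed via TransportStatusUpdate.Manifest.
// Comparing the serialized forms keeps the check to a string equality test.
func compareManifests(sent *fftypes.TransportWrapper, update fftypes.TransportStatusUpdate) bool {
	expected := sent.Manifest()
	if expected == nil {
		return false
	}
	return expected.String() == update.Manifest
}

func main() {
	// Build a tiny batch wrapped for transport, in the same shape dispatchBatch produces
	batch := &fftypes.Batch{
		Payload: fftypes.BatchPayload{
			Messages: []*fftypes.Message{
				{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Hash: fftypes.NewRandB32()},
			},
			Data: []*fftypes.Data{
				{ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()},
			},
		},
	}
	tw := &fftypes.TransportWrapper{
		Type:  fftypes.TransportPayloadTypeBatch,
		Batch: batch,
	}

	// Simulate the receiver echoing back the manifest it derived from the payload
	update := fftypes.TransportStatusUpdate{Manifest: tw.Manifest().String()}

	fmt.Println("manifest matches receipt:", compareManifests(tw, update))
}

Because the sender stores the same serialized string under the operation's "manifest" input, a mismatch (as exercised by TestTransferResultManifestMismatch) reduces to this kind of simple string comparison; no re-parsing of the manifest JSON is required for the happy path.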