From 1727abd86ee400fcc92ec36b954e8d5865b92ca8 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 3 Mar 2022 09:19:08 -0500 Subject: [PATCH 01/11] Initial data structures for batch manifest storage only change Signed-off-by: Peter Broadhurst --- internal/batch/batch_manager_test.go | 2 + internal/batch/batch_processor.go | 19 +++++ internal/batch/batch_processor_test.go | 78 +++++++++++++++++++ internal/database/sqlcommon/batch_sql.go | 12 +-- internal/database/sqlcommon/batch_sql_test.go | 18 ++--- pkg/fftypes/batch.go | 50 ++++++++++-- pkg/fftypes/data.go | 15 ++++ pkg/fftypes/manifest.go | 30 ------- pkg/fftypes/manifest_test.go | 39 ---------- pkg/fftypes/message.go | 12 +++ pkg/fftypes/message_test.go | 17 ++++ 11 files changed, 200 insertions(+), 92 deletions(-) delete mode 100644 pkg/fftypes/manifest.go delete mode 100644 pkg/fftypes/manifest_test.go diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index a37ac73801..c9566e862d 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -102,6 +102,7 @@ func TestE2EDispatchBroadcast(t *testing.T) { mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) + mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) // pins rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything).Return(nil) rag.RunFn = func(a mock.Arguments) { ctx := a.Get(0).(context.Context) @@ -216,6 +217,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{data}, true, nil) mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() mdi.On("GetMessages", mock.Anything, 
mock.Anything).Return([]*fftypes.Message{}, nil, nil) + mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) // pins mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything).Return(nil) diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 4d17900ff9..73e60f88ad 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -434,8 +434,14 @@ func (bp *batchProcessor) maskContext(ctx context.Context, msg *fftypes.Message, func (bp *batchProcessor) maskContexts(ctx context.Context, batch *fftypes.Batch) ([]*fftypes.Bytes32, error) { // Calculate the sequence hashes + pinsAssigned := false contextsOrPins := make([]*fftypes.Bytes32, 0, len(batch.Payload.Messages)) for _, msg := range batch.Payload.Messages { + if len(msg.Pins) > 0 { + // We have already allocated pins to this message, we cannot re-allocate. + log.L(ctx).Debugf("Message %s already has %d pins allocated", msg.Header.ID, len(msg.Pins)) + continue + } for _, topic := range msg.Header.Topics { contextOrPin, err := bp.maskContext(ctx, msg, topic) if err != nil { @@ -444,6 +450,19 @@ func (bp *batchProcessor) maskContexts(ctx context.Context, batch *fftypes.Batch contextsOrPins = append(contextsOrPins, contextOrPin) if msg.Header.Group != nil { msg.Pins = append(msg.Pins, contextOrPin.String()) + pinsAssigned = true + } + } + if pinsAssigned { + // It's important we update the message pins at this phase, as we have "spent" a nonce + // on this topic from the database. So this message has grabbed a slot in our queue. + // If we fail the dispatch, and redo the batch sealing process, we must not allocate + // a second nonce to it (and as such modify the batch payload). 
+ err := bp.database.UpdateMessage(ctx, msg.Header.ID, + database.MessageQueryFactory.NewUpdate(ctx).Set("pins", msg.Pins), + ) + if err != nil { + return nil, err } } } diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index 63b3cd89ff..6f7a6a0d2c 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -331,3 +331,81 @@ func TestMarkMessageDispatchedUnpinnedOK(t *testing.T) { mdi.AssertExpectations(t) } + +func TestMaskContextsDuplicate(t *testing.T) { + log.SetLevel("debug") + config.Reset() + + dispatched := make(chan *fftypes.Batch) + mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + dispatched <- b + return nil + }) + + mdi.On("UpsertNonceNext", mock.Anything, mock.Anything).Return(nil).Once() + mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + + batch := &fftypes.Batch{ + Payload: fftypes.BatchPayload{ + Messages: []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypePrivate, + Group: fftypes.NewRandB32(), + Topics: fftypes.FFStringArray{"topic1"}, + }, + }, + }, + }, + } + + _, err := bp.maskContexts(bp.ctx, batch) + assert.NoError(t, err) + + // 2nd time no DB ops + _, err = bp.maskContexts(bp.ctx, batch) + assert.NoError(t, err) + + bp.cancelCtx() + <-bp.done + + mdi.AssertExpectations(t) +} + +func TestMaskContextsUpdataMessageFail(t *testing.T) { + log.SetLevel("debug") + config.Reset() + + dispatched := make(chan *fftypes.Batch) + mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + dispatched <- b + return nil + }) + + mdi.On("UpsertNonceNext", mock.Anything, mock.Anything).Return(nil).Once() + mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")).Once() + + batch := &fftypes.Batch{ + Payload: fftypes.BatchPayload{ + 
Messages: []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypePrivate, + Group: fftypes.NewRandB32(), + Topics: fftypes.FFStringArray{"topic1"}, + }, + }, + }, + }, + } + + _, err := bp.maskContexts(bp.ctx, batch) + assert.Regexp(t, "pop", err) + + bp.cancelCtx() + <-bp.done + + mdi.AssertExpectations(t) +} diff --git a/internal/database/sqlcommon/batch_sql.go b/internal/database/sqlcommon/batch_sql.go index d4f72d4caf..6f2fefda82 100644 --- a/internal/database/sqlcommon/batch_sql.go +++ b/internal/database/sqlcommon/batch_sql.go @@ -54,7 +54,7 @@ var ( } ) -func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.Batch) (err error) { +func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.BatchPersisted) (err error) { ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) if err != nil { return err @@ -140,8 +140,8 @@ func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.Batch) (err return s.commitTx(ctx, tx, autoCommit) } -func (s *SQLCommon) batchResult(ctx context.Context, row *sql.Rows) (*fftypes.Batch, error) { - var batch fftypes.Batch +func (s *SQLCommon) batchResult(ctx context.Context, row *sql.Rows) (*fftypes.BatchPersisted, error) { + var batch fftypes.BatchPersisted err := row.Scan( &batch.ID, &batch.Type, @@ -164,7 +164,7 @@ func (s *SQLCommon) batchResult(ctx context.Context, row *sql.Rows) (*fftypes.Ba return &batch, nil } -func (s *SQLCommon) GetBatchByID(ctx context.Context, id *fftypes.UUID) (message *fftypes.Batch, err error) { +func (s *SQLCommon) GetBatchByID(ctx context.Context, id *fftypes.UUID) (message *fftypes.BatchPersisted, err error) { rows, _, err := s.query(ctx, sq.Select(batchColumns...). 
@@ -189,7 +189,7 @@ func (s *SQLCommon) GetBatchByID(ctx context.Context, id *fftypes.UUID) (message return batch, nil } -func (s *SQLCommon) GetBatches(ctx context.Context, filter database.Filter) (message []*fftypes.Batch, res *database.FilterResult, err error) { +func (s *SQLCommon) GetBatches(ctx context.Context, filter database.Filter) (message []*fftypes.BatchPersisted, res *database.FilterResult, err error) { query, fop, fi, err := s.filterSelect(ctx, "", sq.Select(batchColumns...).From("batches"), filter, batchFilterFieldMap, []interface{}{"sequence"}) if err != nil { @@ -202,7 +202,7 @@ func (s *SQLCommon) GetBatches(ctx context.Context, filter database.Filter) (mes } defer rows.Close() - batches := []*fftypes.Batch{} + batches := []*fftypes.BatchPersisted{} for rows.Next() { batch, err := s.batchResult(ctx, rows) if err != nil { diff --git a/internal/database/sqlcommon/batch_sql_test.go b/internal/database/sqlcommon/batch_sql_test.go index c814f5d758..2d32766366 100644 --- a/internal/database/sqlcommon/batch_sql_test.go +++ b/internal/database/sqlcommon/batch_sql_test.go @@ -38,7 +38,7 @@ func TestBatch2EWithDB(t *testing.T) { // Create a new batch entry batchID := fftypes.NewUUID() msgID1 := fftypes.NewUUID() - batch := &fftypes.Batch{ + batch := &fftypes.BatchPersisted{ ID: batchID, Type: fftypes.MessageTypeBroadcast, SignerRef: fftypes.SignerRef{ @@ -49,7 +49,7 @@ func TestBatch2EWithDB(t *testing.T) { Hash: fftypes.NewRandB32(), Created: fftypes.Now(), Node: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ + Payload: fftypes.BatchPersistedPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ID: msgID1}}, }, @@ -78,7 +78,7 @@ func TestBatch2EWithDB(t *testing.T) { txid := fftypes.NewUUID() msgID2 := fftypes.NewUUID() payloadRef := "" - batchUpdated := &fftypes.Batch{ + batchUpdated := &fftypes.BatchPersisted{ ID: batchID, Type: fftypes.MessageTypeBroadcast, SignerRef: fftypes.SignerRef{ @@ -89,7 +89,7 @@ func TestBatch2EWithDB(t 
*testing.T) { Hash: fftypes.NewRandB32(), Created: fftypes.Now(), Node: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ + Payload: fftypes.BatchPersistedPayload{ TX: fftypes.TransactionRef{ ID: txid, Type: fftypes.TransactionTypeBatchPin, @@ -164,7 +164,7 @@ func TestBatch2EWithDB(t *testing.T) { func TestUpsertBatchFailBegin(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) - err := s.UpsertBatch(context.Background(), &fftypes.Batch{}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{}) assert.Regexp(t, "FF10114", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -175,7 +175,7 @@ func TestUpsertBatchFailSelect(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() batchID := fftypes.NewUUID() - err := s.UpsertBatch(context.Background(), &fftypes.Batch{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{ID: batchID}) assert.Regexp(t, "FF10115", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -187,7 +187,7 @@ func TestUpsertBatchFailInsert(t *testing.T) { mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() batchID := fftypes.NewUUID() - err := s.UpsertBatch(context.Background(), &fftypes.Batch{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{ID: batchID}) assert.Regexp(t, "FF10116", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -200,7 +200,7 @@ func TestUpsertBatchFailUpdate(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"hash"}).AddRow(hash)) mock.ExpectExec("UPDATE .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() - err := s.UpsertBatch(context.Background(), &fftypes.Batch{ID: batchID, Hash: hash}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{ID: batchID, Hash: hash}) assert.Regexp(t, "FF10117", err) assert.NoError(t, 
mock.ExpectationsWereMet()) } @@ -212,7 +212,7 @@ func TestUpsertBatchFailCommit(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id"})) mock.ExpectExec("INSERT .*").WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectCommit().WillReturnError(fmt.Errorf("pop")) - err := s.UpsertBatch(context.Background(), &fftypes.Batch{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{ID: batchID}) assert.Regexp(t, "FF10119", err) assert.NoError(t, mock.ExpectationsWereMet()) } diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index db5bbc62a0..65ccdd13ad 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -25,21 +25,55 @@ import ( "github.com/hyperledger/firefly/internal/i18n" ) -type Batch struct { +// BatchHeader is the common fields between the serialized batch, and the batch manifest +type BatchHeader struct { ID *UUID `json:"id"` Namespace string `json:"namespace"` Type MessageType `json:"type"` Node *UUID `json:"node,omitempty"` SignerRef - Group *Bytes32 `jdon:"group,omitempty"` - Hash *Bytes32 `json:"hash"` - Created *FFTime `json:"created"` - Confirmed *FFTime `json:"confirmed"` + Group *Bytes32 `json:"group,omitempty"` + Hash *Bytes32 `json:"hash"` +} + +// BatchManifest is all we need to persist to be able to reconstitute +// an identical batch. It can be generated from a received batch to +// confirm you have received an identical batch to that sent +type BatchManifest struct { + BatchHeader + TX TransactionRef `json:"tx"` + Messages []MessageRef `json:"messages"` + Data []DataRef `json:"data"` +} + +// Batch is the full payload object used in-flight. 
+type Batch struct { + BatchHeader Payload BatchPayload `json:"payload"` PayloadRef string `json:"payloadRef,omitempty"` - Blobs []*Bytes32 `json:"blobs,omitempty"` // only used in-flight + Blobs []*Bytes32 `json:"blobs,omitempty"` +} + +// BatchPersisted is the structure written to the database +type BatchPersisted struct { + BatchManifest + Created *FFTime `json:"created"` + Confirmed *FFTime `json:"confirmed"` +} + +func (mf *BatchManifest) String() string { + b, _ := json.Marshal(&mf) + return string(b) } +// BatchPayload contains the full JSON of the messages and data, but +// importantly only the immutable parts of the messages/data. +// In v0.13 and earlier, we used the whole of this payload object to +// form the hash of the in-flight batch. Subsequent to that we only +// calculate the hash of the manifest, as that contains the hashes +// of all the messages and data (thus minimizing the overhead of +// calculating the hash). +// - See Message.BatchMessage() and Data.BatchData() type BatchPayload struct { TX TransactionRef `json:"tx"` Messages []*Message `json:"messages"` Data []*Data `json:"data"` } @@ -78,11 +112,11 @@ func (ma *BatchPayload) Scan(src interface{}) error { } -func (b *Batch) Manifest() *Manifest { +func (b *Batch) Manifest() *BatchManifest { if b == nil { return nil } - tm := &Manifest{ + tm := &BatchManifest{ Messages: make([]MessageRef, len(b.Payload.Messages)), Data: make([]DataRef, len(b.Payload.Data)), } diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index 7834a285d7..070c311ce1 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -52,6 +52,21 @@ type Data struct { ValueSize int64 `json:"-"` // Used internally for message size calcuation, without full payload retrieval } +// BatchMessage is the fields in a data record that are immutable, assured to be consistent on all parties, +// and cannot change after the data is sealed. +// This is what is transferred and hashed in a batch payload between nodes. 
+func (d *Data) BatchMessage() *Data { + return &Data{ + ID: d.ID, + Validator: d.Validator, + Namespace: d.Namespace, + Hash: d.Hash, + Datatype: d.Datatype, + Value: d.Value, + Blob: d.Blob, + } +} + type DataAndBlob struct { Data *Data Blob *Blob diff --git a/pkg/fftypes/manifest.go b/pkg/fftypes/manifest.go deleted file mode 100644 index 8410c5b0c8..0000000000 --- a/pkg/fftypes/manifest.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright © 2022 Kaleido, Inc. -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fftypes - -import "encoding/json" - -// Manifest is a list of references to messages and data -type Manifest struct { - Messages []MessageRef `json:"messages"` - Data []DataRef `json:"data"` -} - -func (mf *Manifest) String() string { - b, _ := json.Marshal(&mf) - return string(b) -} diff --git a/pkg/fftypes/manifest_test.go b/pkg/fftypes/manifest_test.go deleted file mode 100644 index 6f8df08127..0000000000 --- a/pkg/fftypes/manifest_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright © 2021 Kaleido, Inc. -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fftypes - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestManifestToString(t *testing.T) { - - batch := &Batch{ - Payload: BatchPayload{ - Messages: []*Message{ - {Header: MessageHeader{ID: MustParseUUID("c38e76ec-92a6-4659-805d-8ae3b7437c40")}, Hash: MustParseBytes32("169ef5233cf44df3d71df59f25928743e9a76378bb1375e06539b732b1fc57e5")}, - }, - Data: []*Data{ - {ID: MustParseUUID("7bc49647-cd1c-4633-98fa-ddbb208d61bd"), Hash: MustParseBytes32("2b849d47e44a291cd83bee4e7ace66178a5245a151d3bbd02011312ec2604ed6")}, - {ID: MustParseUUID("5b80eec3-04b5-4557-bced-6a458ecb9ef2"), Hash: MustParseBytes32("2bcddd992d17e89a5aafbe99c59d954018ddadf4e533a164808ae2389bbf33dc")}, - }, - }, - } - assert.Equal(t, "{\"messages\":[{\"id\":\"c38e76ec-92a6-4659-805d-8ae3b7437c40\",\"hash\":\"169ef5233cf44df3d71df59f25928743e9a76378bb1375e06539b732b1fc57e5\"}],\"data\":[{\"id\":\"7bc49647-cd1c-4633-98fa-ddbb208d61bd\",\"hash\":\"2b849d47e44a291cd83bee4e7ace66178a5245a151d3bbd02011312ec2604ed6\"},{\"id\":\"5b80eec3-04b5-4557-bced-6a458ecb9ef2\",\"hash\":\"2bcddd992d17e89a5aafbe99c59d954018ddadf4e533a164808ae2389bbf33dc\"}]}", batch.Manifest().String()) -} diff --git a/pkg/fftypes/message.go b/pkg/fftypes/message.go index 50d00034a1..b97d31c588 100644 --- a/pkg/fftypes/message.go +++ b/pkg/fftypes/message.go @@ -95,6 +95,18 @@ type Message struct { Sequence int64 `json:"-"` // Local database sequence used internally for batch assembly } +// BatchMessage returns a copy of the fields in a message that are immutable, assured to be 
consistent on all parties, +// and cannot change after the batch containing the message is sealed. +// This is what is transferred and hashed in a batch payload between nodes. +func (m *Message) BatchMessage() *Message { + return &Message{ + Header: m.Header, + Hash: m.Hash, + Data: m.Data, + Pins: m.Pins, + } +} + // MessageInOut allows API users to submit values in-line in the payload submitted, which // will be broken out and stored separately during the call. type MessageInOut struct { diff --git a/pkg/fftypes/message_test.go b/pkg/fftypes/message_test.go index 24cda7e7b2..4fe153c7d1 100644 --- a/pkg/fftypes/message_test.go +++ b/pkg/fftypes/message_test.go @@ -260,3 +260,20 @@ func TestSetInlineData(t *testing.T) { assert.NoError(t, err) assert.Regexp(t, "some data", string(b)) } + +func TestMessageImmutable(t *testing.T) { + msg := &Message{ + Header: MessageHeader{ + ID: NewUUID(), + }, + BatchID: NewUUID(), + Hash: NewRandB32(), + State: MessageStateConfirmed, + Confirmed: Now(), + Data: DataRefs{ + {ID: NewUUID(), Hash: NewRandB32()}, + }, + Pins: NewFFStringArray("pin1", "pin2"), + } + assert.True(t, msg.Hash.Equals(msg.BatchMessage().Hash)) +} From b57f45dfe580777c5d8ff2b10619c34cecad6bab Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 4 Mar 2022 07:49:44 -0500 Subject: [PATCH 02/11] Updates to structures Signed-off-by: Peter Broadhurst --- ...00069_rename_batch_payload_column.down.sql | 3 + .../000069_rename_batch_payload_column.up.sql | 3 + internal/database/sqlcommon/batch_sql.go | 4 +- internal/privatemessaging/message_test.go | 36 +++++++---- internal/privatemessaging/privatemessaging.go | 2 +- .../privatemessaging/privatemessaging_test.go | 62 +++++++++++------- pkg/fftypes/batch.go | 63 +++++++------------ pkg/fftypes/batch_test.go | 55 +++++++--------- pkg/fftypes/data.go | 25 ++++++-- pkg/fftypes/data_test.go | 20 ++++++ pkg/fftypes/message.go | 6 +- 11 files changed, 162 insertions(+), 117 deletions(-) create mode 100644 
db/migrations/sqlite/000069_rename_batch_payload_column.down.sql create mode 100644 db/migrations/sqlite/000069_rename_batch_payload_column.up.sql diff --git a/db/migrations/sqlite/000069_rename_batch_payload_column.down.sql b/db/migrations/sqlite/000069_rename_batch_payload_column.down.sql new file mode 100644 index 0000000000..05adb7ac53 --- /dev/null +++ b/db/migrations/sqlite/000069_rename_batch_payload_column.down.sql @@ -0,0 +1,1 @@ +ALTER TABLE batches RENAME COLUMN manifest TO payload; \ No newline at end of file diff --git a/db/migrations/sqlite/000069_rename_batch_payload_column.up.sql b/db/migrations/sqlite/000069_rename_batch_payload_column.up.sql new file mode 100644 index 0000000000..0ed89de72c --- /dev/null +++ b/db/migrations/sqlite/000069_rename_batch_payload_column.up.sql @@ -0,0 +1,1 @@ +ALTER TABLE batches RENAME COLUMN payload TO manifest; \ No newline at end of file diff --git a/internal/database/sqlcommon/batch_sql.go b/internal/database/sqlcommon/batch_sql.go index 6f2fefda82..67668a35bd 100644 --- a/internal/database/sqlcommon/batch_sql.go +++ b/internal/database/sqlcommon/batch_sql.go @@ -37,7 +37,7 @@ var ( "group_hash", "created", "hash", - "payload", + "manifest", "payload_ref", "confirmed", "tx_type", @@ -95,7 +95,7 @@ func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.BatchPersist Set("group_hash", batch.Group). Set("created", batch.Created). Set("hash", batch.Hash). - Set("payload", batch.Payload). + Set("manifest", batch.Manifest). Set("payload_ref", batch.PayloadRef). Set("confirmed", batch.Confirmed). Set("tx_type", batch.Payload.TX.Type). 
diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index 5901e14bf8..30b03bd59a 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -442,8 +442,10 @@ func TestSendUnpinnedMessageGroupLookupFail(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, groupID).Return(nil, fmt.Errorf("pop")).Once() err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { @@ -714,8 +716,10 @@ func TestDispatchedUnpinnedMessageMarshalFail(t *testing.T) { mdi.On("GetIdentityByID", pm.ctx, node1.ID).Return(node2, nil).Once() err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ Data: []*fftypes.Data{ {Value: fftypes.JSONAnyPtr("!Bad JSON")}, @@ -764,8 +768,10 @@ func TestDispatchedUnpinnedMessageOK(t *testing.T) { mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(nil) err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ ID: fftypes.NewUUID(), @@ -812,8 +818,10 @@ func TestSendDataTransferBlobsFail(t *testing.T) { err := pm.sendData(pm.ctx, &fftypes.TransportWrapper{ Batch: &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { @@ -865,8 +873,10 @@ func TestSendDataTransferFail(t *testing.T) { err := pm.sendData(pm.ctx, &fftypes.TransportWrapper{ Batch: &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + 
BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { @@ -910,8 +920,10 @@ func TestSendDataTransferInsertOperationFail(t *testing.T) { err := pm.sendData(pm.ctx, &fftypes.TransportWrapper{ Batch: &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index 2d30393070..22d289a37d 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -239,7 +239,7 @@ func (pm *privateMessaging) sendData(ctx context.Context, tw *fftypes.TransportW batch.Payload.TX.ID, fftypes.OpTypeDataExchangeBatchSend) op.Input = fftypes.JSONObject{ - "manifest": tw.Batch.Manifest().String(), + "batch": tw.Batch.ID, } if err = pm.database.InsertOperation(ctx, op); err != nil { return err diff --git a/internal/privatemessaging/privatemessaging_test.go b/internal/privatemessaging/privatemessaging_test.go index bc88e93adf..960807c443 100644 --- a/internal/privatemessaging/privatemessaging_test.go +++ b/internal/privatemessaging/privatemessaging_test.go @@ -155,12 +155,15 @@ func TestDispatchBatchWithBlobs(t *testing.T) { mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(nil) err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - ID: batchID, - SignerRef: fftypes.SignerRef{ - Author: "org1", + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, + Group: groupID, + Namespace: "ns1", + Hash: batchHash, }, - Group: groupID, - Namespace: "ns1", Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ ID: txID, @@ -169,7 +172,6 @@ func TestDispatchBatchWithBlobs(t *testing.T) { {ID: dataID1, Blob: &fftypes.BlobRef{Hash: blob1}}, 
}, }, - Hash: batchHash, }, []*fftypes.Bytes32{pin1, pin2}) assert.NoError(t, err) @@ -191,7 +193,9 @@ func TestDispatchBatchBadData(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{}, nil) err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + }, Payload: fftypes.BatchPayload{ Data: []*fftypes.Data{ {Value: fftypes.JSONAnyPtr(`{!json}`)}, @@ -223,8 +227,10 @@ func TestSendAndSubmitBatchBadID(t *testing.T) { mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - SignerRef: fftypes.SignerRef{ - Author: "badauthor", + BatchHeader: fftypes.BatchHeader{ + SignerRef: fftypes.SignerRef{ + Author: "badauthor", + }, }, }, []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) @@ -258,9 +264,11 @@ func TestSendAndSubmitBatchUnregisteredNode(t *testing.T) { mim.On("GetNodeOwnerOrg", pm.ctx).Return(nil, fmt.Errorf("pop")) err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "badauthor", + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "badauthor", + }, }, }, []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) @@ -277,8 +285,10 @@ func TestSendImmediateFail(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - SignerRef: fftypes.SignerRef{ - Author: "org1", + BatchHeader: fftypes.BatchHeader{ + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, }, []*fftypes.Bytes32{}) assert.Regexp(t, "pop", err) @@ -314,9 +324,11 @@ func TestSendSubmitInsertOperationFail(t *testing.T) { mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", + BatchHeader: 
fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -366,9 +378,11 @@ func TestSendSubmitBlobTransferFail(t *testing.T) { mdx.On("TransferBLOB", pm.ctx, mock.Anything, "node2-peer", "/blob/1").Return(fmt.Errorf("pop")).Once() err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, Payload: fftypes.BatchPayload{ Data: []*fftypes.Data{ @@ -423,9 +437,11 @@ func TestWriteTransactionSubmitBatchPinFail(t *testing.T) { mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, Payload: fftypes.BatchPayload{ Data: []*fftypes.Data{ diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index 65ccdd13ad..fe5e7559f1 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -17,12 +17,8 @@ package fftypes import ( - "context" "crypto/sha256" - "database/sql/driver" "encoding/json" - - "github.com/hyperledger/firefly/internal/i18n" ) // BatchHeader is the common fields between the serialized batch, and the batch manifest @@ -38,12 +34,11 @@ type BatchHeader struct { // BatchManifest is all we need to persist to be able to reconstitute // an identical batch. It can be generated from a received batch to -// confirm you have received an identical batch to that sent +// confirm you have received an identical batch to that sent. 
type BatchManifest struct { - BatchHeader - TX TransactionRef `json:"tx"` - Messages []MessageRef `json:"messages"` - Data []DataRef `json:"data"` + ID *UUID `json:"id"` + Messages []MessageRef `json:"messages"` + Data []DataRef `json:"data"` } // Batch is the full payload object used in-flight. @@ -56,14 +51,12 @@ type Batch struct { // BatchPersisted is the structure written to the database type BatchPersisted struct { - BatchManifest - Created *FFTime `json:"created"` - Confirmed *FFTime `json:"confirmed"` -} - -func (mf *BatchManifest) String() string { - b, _ := json.Marshal(&mf) - return string(b) + BatchHeader + Manifest string `json:"manifest"` // not automatically parsed + TX TransactionRef `json:"tx"` + PayloadRef string `json:"payloadRef,omitempty"` + Created *FFTime `json:"created"` + Confirmed *FFTime `json:"confirmed"` } // BatchPayload contains the full JSON of the messages and data, but @@ -80,9 +73,17 @@ type BatchPayload struct { Data []*Data `json:"data"` } -// Value implements sql.Valuer -func (ma BatchPayload) Value() (driver.Value, error) { - return json.Marshal(&ma) +func (bm *BatchManifest) String() string { + if bm == nil { + return "" + } + b, _ := json.Marshal(&bm) + return string(b) +} + +func (bm *BatchManifest) Hash() *Bytes32 { + var b32 Bytes32 = sha256.Sum256([]byte(bm.String())) + return &b32 } func (ma *BatchPayload) Hash() *Bytes32 { @@ -91,32 +92,12 @@ func (ma *BatchPayload) Hash() *Bytes32 { return &b32 } -// Scan implements sql.Scanner -func (ma *BatchPayload) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case []byte: - return json.Unmarshal(src, &ma) - - case string: - if src == "" { - return nil - } - return json.Unmarshal([]byte(src), &ma) - - default: - return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, ma) - } - -} - func (b *Batch) Manifest() *BatchManifest { if b == nil { return nil } tm := &BatchManifest{ + ID: b.ID, Messages: make([]MessageRef, 
len(b.Payload.Messages)), Data: make([]DataRef, len(b.Payload.Data)), } diff --git a/pkg/fftypes/batch_test.go b/pkg/fftypes/batch_test.go index f184c353f1..f55644775c 100644 --- a/pkg/fftypes/batch_test.go +++ b/pkg/fftypes/batch_test.go @@ -17,51 +17,44 @@ package fftypes import ( + "crypto/sha256" + "encoding/hex" "encoding/json" "testing" "github.com/stretchr/testify/assert" ) -func TestSQLSerializedMessageArray(t *testing.T) { +func TestSQLSerializedManifest(t *testing.T) { msgID1 := NewUUID() msgID2 := NewUUID() - batchPayload := BatchPayload{ - Messages: []*Message{ - {Header: MessageHeader{ID: msgID1}}, - {Header: MessageHeader{ID: msgID2}}, + batch := Batch{ + BatchHeader: BatchHeader{ + ID: NewUUID(), + }, + Payload: BatchPayload{ + TX: TransactionRef{ + ID: NewUUID(), + }, + Messages: []*Message{ + {Header: MessageHeader{ID: msgID1}}, + {Header: MessageHeader{ID: msgID2}}, + }, }, } - b, err := batchPayload.Value() - assert.NoError(t, err) - assert.IsType(t, []byte{}, b) - - var batchPayloadRead BatchPayload - err = batchPayloadRead.Scan(b) - assert.NoError(t, err) - - j1, err := json.Marshal(&batchPayload) - assert.NoError(t, err) - j2, err := json.Marshal(&batchPayloadRead) - assert.NoError(t, err) - assert.Equal(t, string(j1), string(j2)) - - err = batchPayloadRead.Scan("") - assert.NoError(t, err) - - err = batchPayloadRead.Scan("{}") - assert.NoError(t, err) + assert.Equal(t, "", ((*BatchManifest)(nil)).String()) - err = batchPayloadRead.Scan(nil) + mfString := batch.Manifest().String() + var mf *BatchManifest + err := json.Unmarshal([]byte(mfString), &mf) assert.NoError(t, err) + assert.Equal(t, msgID1, mf.Messages[0].ID) + assert.Equal(t, msgID2, mf.Messages[1].ID) + mfHash := sha256.Sum256([]byte(mfString)) + assert.Equal(t, batch.Manifest().Hash().String(), hex.EncodeToString(mfHash[:])) - var wrongType int - err = batchPayloadRead.Scan(&wrongType) - assert.Error(t, err) - - hash := batchPayload.Hash() - assert.NotNil(t, hash) + 
assert.NotEqual(t, batch.Payload.Hash().String(), hex.EncodeToString(mfHash[:])) } diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index 070c311ce1..bcd0fe5d0d 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -52,10 +52,27 @@ type Data struct { ValueSize int64 `json:"-"` // Used internally for message size calcuation, without full payload retrieval } -// BatchMessage is the fields in a data record that are immutable, assured to be consistent on all partied, -// and cannot change after the data is sealed. +func (br *BlobRef) BatchBlobRef(broadcast bool) *BlobRef { + if br == nil { + return nil + } + // For broadcast data the blob reference contains the "public" (shared storage) reference, which + // must have been allocated to this data item before sealing the batch. + if broadcast { + return br + } + // For private we omit the "public" ref in all cases, to avoid an potential for the batch pay to change due + // to the same data being allocated by the same data being sent in a broadcast batch (thus assigining a public ref). + return &BlobRef{ + Hash: br.Hash, + Size: br.Size, + Name: br.Name, + } +} + +// BatchData is the fields in a data record that are assured to be consistent on all parties. // This is what is transferred and hashed in a batch payload between nodes. 
-func (d *Data) BatchMessage() *Data { +func (d *Data) BatchData(broadcast bool) *Data { return &Data{ ID: d.ID, Validator: d.Validator, @@ -63,7 +80,7 @@ func (d *Data) BatchMessage() *Data { Hash: d.Hash, Datatype: d.Datatype, Value: d.Value, - Blob: d.Blob, + Blob: d.Blob.BatchBlobRef(broadcast), } } diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index 9532317959..a6eb65ee59 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -206,3 +206,23 @@ func TestHashDataNull(t *testing.T) { assert.Equal(t, expectedHash.String(), hash.String()) } + +func TestDataImmutable(t *testing.T) { + data := &Data{ + ID: NewUUID(), + Validator: ValidatorTypeJSON, + Namespace: "ns1", + Hash: NewRandB32(), + Created: Now(), + } + assert.True(t, data.Hash.Equals(data.BatchData(true).Hash)) + + data.Blob = &BlobRef{ + Hash: NewRandB32(), + Size: 12345, + Name: "name.txt", + Public: "sharedStorageRef", + } + assert.Equal(t, data.Blob, data.BatchData(true).Blob) + assert.Empty(t, data.BatchData(false).Blob.Public) +} diff --git a/pkg/fftypes/message.go b/pkg/fftypes/message.go index b97d31c588..c5825b24ff 100644 --- a/pkg/fftypes/message.go +++ b/pkg/fftypes/message.go @@ -95,15 +95,15 @@ type Message struct { Sequence int64 `json:"-"` // Local database sequence used internally for batch assembly } -// BatchMessage returns a copy of the fields in a message that are immutable, assured to be consistent on all partied, -// and cannot change after the batch containing the message is sealed. +// BatchMessage is the fields in a message record that are assured to be consistent on all parties. // This is what is transferred and hashed in a batch payload between nodes. 
func (m *Message) BatchMessage() *Message { return &Message{ Header: m.Header, Hash: m.Hash, Data: m.Data, - Pins: m.Pins, + // The pins are immutable once assigned by the sender, which happens before the batch is sealed + Pins: m.Pins, } } From b4a295618399aa932865b4edaf7fe06d2999b63f Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 4 Mar 2022 07:57:36 -0500 Subject: [PATCH 03/11] Update DB interaface to store manifest Signed-off-by: Peter Broadhurst --- ...00070_rename_batch_payload_column.down.sql | 1 + .../000070_rename_batch_payload_column.up.sql | 3 + ...00069_rename_batch_payload_column.down.sql | 3 - .../000069_rename_batch_payload_column.up.sql | 3 - ...00070_rename_batch_payload_column.down.sql | 3 + .../000070_rename_batch_payload_column.up.sql | 1 + internal/database/sqlcommon/batch_sql.go | 18 ++--- internal/database/sqlcommon/batch_sql_test.go | 80 ++++++++++--------- pkg/database/plugin.go | 6 +- 9 files changed, 62 insertions(+), 56 deletions(-) create mode 100644 db/migrations/postgres/000070_rename_batch_payload_column.down.sql create mode 100644 db/migrations/postgres/000070_rename_batch_payload_column.up.sql delete mode 100644 db/migrations/sqlite/000069_rename_batch_payload_column.down.sql delete mode 100644 db/migrations/sqlite/000069_rename_batch_payload_column.up.sql create mode 100644 db/migrations/sqlite/000070_rename_batch_payload_column.down.sql create mode 100644 db/migrations/sqlite/000070_rename_batch_payload_column.up.sql diff --git a/db/migrations/postgres/000070_rename_batch_payload_column.down.sql b/db/migrations/postgres/000070_rename_batch_payload_column.down.sql new file mode 100644 index 0000000000..b186daad1d --- /dev/null +++ b/db/migrations/postgres/000070_rename_batch_payload_column.down.sql @@ -0,0 +1 @@ +ALTER TABLE batches RENAME COLUMN manifest TO payload; \ No newline at end of file diff --git a/db/migrations/postgres/000070_rename_batch_payload_column.up.sql 
b/db/migrations/postgres/000070_rename_batch_payload_column.up.sql new file mode 100644 index 0000000000..8f9f9cefab --- /dev/null +++ b/db/migrations/postgres/000070_rename_batch_payload_column.up.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE batches RENAME COLUMN payload TO manifest; +COMMIT; diff --git a/db/migrations/sqlite/000069_rename_batch_payload_column.down.sql b/db/migrations/sqlite/000069_rename_batch_payload_column.down.sql deleted file mode 100644 index 05adb7ac53..0000000000 --- a/db/migrations/sqlite/000069_rename_batch_payload_column.down.sql +++ /dev/null @@ -1,3 +0,0 @@ -DROP INDEX blockchainevents_listener_id; -ALTER TABLE blockchainevents RENAME COLUMN listener_id TO subscription_id; -CREATE INDEX blockchainevents_subscription_id ON blockchainevents(subscription_id); \ No newline at end of file diff --git a/db/migrations/sqlite/000069_rename_batch_payload_column.up.sql b/db/migrations/sqlite/000069_rename_batch_payload_column.up.sql deleted file mode 100644 index 0ed89de72c..0000000000 --- a/db/migrations/sqlite/000069_rename_batch_payload_column.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -DROP INDEX blockchainevents_subscription_id; -ALTER TABLE blockchainevents RENAME COLUMN subscription_id TO listener_id; -CREATE INDEX blockchainevents_listener_id ON blockchainevents(listener_id); \ No newline at end of file diff --git a/db/migrations/sqlite/000070_rename_batch_payload_column.down.sql b/db/migrations/sqlite/000070_rename_batch_payload_column.down.sql new file mode 100644 index 0000000000..8259011d2a --- /dev/null +++ b/db/migrations/sqlite/000070_rename_batch_payload_column.down.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE batches RENAME COLUMN manifest TO payload; +COMMIT; \ No newline at end of file diff --git a/db/migrations/sqlite/000070_rename_batch_payload_column.up.sql b/db/migrations/sqlite/000070_rename_batch_payload_column.up.sql new file mode 100644 index 0000000000..ab488aac59 --- /dev/null +++ 
b/db/migrations/sqlite/000070_rename_batch_payload_column.up.sql @@ -0,0 +1 @@ +ALTER TABLE batches RENAME COLUMN payload TO manifest; diff --git a/internal/database/sqlcommon/batch_sql.go b/internal/database/sqlcommon/batch_sql.go index 67668a35bd..3b1f059dbc 100644 --- a/internal/database/sqlcommon/batch_sql.go +++ b/internal/database/sqlcommon/batch_sql.go @@ -95,11 +95,11 @@ func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.BatchPersist Set("group_hash", batch.Group). Set("created", batch.Created). Set("hash", batch.Hash). - Set("manifest", batch.BatchManifest). + Set("manifest", batch.Manifest). Set("payload_ref", batch.PayloadRef). Set("confirmed", batch.Confirmed). - Set("tx_type", batch.Payload.TX.Type). - Set("tx_id", batch.Payload.TX.ID). + Set("tx_type", batch.TX.Type). + Set("tx_id", batch.TX.ID). Set("node_id", batch.Node). Where(sq.Eq{"id": batch.ID}), func() { @@ -122,11 +122,11 @@ func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.BatchPersist batch.Group, batch.Created, batch.Hash, - batch.Payload, + batch.Manifest, batch.PayloadRef, batch.Confirmed, - batch.Payload.TX.Type, - batch.Payload.TX.ID, + batch.TX.Type, + batch.TX.ID, batch.Node, ), func() { @@ -151,11 +151,11 @@ func (s *SQLCommon) batchResult(ctx context.Context, row *sql.Rows) (*fftypes.Ba &batch.Group, &batch.Created, &batch.Hash, - &batch.Payload, + &batch.Manifest, &batch.PayloadRef, &batch.Confirmed, - &batch.Payload.TX.Type, - &batch.Payload.TX.ID, + &batch.TX.Type, + &batch.TX.ID, &batch.Node, ) if err != nil { diff --git a/internal/database/sqlcommon/batch_sql_test.go b/internal/database/sqlcommon/batch_sql_test.go index 2d32766366..f854c80b1b 100644 --- a/internal/database/sqlcommon/batch_sql_test.go +++ b/internal/database/sqlcommon/batch_sql_test.go @@ -39,24 +39,26 @@ func TestBatch2EWithDB(t *testing.T) { batchID := fftypes.NewUUID() msgID1 := fftypes.NewUUID() batch := &fftypes.BatchPersisted{ - ID: batchID, - Type: 
fftypes.MessageTypeBroadcast, - SignerRef: fftypes.SignerRef{ - Key: "0x12345", - Author: "did:firefly:org/abcd", - }, - Namespace: "ns1", - Hash: fftypes.NewRandB32(), - Created: fftypes.Now(), - Node: fftypes.NewUUID(), - Payload: fftypes.BatchPersistedPayload{ - Messages: []*fftypes.Message{ - {Header: fftypes.MessageHeader{ID: msgID1}}, - }, - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeUnpinned, + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + Type: fftypes.MessageTypeBroadcast, + SignerRef: fftypes.SignerRef{ + Key: "0x12345", + Author: "did:firefly:org/abcd", }, + Namespace: "ns1", + Hash: fftypes.NewRandB32(), + Node: fftypes.NewUUID(), }, + Created: fftypes.Now(), + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeUnpinned, + }, + Manifest: (&fftypes.BatchManifest{ + Messages: []fftypes.MessageRef{ + {ID: msgID1}, + }, + }).String(), } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionBatches, fftypes.ChangeEventTypeCreated, "ns1", batchID, mock.Anything).Return() @@ -79,26 +81,28 @@ func TestBatch2EWithDB(t *testing.T) { msgID2 := fftypes.NewUUID() payloadRef := "" batchUpdated := &fftypes.BatchPersisted{ - ID: batchID, - Type: fftypes.MessageTypeBroadcast, - SignerRef: fftypes.SignerRef{ - Key: "0x12345", - Author: "did:firefly:org/abcd", - }, - Namespace: "ns1", - Hash: fftypes.NewRandB32(), - Created: fftypes.Now(), - Node: fftypes.NewUUID(), - Payload: fftypes.BatchPersistedPayload{ - TX: fftypes.TransactionRef{ - ID: txid, - Type: fftypes.TransactionTypeBatchPin, - }, - Messages: []*fftypes.Message{ - {Header: fftypes.MessageHeader{ID: msgID1}}, - {Header: fftypes.MessageHeader{ID: msgID2}}, + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + Type: fftypes.MessageTypeBroadcast, + SignerRef: fftypes.SignerRef{ + Key: "0x12345", + Author: "did:firefly:org/abcd", }, + Namespace: "ns1", + Hash: fftypes.NewRandB32(), + Node: fftypes.NewUUID(), }, + Created: fftypes.Now(), + TX: fftypes.TransactionRef{ + ID: txid, + 
Type: fftypes.TransactionTypeBatchPin, + }, + Manifest: (&fftypes.BatchManifest{ + Messages: []fftypes.MessageRef{ + {ID: msgID1}, + {ID: msgID2}, + }, + }).String(), PayloadRef: payloadRef, Confirmed: fftypes.Now(), } @@ -175,7 +179,7 @@ func TestUpsertBatchFailSelect(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() batchID := fftypes.NewUUID() - err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{BatchHeader: fftypes.BatchHeader{ID: batchID}}) assert.Regexp(t, "FF10115", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -187,7 +191,7 @@ func TestUpsertBatchFailInsert(t *testing.T) { mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() batchID := fftypes.NewUUID() - err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{BatchHeader: fftypes.BatchHeader{ID: batchID}}) assert.Regexp(t, "FF10116", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -200,7 +204,7 @@ func TestUpsertBatchFailUpdate(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"hash"}).AddRow(hash)) mock.ExpectExec("UPDATE .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() - err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{ID: batchID, Hash: hash}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{BatchHeader: fftypes.BatchHeader{ID: batchID, Hash: hash}}) assert.Regexp(t, "FF10117", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -212,7 +216,7 @@ func TestUpsertBatchFailCommit(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id"})) mock.ExpectExec("INSERT .*").WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectCommit().WillReturnError(fmt.Errorf("pop")) - err := 
s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{BatchHeader: fftypes.BatchHeader{ID: batchID}}) assert.Regexp(t, "FF10119", err) assert.NoError(t, mock.ExpectationsWereMet()) } diff --git a/pkg/database/plugin.go b/pkg/database/plugin.go index c5e2b454ec..7ffd8f968f 100644 --- a/pkg/database/plugin.go +++ b/pkg/database/plugin.go @@ -118,16 +118,16 @@ type iDataCollection interface { type iBatchCollection interface { // UpsertBatch - Upsert a batch - the hash cannot change - UpsertBatch(ctx context.Context, data *fftypes.Batch) (err error) + UpsertBatch(ctx context.Context, data *fftypes.BatchPersisted) (err error) // UpdateBatch - Update data UpdateBatch(ctx context.Context, id *fftypes.UUID, update Update) (err error) // GetBatchByID - Get a batch by ID - GetBatchByID(ctx context.Context, id *fftypes.UUID) (message *fftypes.Batch, err error) + GetBatchByID(ctx context.Context, id *fftypes.UUID) (message *fftypes.BatchPersisted, err error) // GetBatches - Get batches - GetBatches(ctx context.Context, filter Filter) (message []*fftypes.Batch, res *FilterResult, err error) + GetBatches(ctx context.Context, filter Filter) (message []*fftypes.BatchPersisted, res *FilterResult, err error) } type iTransactionCollection interface { From fe67f91c3cc899599238572c9ba2f204ebffa08f Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 4 Mar 2022 12:06:27 -0500 Subject: [PATCH 04/11] Interim work on batch flush state interface Signed-off-by: Peter Broadhurst --- internal/batch/batch_manager.go | 11 ++- internal/batch/batch_manager_test.go | 32 ++++--- internal/batch/batch_processor.go | 86 ++++++++++++------ internal/batchpin/batchpin.go | 6 +- internal/batchpin/batchpin_test.go | 58 ++++++------ internal/batchpin/operations.go | 8 +- internal/batchpin/operations_test.go | 10 +- internal/broadcast/manager.go | 9 +- internal/broadcast/manager_test.go | 10 +- 
internal/broadcast/operations.go | 8 +- internal/data/data_manager.go | 41 +++++++++ internal/data/data_manager_test.go | 131 +++++++++++++++++++++++++++ internal/i18n/en_translations.go | 1 + mocks/batchpinmocks/submitter.go | 4 +- mocks/databasemocks/plugin.go | 20 ++-- mocks/datamocks/manager.go | 23 +++++ pkg/fftypes/batch.go | 8 +- 17 files changed, 356 insertions(+), 110 deletions(-) diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 239f5cc277..b32a4ad178 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -96,13 +96,14 @@ type batchManager struct { startupOffsetRetryAttempts int } -type DispatchHandler func(context.Context, *fftypes.Batch, []*fftypes.Bytes32) error +type DispatchHandler func(context.Context, *BatchFlushState) error type DispatcherOptions struct { - BatchMaxSize uint - BatchMaxBytes int64 - BatchTimeout time.Duration - DisposeTimeout time.Duration + RequiresSharedDataPayloadRefs bool + BatchMaxSize uint + BatchMaxBytes int64 + BatchTimeout time.Duration + DisposeTimeout time.Duration } type dispatcher struct { diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index c9566e862d..6c63b18134 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -43,7 +43,7 @@ func TestE2EDispatchBroadcast(t *testing.T) { mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) readyForDispatch := make(chan bool) waitForDispatch := make(chan *fftypes.Batch) - handler := func(ctx context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + handler := func(ctx context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { _, ok := <-readyForDispatch if !ok { return nil @@ -158,7 +158,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { waitForDispatch := make(chan *fftypes.Batch) var groupID fftypes.Bytes32 _ = 
groupID.UnmarshalText([]byte("44dc0861e69d9bab17dd5e90a8898c2ea156ad04e5fabf83119cc010486e6c1b")) - handler := func(ctx context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + handler := func(ctx context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { _, ok := <-readyForDispatch if !ok { return nil @@ -352,9 +352,12 @@ func TestMessageSequencerUpdateMessagesFail(t *testing.T) { mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) ctx, cancelCtx := context.WithCancel(context.Background()) bm, _ := NewBatchManager(ctx, mni, mdi, mdm) - bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - return nil - }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}) + bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, + func(c context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { + return nil + }, + DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, + ) dataID := fftypes.NewUUID() mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{ @@ -402,10 +405,12 @@ func TestMessageSequencerDispatchFail(t *testing.T) { mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) ctx, cancelCtx := context.WithCancel(context.Background()) bm, _ := NewBatchManager(ctx, mni, mdi, mdm) - bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - cancelCtx() - return fmt.Errorf("fizzle") - }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}) + bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, + func(c context.Context, bp 
*fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { + cancelCtx() + return fmt.Errorf("fizzle") + }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, + ) dataID := fftypes.NewUUID() mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{ @@ -438,9 +443,12 @@ func TestMessageSequencerUpdateBatchFail(t *testing.T) { mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) ctx, cancelCtx := context.WithCancel(context.Background()) bm, _ := NewBatchManager(ctx, mni, mdi, mdm) - bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - return nil - }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}) + bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, + func(c context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { + return nil + }, + DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, + ) dataID := fftypes.NewUUID() mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{ diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 435454e2a9..b13df5c633 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -90,6 +90,13 @@ type batchProcessor struct { conf *batchProcessorConf } +type BatchFlushState struct { + manifest fftypes.BatchManifest + persisted fftypes.BatchPersisted + payload fftypes.BatchPayload + pins []*fftypes.Bytes32 +} + const batchSizeEstimateBase = int64(512) func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, conf *batchProcessorConf, baseRetryConf *retry.Retry) *batchProcessor { @@ -348,47 +355,63 @@ func (bp *batchProcessor) assemblyLoop() { func (bp *batchProcessor) flush(overflow 
bool) error { id, flushWork, byteSize := bp.startFlush(overflow) - batch := bp.buildFlushBatch(id, flushWork) + state := bp.initFlushState(id, flushWork) - pins, err := bp.persistBatch(batch) + err := bp.sealBatch(state) if err != nil { return err } - err = bp.dispatchBatch(batch, pins) + err = bp.dispatchBatch(state, pins) if err != nil { return err } - err = bp.markMessagesDispatched(batch) + err = bp.markMessagesDispatched(state) if err != nil { return err } - bp.endFlush(batch, byteSize) + bp.endFlush(state, byteSize) return nil } -func (bp *batchProcessor) buildFlushBatch(id *fftypes.UUID, newWork []*batchWork) *fftypes.Batch { +func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWork) *BatchFlushState { log.L(bp.ctx).Debugf("Flushing batch %s", id) - batch := &fftypes.Batch{ - ID: id, - Namespace: bp.conf.namespace, - SignerRef: bp.conf.identity, - Group: bp.conf.group, - Payload: fftypes.BatchPayload{}, - Created: fftypes.Now(), - Node: bp.ni.GetNodeUUID(bp.ctx), + state := &BatchFlushState{ + metadata: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: id, + Namespace: bp.conf.namespace, + SignerRef: bp.conf.identity, + Group: bp.conf.group, + Node: bp.ni.GetNodeUUID(bp.ctx), + }, + Created: fftypes.Now(), + }, + manifest: fftypes.BatchManifest{ + ID: id, + }, } - for _, w := range newWork { + for _, w := range flushWork { if w.msg != nil { - w.msg.BatchID = batch.ID + w.msg.BatchID = state.metadata.ID w.msg.State = "" // state should always be set by receivers when loading the batch - batch.Payload.Messages = append(batch.Payload.Messages, w.msg) + state.payload.Messages = append(state.payload.Messages, w.msg.BatchMessage()) + state.manifest.Messages = append(state.manifest.Messages, fftypes.MessageRef{ + ID: w.msg.Header.ID, + Hash: w.msg.Hash, + }) + } + for _, d := range w.data { + state.payload.Data = append(state.payload.Data, d.BatchData(bp.conf.RequiresSharedDataPayloadRefs)) + state.manifest.Data = 
append(state.manifest.Data, fftypes.DataRef{ + ID: d.ID, + Hash: d.Hash, + }) } - batch.Payload.Data = append(batch.Payload.Data, w.data...) } - return batch + return state } func (bp *batchProcessor) maskContext(ctx context.Context, msg *fftypes.Message, topic string) (contextOrPin *fftypes.Bytes32, err error) { @@ -470,35 +493,42 @@ func (bp *batchProcessor) maskContexts(ctx context.Context, batch *fftypes.Batch return contextsOrPins, nil } -func (bp *batchProcessor) persistBatch(batch *fftypes.Batch) (contexts []*fftypes.Bytes32, err error) { +func (bp *batchProcessor) sealBatch(state *BatchFlushState) (err error) { err = bp.retry.Do(bp.ctx, "batch persist", func(attempt int) (retry bool, err error) { return true, bp.database.RunAsGroup(bp.ctx, func(ctx context.Context) (err error) { if bp.conf.txType == fftypes.TransactionTypeBatchPin { // Generate a new Transaction, which will be used to record status of the associated transaction as it happens - if contexts, err = bp.maskContexts(ctx, batch); err != nil { + if state.pins, err = bp.maskContexts(ctx, state); err != nil { return err } } - batch.Payload.TX.Type = bp.conf.txType - if batch.Payload.TX.ID, err = bp.txHelper.SubmitNewTransaction(ctx, batch.Namespace, bp.conf.txType); err != nil { + state.metadata.TX.Type = bp.conf.txType + if state.metadata.TX.ID, err = bp.txHelper.SubmitNewTransaction(ctx, state.metadata.Namespace, bp.conf.txType); err != nil { return err } - batch.Hash = batch.Payload.Hash() + // The hash of the batch, is the hash of the manifest to minimize the compute cost. + // Note in v0.13 and before, it was the hash of the payload - so the inbound route has a fallback to accepting the full payload hash + state.metadata.Manifest = + + mani + log.L(ctx).Debugf("Batch %s sealed. Hash=%s", batch.ID, batch.Hash) - return bp.database.UpsertBatch(ctx, batch) + + // At this point the manifest of the batch is finalized. 
We write it to the database + return bp.database.UpsertBatch(ctx, batchPersisted) }) }) - return contexts, err + return err } -func (bp *batchProcessor) dispatchBatch(batch *fftypes.Batch, pins []*fftypes.Bytes32) error { +func (bp *batchProcessor) dispatchBatch(state *BatchFlushState) error { // Call the dispatcher to do the heavy lifting - will only exit if we're closed return operations.RunWithOperationCache(bp.ctx, func(ctx context.Context) error { return bp.retry.Do(ctx, "batch dispatch", func(attempt int) (retry bool, err error) { - return true, bp.conf.dispatch(ctx, batch, pins) + return true, bp.conf.dispatch(ctx, state) }) }) } diff --git a/internal/batchpin/batchpin.go b/internal/batchpin/batchpin.go index d480f790e4..60f9c652fb 100644 --- a/internal/batchpin/batchpin.go +++ b/internal/batchpin/batchpin.go @@ -31,7 +31,7 @@ import ( type Submitter interface { fftypes.Named - SubmitPinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error + SubmitPinnedBatch(ctx context.Context, batch *fftypes.BatchPersisted, contexts []*fftypes.Bytes32) error // From operations.OperationHandler PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) @@ -67,12 +67,12 @@ func (bp *batchPinSubmitter) Name() string { return "BatchPinSubmitter" } -func (bp *batchPinSubmitter) SubmitPinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { +func (bp *batchPinSubmitter) SubmitPinnedBatch(ctx context.Context, batch *fftypes.BatchPersisted, contexts []*fftypes.Bytes32) error { // The pending blockchain transaction op := fftypes.NewOperation( bp.blockchain, batch.Namespace, - batch.Payload.TX.ID, + batch.TX.ID, fftypes.OpTypeBlockchainBatchPin) addBatchPinInputs(op, batch.ID, contexts) if err := bp.operations.AddOrReuseOperation(ctx, op); err != nil { diff --git a/internal/batchpin/batchpin_test.go b/internal/batchpin/batchpin_test.go index ef3402ecde..e4e62c2ed8 100644 --- 
a/internal/batchpin/batchpin_test.go +++ b/internal/batchpin/batchpin_test.go @@ -69,24 +69,24 @@ func TestSubmitPinnedBatchOk(t *testing.T) { mmi := bp.metrics.(*metricsmocks.Manager) mom := bp.operations.(*operationmocks.Manager) - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "id1", - Key: "0x12345", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: fftypes.NewUUID(), + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "id1", + Key: "0x12345", }, }, + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, } contexts := []*fftypes.Bytes32{} mom.On("AddOrReuseOperation", ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { assert.Equal(t, fftypes.OpTypeBlockchainBatchPin, op.Type) assert.Equal(t, "ut", op.Plugin) - assert.Equal(t, *batch.Payload.TX.ID, *op.Transaction) + assert.Equal(t, *batch.TX.ID, *op.Transaction) return true })).Return(nil) mmi.On("IsMetricsEnabled").Return(false) @@ -111,24 +111,24 @@ func TestSubmitPinnedBatchWithMetricsOk(t *testing.T) { mmi := bp.metrics.(*metricsmocks.Manager) mom := bp.operations.(*operationmocks.Manager) - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "id1", - Key: "0x12345", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: fftypes.NewUUID(), + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "id1", + Key: "0x12345", }, }, + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, } contexts := []*fftypes.Bytes32{} mom.On("AddOrReuseOperation", ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { assert.Equal(t, fftypes.OpTypeBlockchainBatchPin, op.Type) assert.Equal(t, "ut", op.Plugin) - assert.Equal(t, *batch.Payload.TX.ID, *op.Transaction) + assert.Equal(t, *batch.TX.ID, *op.Transaction) return true 
})).Return(nil) mmi.On("IsMetricsEnabled").Return(true) @@ -152,17 +152,17 @@ func TestSubmitPinnedBatchOpFail(t *testing.T) { mom := bp.operations.(*operationmocks.Manager) mmi := bp.metrics.(*metricsmocks.Manager) - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "id1", - Key: "0x12345", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: fftypes.NewUUID(), + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "id1", + Key: "0x12345", }, }, + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, } contexts := []*fftypes.Bytes32{} diff --git a/internal/batchpin/operations.go b/internal/batchpin/operations.go index 80b481b868..3196827312 100644 --- a/internal/batchpin/operations.go +++ b/internal/batchpin/operations.go @@ -25,8 +25,8 @@ import ( ) type batchPinData struct { - Batch *fftypes.Batch `json:"batch"` - Contexts []*fftypes.Bytes32 `json:"contexts"` + Batch *fftypes.BatchPersisted `json:"batch"` + Contexts []*fftypes.Bytes32 `json:"contexts"` } func addBatchPinInputs(op *fftypes.Operation, batchID *fftypes.UUID, contexts []*fftypes.Bytes32) { @@ -82,7 +82,7 @@ func (bp *batchPinSubmitter) RunOperation(ctx context.Context, op *fftypes.Prepa batch := data.Batch return false, bp.blockchain.SubmitBatchPin(ctx, op.ID, nil /* TODO: ledger selection */, batch.Key, &blockchain.BatchPin{ Namespace: batch.Namespace, - TransactionID: batch.Payload.TX.ID, + TransactionID: batch.TX.ID, BatchID: batch.ID, BatchHash: batch.Hash, BatchPayloadRef: batch.PayloadRef, @@ -94,7 +94,7 @@ func (bp *batchPinSubmitter) RunOperation(ctx context.Context, op *fftypes.Prepa } } -func opBatchPin(op *fftypes.Operation, batch *fftypes.Batch, contexts []*fftypes.Bytes32) *fftypes.PreparedOperation { +func opBatchPin(op *fftypes.Operation, batch *fftypes.BatchPersisted, contexts []*fftypes.Bytes32) *fftypes.PreparedOperation { return 
&fftypes.PreparedOperation{ ID: op.ID, Type: op.Type, diff --git a/internal/batchpin/operations_test.go b/internal/batchpin/operations_test.go index 4ffa84ab6c..28a8d99eb6 100644 --- a/internal/batchpin/operations_test.go +++ b/internal/batchpin/operations_test.go @@ -33,10 +33,12 @@ func TestPrepareAndRunBatchPin(t *testing.T) { Type: fftypes.OpTypeBlockchainBatchPin, ID: fftypes.NewUUID(), } - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Key: "0x123", + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Key: "0x123", + }, }, } contexts := []*fftypes.Bytes32{ diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go index e4f9a17d3a..0cebac57fc 100644 --- a/internal/broadcast/manager.go +++ b/internal/broadcast/manager.go @@ -95,10 +95,11 @@ func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Ma } bo := batch.DispatcherOptions{ - BatchMaxSize: config.GetUint(config.BroadcastBatchSize), - BatchMaxBytes: bm.maxBatchPayloadLength, - BatchTimeout: config.GetDuration(config.BroadcastBatchTimeout), - DisposeTimeout: config.GetDuration(config.BroadcastBatchAgentTimeout), + RequiresSharedDataPayloadRefs: true, + BatchMaxSize: config.GetUint(config.BroadcastBatchSize), + BatchMaxBytes: bm.maxBatchPayloadLength, + BatchTimeout: config.GetDuration(config.BroadcastBatchTimeout), + DisposeTimeout: config.GetDuration(config.BroadcastBatchAgentTimeout), } ba.RegisterDispatcher(broadcastDispatcherName, diff --git a/internal/broadcast/manager_test.go b/internal/broadcast/manager_test.go index ef47cc8671..edffd32dc7 100644 --- a/internal/broadcast/manager_test.go +++ b/internal/broadcast/manager_test.go @@ -183,7 +183,9 @@ func TestDispatchBatchSubmitBatchPinSucceed(t *testing.T) { defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, } mdi := 
bm.database.(*databasemocks.Plugin) @@ -208,7 +210,11 @@ func TestDispatchBatchSubmitBroadcastFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - batch := &fftypes.Batch{SignerRef: fftypes.SignerRef{Author: "wrong", Key: "wrong"}} + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + SignerRef: fftypes.SignerRef{Author: "wrong", Key: "wrong"}, + }, + } mdi := bm.database.(*databasemocks.Plugin) mbp := bm.batchpin.(*batchpinmocks.Submitter) diff --git a/internal/broadcast/operations.go b/internal/broadcast/operations.go index 372637ee9b..74c7fe2b90 100644 --- a/internal/broadcast/operations.go +++ b/internal/broadcast/operations.go @@ -47,12 +47,16 @@ func (bm *broadcastManager) PrepareOperation(ctx context.Context, op *fftypes.Op if err != nil { return nil, err } - batch, err := bm.database.GetBatchByID(ctx, id) + bp, err := bm.database.GetBatchByID(ctx, id) if err != nil { return nil, err - } else if batch == nil { + } else if bp == nil { return nil, i18n.NewError(ctx, i18n.Msg404NotFound) } + batch, err := bm.data.HydrateBatch(ctx, bp) + if err != nil { + return nil, err + } return opBatchBroadcast(op, batch), nil default: diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index 7d2ac40b2a..c9101b0cc2 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -18,6 +18,7 @@ package data import ( "context" + "encoding/json" "fmt" "io" "time" @@ -44,6 +45,7 @@ type Manager interface { UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) CopyBlobPStoDX(ctx context.Context, data *fftypes.Data) (blob *fftypes.Blob, err error) DownloadBLOB(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error) + HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) } type dataManager struct { @@ -345,3 +347,42 @@ func (dm *dataManager) resolveInlineData(ctx 
context.Context, ns string, inData } return refs, dataToPublish, nil } + +// HydrateBatch fetches the full messages for a persisted batch, ready for transmission +func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { + + var manifest fftypes.BatchManifest + err := json.Unmarshal([]byte(persistedBatch.Manifest), &manifest) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, fmt.Sprintf("batch %s manifest", persistedBatch.ID)) + } + + batch := &fftypes.Batch{ + BatchHeader: persistedBatch.BatchHeader, + PayloadRef: persistedBatch.PayloadRef, + Payload: fftypes.BatchPayload{ + TX: persistedBatch.TX, + Messages: make([]*fftypes.Message, len(manifest.Messages)), + Data: make([]*fftypes.Data, len(manifest.Data)), + }, + } + + for i, mr := range manifest.Messages { + m, err := dm.database.GetMessageByID(ctx, mr.ID) + if err != nil || m == nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgFailedToRetrieve, "message", mr.ID) + } + // BatchMessage removes any fields that could change after the batch was first assembled on the sender + batch.Payload.Messages[i] = m.BatchMessage() + } + for i, dr := range manifest.Data { + d, err := dm.database.GetDataByID(ctx, dr.ID, true) + if err != nil || d == nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgFailedToRetrieve, "data", dr.ID) + } + // BatchData removes any fields that could change after the batch was first assembled on the sender + batch.Payload.Data[i] = d.BatchData(batch.Type) + } + + return batch, nil +} diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index 6a88691775..e34b68cd90 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -675,3 +675,134 @@ func TestVerifyNamespaceExistsOk(t *testing.T) { err := dm.VerifyNamespaceExists(ctx, "ns1") assert.NoError(t, err) } + +func TestHydrateBatchOK(t *testing.T) { + dm, ctx, cancel :=
newTestDataManager(t) + defer cancel() + + batchID := fftypes.NewUUID() + msgID := fftypes.NewUUID() + msgHash := fftypes.NewRandB32() + dataID := fftypes.NewUUID() + dataHash := fftypes.NewRandB32() + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Type: fftypes.MessageTypeBroadcast, + ID: batchID, + Namespace: "ns1", + }, + Manifest: fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + batchID, msgID, msgHash, dataID, dataHash, + ), + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, + } + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("GetMessageByID", ctx, msgID).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: msgID}, + Hash: msgHash, + Confirmed: fftypes.Now(), + }, nil) + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ + ID: dataID, + Hash: dataHash, + Created: fftypes.Now(), + }, nil) + + batch, err := dm.HydrateBatch(ctx, bp) + assert.NoError(t, err) + assert.Equal(t, bp.BatchHeader, batch.BatchHeader) + assert.Equal(t, bp.TX, batch.Payload.TX) + assert.Equal(t, msgID, batch.Payload.Messages[0].Header.ID) + assert.Equal(t, msgHash, batch.Payload.Messages[0].Hash) + assert.Nil(t, batch.Payload.Messages[0].Confirmed) + assert.Equal(t, dataID, batch.Payload.Data[0].ID) + assert.Equal(t, dataHash, batch.Payload.Data[0].Hash) + assert.Equal(t, dataHash, batch.Payload.Data[0].Hash) + assert.Nil(t, batch.Payload.Data[0].Created) + + mdi.AssertExpectations(t) +} + +func TestHydrateBatchDataFail(t *testing.T) { + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + batchID := fftypes.NewUUID() + msgID := fftypes.NewUUID() + msgHash := fftypes.NewRandB32() + dataID := fftypes.NewUUID() + dataHash := fftypes.NewRandB32() + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Type: fftypes.MessageTypeBroadcast, + ID: batchID, + Namespace: "ns1", + }, + Manifest: 
fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + batchID, msgID, msgHash, dataID, dataHash, + ), + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, + } + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("GetMessageByID", ctx, msgID).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: msgID}, + Hash: msgHash, + Confirmed: fftypes.Now(), + }, nil) + mdi.On("GetDataByID", ctx, dataID, true).Return(nil, fmt.Errorf("pop")) + + _, err := dm.HydrateBatch(ctx, bp) + assert.Regexp(t, "FF10372.*pop", err) + + mdi.AssertExpectations(t) +} + +func TestHydrateBatchMsgNotFound(t *testing.T) { + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + batchID := fftypes.NewUUID() + msgID := fftypes.NewUUID() + msgHash := fftypes.NewRandB32() + dataID := fftypes.NewUUID() + dataHash := fftypes.NewRandB32() + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Type: fftypes.MessageTypeBroadcast, + ID: batchID, + Namespace: "ns1", + }, + Manifest: fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + batchID, msgID, msgHash, dataID, dataHash, + ), + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, + } + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("GetMessageByID", ctx, msgID).Return(nil, nil) + + _, err := dm.HydrateBatch(ctx, bp) + assert.Regexp(t, "FF10372", err) + + mdi.AssertExpectations(t) +} + +func TestHydrateBatchMsgBadManifest(t *testing.T) { + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + bp := &fftypes.BatchPersisted{ + Manifest: `!json`, + } + + _, err := dm.HydrateBatch(ctx, bp) + assert.Regexp(t, "FF10151", err) +} diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go index de93364dde..3d3b638484 100644 --- a/internal/i18n/en_translations.go +++ b/internal/i18n/en_translations.go @@ -288,4 +288,5 @@ var ( MsgTokenApprovalFailed = ffm("FF10369", "Token approval with ID 
'%s' failed. Please check the FireFly logs for more information") MsgEventNotFound = ffm("FF10370", "Event with name '%s' not found", 400) MsgOperationNotSupported = ffm("FF10371", "Operation not supported", 400) + MsgFailedToRetrieve = ffm("FF10372", "Failed to retrieve %s %s", 500) ) diff --git a/mocks/batchpinmocks/submitter.go b/mocks/batchpinmocks/submitter.go index 612c3ef257..cff0bd0f08 100644 --- a/mocks/batchpinmocks/submitter.go +++ b/mocks/batchpinmocks/submitter.go @@ -73,11 +73,11 @@ func (_m *Submitter) RunOperation(ctx context.Context, op *fftypes.PreparedOpera } // SubmitPinnedBatch provides a mock function with given fields: ctx, batch, contexts -func (_m *Submitter) SubmitPinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { +func (_m *Submitter) SubmitPinnedBatch(ctx context.Context, batch *fftypes.BatchPersisted, contexts []*fftypes.Bytes32) error { ret := _m.Called(ctx, batch, contexts) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Batch, []*fftypes.Bytes32) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted, []*fftypes.Bytes32) error); ok { r0 = rf(ctx, batch, contexts) } else { r0 = ret.Error(0) diff --git a/mocks/databasemocks/plugin.go b/mocks/databasemocks/plugin.go index 0685d7f639..b7b75d9454 100644 --- a/mocks/databasemocks/plugin.go +++ b/mocks/databasemocks/plugin.go @@ -162,15 +162,15 @@ func (_m *Plugin) DeleteSubscriptionByID(ctx context.Context, id *fftypes.UUID) } // GetBatchByID provides a mock function with given fields: ctx, id -func (_m *Plugin) GetBatchByID(ctx context.Context, id *fftypes.UUID) (*fftypes.Batch, error) { +func (_m *Plugin) GetBatchByID(ctx context.Context, id *fftypes.UUID) (*fftypes.BatchPersisted, error) { ret := _m.Called(ctx, id) - var r0 *fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) *fftypes.Batch); ok { + var r0 *fftypes.BatchPersisted + if rf, ok := 
ret.Get(0).(func(context.Context, *fftypes.UUID) *fftypes.BatchPersisted); ok { r0 = rf(ctx, id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*fftypes.Batch) + r0 = ret.Get(0).(*fftypes.BatchPersisted) } } @@ -185,15 +185,15 @@ func (_m *Plugin) GetBatchByID(ctx context.Context, id *fftypes.UUID) (*fftypes. } // GetBatches provides a mock function with given fields: ctx, filter -func (_m *Plugin) GetBatches(ctx context.Context, filter database.Filter) ([]*fftypes.Batch, *database.FilterResult, error) { +func (_m *Plugin) GetBatches(ctx context.Context, filter database.Filter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) { ret := _m.Called(ctx, filter) - var r0 []*fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, database.Filter) []*fftypes.Batch); ok { + var r0 []*fftypes.BatchPersisted + if rf, ok := ret.Get(0).(func(context.Context, database.Filter) []*fftypes.BatchPersisted); ok { r0 = rf(ctx, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Batch) + r0 = ret.Get(0).([]*fftypes.BatchPersisted) } } @@ -2558,11 +2558,11 @@ func (_m *Plugin) UpdateVerifier(ctx context.Context, hash *fftypes.Bytes32, upd } // UpsertBatch provides a mock function with given fields: ctx, data -func (_m *Plugin) UpsertBatch(ctx context.Context, data *fftypes.Batch) error { +func (_m *Plugin) UpsertBatch(ctx context.Context, data *fftypes.BatchPersisted) error { ret := _m.Called(ctx, data) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Batch) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted) error); ok { r0 = rf(ctx, data) } else { r0 = ret.Error(0) diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index b932143e37..954dbeff99 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -116,6 +116,29 @@ func (_m *Manager) GetMessageData(ctx context.Context, msg *fftypes.Message, wit return r0, r1, r2 } +// HydrateBatch provides a mock 
function with given fields: ctx, persistedBatch +func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { + ret := _m.Called(ctx, persistedBatch) + + var r0 *fftypes.Batch + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted) *fftypes.Batch); ok { + r0 = rf(ctx, persistedBatch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.Batch) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.BatchPersisted) error); ok { + r1 = rf(ctx, persistedBatch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ResolveInlineDataBroadcast provides a mock function with given fields: ctx, ns, inData func (_m *Manager) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, []*fftypes.DataAndBlob, error) { ret := _m.Called(ctx, ns, inData) diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index fe5e7559f1..d788cd3723 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -23,10 +23,9 @@ import ( // BatchHeader is the common fields between the serialized batch, and the batch manifest type BatchHeader struct { - ID *UUID `json:"id"` - Namespace string `json:"namespace"` - Type MessageType `json:"type"` - Node *UUID `json:"node,omitempty"` + ID *UUID `json:"id"` + Namespace string `json:"namespace"` + Node *UUID `json:"node,omitempty"` SignerRef Group *Bytes32 `jdon:"group,omitempty"` Hash *Bytes32 `json:"hash"` @@ -46,7 +45,6 @@ type Batch struct { BatchHeader Payload BatchPayload `json:"payload"` PayloadRef string `json:"payloadRef,omitempty"` - Blobs []*Bytes32 `json:"blobs,omitempty"` } // BatchPersisted is the structure written to the database From 2b903a44982735f957b154be0b20411db30e5a84 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 4 Mar 2022 14:31:42 -0500 Subject: [PATCH 05/11] Broadcast interface with batch.DispatchState Signed-off-by: Peter Broadhurst --- 
internal/batch/batch_manager.go | 2 +- internal/batch/batch_manager_test.go | 30 +++---- internal/batch/batch_processor.go | 99 +++++++++++------------ internal/batch/batch_processor_test.go | 104 ++++++++++++------------- internal/broadcast/manager.go | 14 ++-- internal/broadcast/manager_test.go | 47 +++++++---- internal/broadcast/operations.go | 2 +- internal/broadcast/operations_test.go | 55 +++++++++++-- internal/data/data_manager.go | 6 +- internal/data/data_manager_test.go | 11 +-- internal/i18n/en_translations.go | 1 + mocks/datamocks/manager.go | 14 ++-- pkg/fftypes/batch.go | 24 +++--- pkg/fftypes/batch_test.go | 2 - pkg/fftypes/data.go | 8 +- 15 files changed, 236 insertions(+), 183 deletions(-) diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index b32a4ad178..d3ab1dbd35 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -96,7 +96,7 @@ type batchManager struct { startupOffsetRetryAttempts int } -type DispatchHandler func(context.Context, *BatchFlushState) error +type DispatchHandler func(context.Context, *DispatchState) error type DispatcherOptions struct { RequiresSharedDataPayloadRefs bool diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index 6c63b18134..c38fd5ca32 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -42,20 +42,20 @@ func TestE2EDispatchBroadcast(t *testing.T) { mni := &sysmessagingmocks.LocalNodeInfo{} mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) readyForDispatch := make(chan bool) - waitForDispatch := make(chan *fftypes.Batch) - handler := func(ctx context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { + waitForDispatch := make(chan *DispatchState) + handler := func(ctx context.Context, state *DispatchState) error { _, ok := <-readyForDispatch if !ok { return nil } - assert.Len(t, s, 2) + assert.Len(t, state.Pins, 2) h := 
sha256.New() nonceBytes, _ := hex.DecodeString( "746f70696331", /*| topic1 | */ ) // little endian 12345 in 8 byte hex h.Write(nonceBytes) - assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), s[0].String()) + assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), state.Pins[0].String()) h = sha256.New() nonceBytes, _ = hex.DecodeString( @@ -63,9 +63,9 @@ func TestE2EDispatchBroadcast(t *testing.T) { /*| topic2 | */ ) // little endian 12345 in 8 byte hex h.Write(nonceBytes) - assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), s[1].String()) + assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), state.Pins[1].String()) - waitForDispatch <- b + waitForDispatch <- state return nil } ctx, cancel := context.WithCancel(context.Background()) @@ -155,15 +155,15 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { mni := &sysmessagingmocks.LocalNodeInfo{} mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) readyForDispatch := make(chan bool) - waitForDispatch := make(chan *fftypes.Batch) + waitForDispatch := make(chan *DispatchState) var groupID fftypes.Bytes32 _ = groupID.UnmarshalText([]byte("44dc0861e69d9bab17dd5e90a8898c2ea156ad04e5fabf83119cc010486e6c1b")) - handler := func(ctx context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { + handler := func(ctx context.Context, state *DispatchState) error { _, ok := <-readyForDispatch if !ok { return nil } - assert.Len(t, s, 2) + assert.Len(t, state.Pins, 2) h := sha256.New() nonceBytes, _ := hex.DecodeString( "746f70696331" + "44dc0861e69d9bab17dd5e90a8898c2ea156ad04e5fabf83119cc010486e6c1b" + "6469643a66697265666c793a6f72672f61626364" + "0000000000003039", @@ -171,7 +171,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { /*| context | | sender + nonce */ ) // little endian 12345 in 8 byte hex h.Write(nonceBytes) - assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), s[0].String()) + assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), state.Pins[0].String()) h = 
sha256.New() nonceBytes, _ = hex.DecodeString( @@ -180,8 +180,8 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { /*| context | | sender + nonce */ ) // little endian 12345 in 8 byte hex h.Write(nonceBytes) - assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), s[1].String()) - waitForDispatch <- b + assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), state.Pins[1].String()) + waitForDispatch <- state return nil } ctx, cancel := context.WithCancel(context.Background()) @@ -353,7 +353,7 @@ func TestMessageSequencerUpdateMessagesFail(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) bm, _ := NewBatchManager(ctx, mni, mdi, mdm) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, - func(c context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { + func(c context.Context, state *DispatchState) error { return nil }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, @@ -406,7 +406,7 @@ func TestMessageSequencerDispatchFail(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) bm, _ := NewBatchManager(ctx, mni, mdi, mdm) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, - func(c context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { + func(c context.Context, state *DispatchState) error { cancelCtx() return fmt.Errorf("fizzle") }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, @@ -444,7 +444,7 @@ func TestMessageSequencerUpdateBatchFail(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) bm, _ := NewBatchManager(ctx, mni, mdi, mdm) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, - func(c context.Context, bp *fftypes.BatchPersisted, b *fftypes.Batch, s []*fftypes.Bytes32) error { + func(c 
context.Context, state *DispatchState) error { return nil }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index b13df5c633..0525e5978b 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -90,11 +90,11 @@ type batchProcessor struct { conf *batchProcessorConf } -type BatchFlushState struct { - manifest fftypes.BatchManifest - persisted fftypes.BatchPersisted - payload fftypes.BatchPayload - pins []*fftypes.Bytes32 +type DispatchState struct { + Manifest *fftypes.BatchManifest + Persisted fftypes.BatchPersisted + Payload fftypes.BatchPayload + Pins []*fftypes.Bytes32 } const batchSizeEstimateBase = int64(512) @@ -242,7 +242,7 @@ func (bp *batchProcessor) startFlush(overflow bool) (id *fftypes.UUID, flushAsse return id, flushAssembly, byteSize } -func (bp *batchProcessor) endFlush(batch *fftypes.Batch, byteSize int64) { +func (bp *batchProcessor) endFlush(state *DispatchState, byteSize int64) { bp.statusMux.Lock() defer bp.statusMux.Unlock() fs := &bp.flushStatus @@ -258,10 +258,10 @@ func (bp *batchProcessor) endFlush(batch *fftypes.Batch, byteSize int64) { fs.totalBytesFlushed += byteSize fs.AverageBatchBytes = (fs.totalBytesFlushed / fs.TotalBatches) - fs.totalMessagesFlushed += int64(len(batch.Payload.Messages)) + fs.totalMessagesFlushed += int64(len(state.Payload.Messages)) fs.AverageBatchMessages = math.Round((float64(fs.totalMessagesFlushed)/float64(fs.TotalBatches))*100) / 100 - fs.totalDataFlushed += int64(len(batch.Payload.Data)) + fs.totalDataFlushed += int64(len(state.Payload.Data)) fs.AverageBatchData = math.Round((float64(fs.totalDataFlushed)/float64(fs.TotalBatches))*100) / 100 } @@ -357,17 +357,25 @@ func (bp *batchProcessor) flush(overflow bool) error { id, flushWork, byteSize := bp.startFlush(overflow) state := bp.initFlushState(id, flushWork) + // Sealing phase: assigns persisted pins to messages, and finalizes the 
manifest err := bp.sealBatch(state) if err != nil { return err } - err = bp.dispatchBatch(state, pins) + // Dispatch phase: the heavy lifting work - calling plugins to do the hard work of the batch. + // Must manage its own database updates if it performs them, and any that result in updates + // to the payload must be reflected back on the payload objects. + // For example updates to the Blob.Public of a Data entry must be written to the DB, + // and updated in Payload.Data[] array. + err = bp.dispatchBatch(state) if err != nil { return err } - err = bp.markMessagesDispatched(state) + // Dispatched phase: Writes back the changes to the DB, so that these messages + // are all tagged as part of this batch, and won't be included in any future batches. + err = bp.markPayloadDispatched(state) if err != nil { return err } @@ -376,10 +384,10 @@ func (bp *batchProcessor) flush(overflow bool) error { return nil } -func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWork) *BatchFlushState { +func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWork) *DispatchState { log.L(bp.ctx).Debugf("Flushing batch %s", id) - state := &BatchFlushState{ - metadata: fftypes.BatchPersisted{ + state := &DispatchState{ + Persisted: fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ ID: id, Namespace: bp.conf.namespace, @@ -389,28 +397,16 @@ func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWor }, Created: fftypes.Now(), }, - manifest: fftypes.BatchManifest{ - ID: id, - }, } for _, w := range flushWork { if w.msg != nil { - w.msg.BatchID = state.metadata.ID - w.msg.State = "" // state should always be set by receivers when loading the batch - state.payload.Messages = append(state.payload.Messages, w.msg.BatchMessage()) - state.manifest.Messages = append(state.manifest.Messages, fftypes.MessageRef{ - ID: w.msg.Header.ID, - Hash: w.msg.Hash, - }) + state.Payload.Messages =
append(state.Payload.Messages, w.msg.BatchMessage()) } for _, d := range w.data { - state.payload.Data = append(state.payload.Data, d.BatchData(bp.conf.RequiresSharedDataPayloadRefs)) - state.manifest.Data = append(state.manifest.Data, fftypes.DataRef{ - ID: d.ID, - Hash: d.Hash, - }) + state.Payload.Data = append(state.Payload.Data, d.BatchData(bp.conf.RequiresSharedDataPayloadRefs)) } } + state.Manifest = state.Payload.Manifest(id) return state } @@ -456,11 +452,11 @@ func (bp *batchProcessor) maskContext(ctx context.Context, msg *fftypes.Message, return fftypes.HashResult(hashBuilder), err } -func (bp *batchProcessor) maskContexts(ctx context.Context, batch *fftypes.Batch) ([]*fftypes.Bytes32, error) { +func (bp *batchProcessor) maskContexts(ctx context.Context, payload *fftypes.BatchPayload) ([]*fftypes.Bytes32, error) { // Calculate the sequence hashes pinsAssigned := false - contextsOrPins := make([]*fftypes.Bytes32, 0, len(batch.Payload.Messages)) - for _, msg := range batch.Payload.Messages { + contextsOrPins := make([]*fftypes.Bytes32, 0, len(payload.Messages)) + for _, msg := range payload.Messages { if len(msg.Pins) > 0 { // We have already allocated pins to this message, we cannot re-allocate. 
log.L(ctx).Debugf("Message %s already has %d pins allocated", msg.Header.ID, len(msg.Pins)) @@ -493,38 +489,37 @@ func (bp *batchProcessor) maskContexts(ctx context.Context, batch *fftypes.Batch return contextsOrPins, nil } -func (bp *batchProcessor) sealBatch(state *BatchFlushState) (err error) { +func (bp *batchProcessor) sealBatch(state *DispatchState) (err error) { err = bp.retry.Do(bp.ctx, "batch persist", func(attempt int) (retry bool, err error) { return true, bp.database.RunAsGroup(bp.ctx, func(ctx context.Context) (err error) { if bp.conf.txType == fftypes.TransactionTypeBatchPin { // Generate a new Transaction, which will be used to record status of the associated transaction as it happens - if state.pins, err = bp.maskContexts(ctx, state); err != nil { + if state.Pins, err = bp.maskContexts(ctx, &state.Payload); err != nil { return err } } - state.metadata.TX.Type = bp.conf.txType - if state.metadata.TX.ID, err = bp.txHelper.SubmitNewTransaction(ctx, state.metadata.Namespace, bp.conf.txType); err != nil { + state.Persisted.TX.Type = bp.conf.txType + if state.Persisted.TX.ID, err = bp.txHelper.SubmitNewTransaction(ctx, state.Persisted.Namespace, bp.conf.txType); err != nil { return err } // The hash of the batch, is the hash of the manifest to minimize the compute cost. // Note in v0.13 and before, it was the hash of the payload - so the inbound route has a fallback to accepting the full payload hash - state.metadata.Manifest = - - mani + state.Persisted.Manifest = state.Manifest.String() + state.Persisted.Hash = state.Manifest.Hash() - log.L(ctx).Debugf("Batch %s sealed. Hash=%s", batch.ID, batch.Hash) + log.L(ctx).Debugf("Batch %s sealed. Hash=%s", state.Persisted.ID, state.Persisted.Hash) // At this point the manifest of the batch is finalized. 
We write it to the database - return bp.database.UpsertBatch(ctx, batchPersisted) + return bp.database.UpsertBatch(ctx, &state.Persisted) }) }) return err } -func (bp *batchProcessor) dispatchBatch(state *BatchFlushState) error { +func (bp *batchProcessor) dispatchBatch(state *DispatchState) error { // Call the dispatcher to do the heavy lifting - will only exit if we're closed return operations.RunWithOperationCache(bp.ctx, func(ctx context.Context) error { return bp.retry.Do(ctx, "batch dispatch", func(attempt int) (retry bool, err error) { @@ -533,12 +528,12 @@ func (bp *batchProcessor) dispatchBatch(state *BatchFlushState) error { }) } -func (bp *batchProcessor) markMessagesDispatched(batch *fftypes.Batch) error { +func (bp *batchProcessor) markPayloadDispatched(state *DispatchState) error { return bp.retry.Do(bp.ctx, "mark dispatched messages", func(attempt int) (retry bool, err error) { return true, bp.database.RunAsGroup(bp.ctx, func(ctx context.Context) (err error) { // Update all the messages in the batch with the batch ID - msgIDs := make([]driver.Value, len(batch.Payload.Messages)) - for i, msg := range batch.Payload.Messages { + msgIDs := make([]driver.Value, len(state.Payload.Messages)) + for i, msg := range state.Payload.Messages { msgIDs[i] = msg.Header.ID } fb := database.MessageQueryFactory.NewFilter(ctx) @@ -547,28 +542,28 @@ func (bp *batchProcessor) markMessagesDispatched(batch *fftypes.Batch) error { fb.Eq("state", fftypes.MessageStateReady), // In the outside chance the next state transition happens first (which supersedes this) ) - var update database.Update + var allMsgsUpdate database.Update if bp.conf.txType == fftypes.TransactionTypeBatchPin { // Sent state waiting for confirm - update = database.MessageQueryFactory.NewUpdate(ctx). - Set("batch", batch.ID). // Mark the batch they are in + allMsgsUpdate = database.MessageQueryFactory.NewUpdate(ctx). + Set("batch", state.Persisted.ID). 
// Mark the batch they are in Set("state", fftypes.MessageStateSent) // Set them sent, so they won't be picked up and re-sent after restart/rewind } else { // Immediate confirmation if no batch pinning - update = database.MessageQueryFactory.NewUpdate(ctx). - Set("batch", batch.ID). + allMsgsUpdate = database.MessageQueryFactory.NewUpdate(ctx). + Set("batch", state.Persisted.ID). Set("state", fftypes.MessageStateConfirmed). Set("confirmed", fftypes.Now()) } - if err = bp.database.UpdateMessages(ctx, filter, update); err != nil { + if err = bp.database.UpdateMessages(ctx, filter, allMsgsUpdate); err != nil { return err } if bp.conf.txType == fftypes.TransactionTypeUnpinned { - for _, msg := range batch.Payload.Messages { + for _, msg := range state.Payload.Messages { // Emit a confirmation event locally immediately - event := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, batch.Namespace, msg.Header.ID, batch.Payload.TX.ID) + event := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, state.Persisted.Namespace, msg.Header.ID, state.Persisted.TX.ID) event.Correlator = msg.Header.CID if err := bp.database.InsertEvent(ctx, event); err != nil { return err diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index 6f7a6a0d2c..7c69fc61e1 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -66,9 +66,9 @@ func TestUnfilledBatch(t *testing.T) { log.SetLevel("debug") config.Reset() - dispatched := make(chan *fftypes.Batch) - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - dispatched <- b + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state return nil }) @@ -104,9 +104,9 @@ func TestBatchSizeOverflow(t *testing.T) { log.SetLevel("debug") config.Reset() - dispatched := make(chan *fftypes.Batch) - mdi, bp := 
newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - dispatched <- b + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state return nil }) bp.conf.BatchMaxBytes = batchSizeEstimateBase + (&fftypes.Message{}).EstimateSize(false) + 100 @@ -119,11 +119,11 @@ func TestBatchSizeOverflow(t *testing.T) { mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeBatchPin).Return(fftypes.NewUUID(), nil) // Dispatch the work + msgIDs := []*fftypes.UUID{fftypes.NewUUID(), fftypes.NewUUID()} go func() { for i := 0; i < 2; i++ { - msgid := fftypes.NewUUID() bp.newWork <- &batchWork{ - msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}, Sequence: int64(1000 + i)}, + msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgIDs[i]}, Sequence: int64(1000 + i)}, } } }() @@ -134,26 +134,26 @@ func TestBatchSizeOverflow(t *testing.T) { // Check we got all messages across two batches assert.Equal(t, 1, len(batch1.Payload.Messages)) - assert.Equal(t, int64(1000), batch1.Payload.Messages[0].Sequence) + assert.Equal(t, msgIDs[0], batch1.Payload.Messages[0].Header.ID) assert.Equal(t, 1, len(batch2.Payload.Messages)) - assert.Equal(t, int64(1001), batch2.Payload.Messages[0].Sequence) + assert.Equal(t, msgIDs[1], batch2.Payload.Messages[0].Header.ID) bp.cancelCtx() <-bp.done } func TestCloseToUnblockDispatch(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return fmt.Errorf("pop") }) bp.cancelCtx() - bp.dispatchBatch(&fftypes.Batch{}, []*fftypes.Bytes32{}) + bp.dispatchBatch(&DispatchState{}) <-bp.done } func TestCloseToUnblockUpsertBatch(t *testing.T) { - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + mdi, 
bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.retry.MaximumDelay = 1 * time.Microsecond @@ -187,7 +187,7 @@ func TestCloseToUnblockUpsertBatch(t *testing.T) { } func TestCalcPinsFail(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.cancelCtx() @@ -196,8 +196,12 @@ func TestCalcPinsFail(t *testing.T) { mockRunAsGroupPassthrough(mdi) gid := fftypes.NewRandB32() - _, err := bp.persistBatch(&fftypes.Batch{ - Group: gid, + err := bp.sealBatch(&DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: gid, + }, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ @@ -215,7 +219,7 @@ func TestCalcPinsFail(t *testing.T) { } func TestAddWorkInRecentlyFlushed(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.flushedSequences = []int64{100, 500, 400, 900, 200, 700} @@ -229,7 +233,7 @@ func TestAddWorkInRecentlyFlushed(t *testing.T) { } func TestAddWorkInSortDeDup(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.assemblyQueue = []*batchWork{ @@ -254,7 +258,7 @@ func TestAddWorkInSortDeDup(t *testing.T) { } func TestStartFlushOverflow(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) batchID := fftypes.NewUUID() @@ -283,7 +287,7 @@ func 
TestStartFlushOverflow(t *testing.T) { } func TestStartQuiesceNonBlocking(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.startQuiesce() @@ -294,9 +298,9 @@ func TestMarkMessageDispatchedUnpinnedOK(t *testing.T) { log.SetLevel("debug") config.Reset() - dispatched := make(chan *fftypes.Batch) - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - dispatched <- b + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state return nil }) bp.conf.txType = fftypes.TransactionTypeUnpinned @@ -336,35 +340,33 @@ func TestMaskContextsDuplicate(t *testing.T) { log.SetLevel("debug") config.Reset() - dispatched := make(chan *fftypes.Batch) - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - dispatched <- b + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state return nil }) mdi.On("UpsertNonceNext", mock.Anything, mock.Anything).Return(nil).Once() mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - batch := &fftypes.Batch{ - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{ - { - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - Type: fftypes.MessageTypePrivate, - Group: fftypes.NewRandB32(), - Topics: fftypes.FFStringArray{"topic1"}, - }, + payload := &fftypes.BatchPayload{ + Messages: []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypePrivate, + Group: fftypes.NewRandB32(), + Topics: fftypes.FFStringArray{"topic1"}, }, }, }, } - _, err := bp.maskContexts(bp.ctx, batch) + _, err := 
bp.maskContexts(bp.ctx, payload) assert.NoError(t, err) // 2nd time no DB ops - _, err = bp.maskContexts(bp.ctx, batch) + _, err = bp.maskContexts(bp.ctx, payload) assert.NoError(t, err) bp.cancelCtx() @@ -377,31 +379,29 @@ func TestMaskContextsUpdataMessageFail(t *testing.T) { log.SetLevel("debug") config.Reset() - dispatched := make(chan *fftypes.Batch) - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - dispatched <- b + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state return nil }) mdi.On("UpsertNonceNext", mock.Anything, mock.Anything).Return(nil).Once() mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")).Once() - batch := &fftypes.Batch{ - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{ - { - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - Type: fftypes.MessageTypePrivate, - Group: fftypes.NewRandB32(), - Topics: fftypes.FFStringArray{"topic1"}, - }, + payload := &fftypes.BatchPayload{ + Messages: []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypePrivate, + Group: fftypes.NewRandB32(), + Topics: fftypes.FFStringArray{"topic1"}, }, }, }, } - _, err := bp.maskContexts(bp.ctx, batch) + _, err := bp.maskContexts(bp.ctx, payload) assert.Regexp(t, "pop", err) bp.cancelCtx() diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go index 0cebac57fc..90e453afec 100644 --- a/internal/broadcast/manager.go +++ b/internal/broadcast/manager.go @@ -121,22 +121,26 @@ func (bm *broadcastManager) Name() string { return "BroadcastManager" } -func (bm *broadcastManager) dispatchBatch(ctx context.Context, batch *fftypes.Batch, pins []*fftypes.Bytes32) error { +func (bm *broadcastManager) dispatchBatch(ctx context.Context, state *batch.DispatchState) error { // The completed 
SharedStorage upload op := fftypes.NewOperation( bm.sharedstorage, - batch.Namespace, - batch.Payload.TX.ID, + state.Persisted.Namespace, + state.Persisted.TX.ID, fftypes.OpTypeSharedStorageBatchBroadcast) - addBatchBroadcastInputs(op, batch.ID) + addBatchBroadcastInputs(op, state.Persisted.ID) if err := bm.operations.AddOrReuseOperation(ctx, op); err != nil { return err } + batch := &fftypes.Batch{ + BatchHeader: state.Persisted.BatchHeader, + Payload: state.Payload, + } if err := bm.operations.RunOperation(ctx, opBatchBroadcast(op, batch)); err != nil { return err } log.L(ctx).Infof("Pinning broadcast batch %s with author=%s key=%s", batch.ID, batch.Author, batch.Key) - return bm.batchpin.SubmitPinnedBatch(ctx, batch, pins) + return bm.batchpin.SubmitPinnedBatch(ctx, &state.Persisted, state.Pins) } func (bm *broadcastManager) publishBlobs(ctx context.Context, dataToPublish []*fftypes.DataAndBlob) error { diff --git a/internal/broadcast/manager_test.go b/internal/broadcast/manager_test.go index edffd32dc7..b9fc0f63b6 100644 --- a/internal/broadcast/manager_test.go +++ b/internal/broadcast/manager_test.go @@ -24,6 +24,7 @@ import ( "io/ioutil" "testing" + "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/mocks/batchmocks" "github.com/hyperledger/firefly/mocks/batchpinmocks" @@ -148,12 +149,14 @@ func TestDispatchBatchInsertOpFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - batch := &fftypes.Batch{} + state := &batch.DispatchState{ + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, + } mom := bm.operations.(*operationmocks.Manager) mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := bm.dispatchBatch(context.Background(), batch, []*fftypes.Bytes32{fftypes.NewRandB32()}) + err := bm.dispatchBatch(context.Background(), state) assert.EqualError(t, err, "pop") mom.AssertExpectations(t) @@ -163,16 +166,23 @@ func 
TestDispatchBatchUploadFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - batch := &fftypes.Batch{} + state := &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + }, + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, + } mom := bm.operations.(*operationmocks.Manager) mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(nil) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { data := op.Data.(batchBroadcastData) - return op.Type == fftypes.OpTypeSharedStorageBatchBroadcast && data.Batch == batch + return op.Type == fftypes.OpTypeSharedStorageBatchBroadcast && data.Batch.ID.Equals(state.Persisted.ID) })).Return(fmt.Errorf("pop")) - err := bm.dispatchBatch(context.Background(), batch, []*fftypes.Bytes32{fftypes.NewRandB32()}) + err := bm.dispatchBatch(context.Background(), state) assert.EqualError(t, err, "pop") mom.AssertExpectations(t) @@ -182,10 +192,13 @@ func TestDispatchBatchSubmitBatchPinSucceed(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - batch := &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - ID: fftypes.NewUUID(), + state := &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, }, + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, } mdi := bm.database.(*databasemocks.Plugin) @@ -195,10 +208,10 @@ func TestDispatchBatchSubmitBatchPinSucceed(t *testing.T) { mbp.On("SubmitPinnedBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { data := op.Data.(batchBroadcastData) - return op.Type == fftypes.OpTypeSharedStorageBatchBroadcast && data.Batch == batch + return op.Type == fftypes.OpTypeSharedStorageBatchBroadcast && data.Batch.ID.Equals(state.Persisted.ID) })).Return(nil) - err := 
bm.dispatchBatch(context.Background(), batch, []*fftypes.Bytes32{fftypes.NewRandB32()}) + err := bm.dispatchBatch(context.Background(), state) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -210,10 +223,14 @@ func TestDispatchBatchSubmitBroadcastFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - batch := &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - SignerRef: fftypes.SignerRef{Author: "wrong", Key: "wrong"}, + state := &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{Author: "wrong", Key: "wrong"}, + }, }, + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, } mdi := bm.database.(*databasemocks.Plugin) @@ -223,10 +240,10 @@ func TestDispatchBatchSubmitBroadcastFail(t *testing.T) { mbp.On("SubmitPinnedBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { data := op.Data.(batchBroadcastData) - return op.Type == fftypes.OpTypeSharedStorageBatchBroadcast && data.Batch == batch + return op.Type == fftypes.OpTypeSharedStorageBatchBroadcast && data.Batch.ID.Equals(state.Persisted.ID) })).Return(nil) - err := bm.dispatchBatch(context.Background(), batch, []*fftypes.Bytes32{fftypes.NewRandB32()}) + err := bm.dispatchBatch(context.Background(), state) assert.EqualError(t, err, "pop") mdi.AssertExpectations(t) diff --git a/internal/broadcast/operations.go b/internal/broadcast/operations.go index 74c7fe2b90..2c395ff855 100644 --- a/internal/broadcast/operations.go +++ b/internal/broadcast/operations.go @@ -53,7 +53,7 @@ func (bm *broadcastManager) PrepareOperation(ctx context.Context, op *fftypes.Op } else if bp == nil { return nil, i18n.NewError(ctx, i18n.Msg404NotFound) } - batch, err := bm.data.HydrateBatch(ctx, bp) + batch, err := bm.data.HydrateBatch(ctx, bp, true) if err != nil { return nil, err } diff --git 
a/internal/broadcast/operations_test.go b/internal/broadcast/operations_test.go index d5821283a6..d38d55973f 100644 --- a/internal/broadcast/operations_test.go +++ b/internal/broadcast/operations_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/sharedstoragemocks" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" @@ -34,16 +35,23 @@ func TestPrepareAndRunBatchBroadcast(t *testing.T) { op := &fftypes.Operation{ Type: fftypes.OpTypeSharedStorageBatchBroadcast, } + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), + BatchHeader: bp.BatchHeader, } - addBatchBroadcastInputs(op, batch.ID) + addBatchBroadcastInputs(op, bp.ID) mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) mdi := bm.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", context.Background(), batch.ID).Return(batch, nil) + mdm := bm.data.(*datamocks.Manager) + mdm.On("HydrateBatch", context.Background(), bp, true).Return(batch, nil) + mdi.On("GetBatchByID", context.Background(), bp.ID).Return(bp, nil) mps.On("PublishData", context.Background(), mock.Anything).Return("123", nil) - mdi.On("UpdateBatch", context.Background(), batch.ID, mock.MatchedBy(func(update database.Update) bool { + mdi.On("UpdateBatch", context.Background(), bp.ID, mock.MatchedBy(func(update database.Update) bool { info, _ := update.Finalize() assert.Equal(t, 1, len(info.SetOperations)) assert.Equal(t, "payloadref", info.SetOperations[0].Field) @@ -54,7 +62,7 @@ func TestPrepareAndRunBatchBroadcast(t *testing.T) { po, err := bm.PrepareOperation(context.Background(), op) assert.NoError(t, err) - assert.Equal(t, batch, po.Data.(batchBroadcastData).Batch) + assert.Equal(t, bp.ID, po.Data.(batchBroadcastData).Batch.ID) complete, err := 
bm.RunOperation(context.Background(), opBatchBroadcast(op, batch)) @@ -63,6 +71,35 @@ func TestPrepareAndRunBatchBroadcast(t *testing.T) { mps.AssertExpectations(t) mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestPrepareAndRunBatchBroadcastHydrateFail(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageBatchBroadcast, + } + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + addBatchBroadcastInputs(op, bp.ID) + + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mdi := bm.database.(*databasemocks.Plugin) + mdm := bm.data.(*datamocks.Manager) + mdm.On("HydrateBatch", context.Background(), bp, true).Return(nil, fmt.Errorf("pop")) + mdi.On("GetBatchByID", context.Background(), bp.ID).Return(bp, nil) + + _, err := bm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "pop", err) + + mps.AssertExpectations(t) + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestPrepareOperationNotSupported(t *testing.T) { @@ -156,7 +193,9 @@ func TestRunOperationBatchBroadcastPublishFail(t *testing.T) { op := &fftypes.Operation{} batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, } mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) @@ -176,7 +215,9 @@ func TestRunOperationBatchBroadcast(t *testing.T) { op := &fftypes.Operation{} batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, } mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index c9101b0cc2..62e31cca52 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -45,7 +45,7 @@ type Manager interface { UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) 
(*fftypes.Data, error) CopyBlobPStoDX(ctx context.Context, data *fftypes.Data) (blob *fftypes.Blob, err error) DownloadBLOB(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error) - HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) + HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted, requiresSharedDataPayloadRefs bool) (*fftypes.Batch, error) } type dataManager struct { @@ -349,7 +349,7 @@ func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData } // HydrateBatch fetches the full messages for a persited batch, ready for transmission -func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { +func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted, requiresSharedDataPayloadRefs bool) (*fftypes.Batch, error) { var manifest fftypes.BatchManifest err := json.Unmarshal([]byte(persistedBatch.Manifest), &manifest) @@ -381,7 +381,7 @@ func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes return nil, i18n.WrapError(ctx, err, i18n.MsgFailedToRetrieve, "data", dr.ID) } // BatchData removes any fields that could change after the batch was first assembled on the sender - batch.Payload.Data[i] = d.BatchData(batch.Type) + batch.Payload.Data[i] = d.BatchData(requiresSharedDataPayloadRefs) } return batch, nil diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index e34b68cd90..7eef71495d 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -687,7 +687,6 @@ func TestHydrateBatchOK(t *testing.T) { dataHash := fftypes.NewRandB32() bp := &fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ - Type: fftypes.MessageTypeBroadcast, ID: batchID, Namespace: "ns1", }, @@ -711,7 +710,7 @@ func TestHydrateBatchOK(t *testing.T) { Created: fftypes.Now(), }, nil) - batch, err 
:= dm.HydrateBatch(ctx, bp) + batch, err := dm.HydrateBatch(ctx, bp, true) assert.NoError(t, err) assert.Equal(t, bp.BatchHeader, batch.BatchHeader) assert.Equal(t, bp.TX, batch.Payload.TX) @@ -737,7 +736,6 @@ func TestHydrateBatchDataFail(t *testing.T) { dataHash := fftypes.NewRandB32() bp := &fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ - Type: fftypes.MessageTypeBroadcast, ID: batchID, Namespace: "ns1", }, @@ -757,7 +755,7 @@ func TestHydrateBatchDataFail(t *testing.T) { }, nil) mdi.On("GetDataByID", ctx, dataID, true).Return(nil, fmt.Errorf("pop")) - _, err := dm.HydrateBatch(ctx, bp) + _, err := dm.HydrateBatch(ctx, bp, true) assert.Regexp(t, "FF10372.*pop", err) mdi.AssertExpectations(t) @@ -774,7 +772,6 @@ func TestHydrateBatchMsgNotFound(t *testing.T) { dataHash := fftypes.NewRandB32() bp := &fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ - Type: fftypes.MessageTypeBroadcast, ID: batchID, Namespace: "ns1", }, @@ -789,7 +786,7 @@ func TestHydrateBatchMsgNotFound(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetMessageByID", ctx, msgID).Return(nil, nil) - _, err := dm.HydrateBatch(ctx, bp) + _, err := dm.HydrateBatch(ctx, bp, true) assert.Regexp(t, "FF10372", err) mdi.AssertExpectations(t) @@ -803,6 +800,6 @@ func TestHydrateBatchMsgBadManifest(t *testing.T) { Manifest: `!json`, } - _, err := dm.HydrateBatch(ctx, bp) + _, err := dm.HydrateBatch(ctx, bp, true) assert.Regexp(t, "FF10151", err) } diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go index 3d3b638484..b5294e3015 100644 --- a/internal/i18n/en_translations.go +++ b/internal/i18n/en_translations.go @@ -289,4 +289,5 @@ var ( MsgEventNotFound = ffm("FF10370", "Event with name '%s' not found", 400) MsgOperationNotSupported = ffm("FF10371", "Operation not supported", 400) MsgFailedToRetrieve = ffm("FF10372", "Failed to retrieve %s %s", 500) + MsgBlobMissingPublic = ffm("FF10373", "Blob for data %s missing public payload reference 
while flushing batch", 500) ) diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index 954dbeff99..7cc0c56962 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -116,13 +116,13 @@ func (_m *Manager) GetMessageData(ctx context.Context, msg *fftypes.Message, wit return r0, r1, r2 } -// HydrateBatch provides a mock function with given fields: ctx, persistedBatch -func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { - ret := _m.Called(ctx, persistedBatch) +// HydrateBatch provides a mock function with given fields: ctx, persistedBatch, requiresSharedDataPayloadRefs +func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted, requiresSharedDataPayloadRefs bool) (*fftypes.Batch, error) { + ret := _m.Called(ctx, persistedBatch, requiresSharedDataPayloadRefs) var r0 *fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted) *fftypes.Batch); ok { - r0 = rf(ctx, persistedBatch) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted, bool) *fftypes.Batch); ok { + r0 = rf(ctx, persistedBatch, requiresSharedDataPayloadRefs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*fftypes.Batch) @@ -130,8 +130,8 @@ func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.Bat } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *fftypes.BatchPersisted) error); ok { - r1 = rf(ctx, persistedBatch) + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.BatchPersisted, bool) error); ok { + r1 = rf(ctx, persistedBatch, requiresSharedDataPayloadRefs) } else { r1 = ret.Error(1) } diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index d788cd3723..01b1387255 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -72,9 +72,6 @@ type BatchPayload struct { } func (bm *BatchManifest) String() string { - if bm == nil { - return "" - } b, _ := json.Marshal(&bm) 
return string(b) } @@ -90,22 +87,25 @@ func (ma *BatchPayload) Hash() *Bytes32 { return &b32 } -func (b *Batch) Manifest() *BatchManifest { - if b == nil { - return nil - } +func (ma *BatchPayload) Manifest(id *UUID) *BatchManifest { tm := &BatchManifest{ - ID: b.ID, - Messages: make([]MessageRef, len(b.Payload.Messages)), - Data: make([]DataRef, len(b.Payload.Data)), + ID: id, + Messages: make([]MessageRef, len(ma.Messages)), + Data: make([]DataRef, len(ma.Data)), } - for i, m := range b.Payload.Messages { + for i, m := range ma.Messages { tm.Messages[i].ID = m.Header.ID tm.Messages[i].Hash = m.Hash } - for i, d := range b.Payload.Data { + for i, d := range ma.Data { tm.Data[i].ID = d.ID tm.Data[i].Hash = d.Hash } return tm } +func (b *Batch) Manifest() *BatchManifest { + if b == nil { + return nil + } + return b.Payload.Manifest(b.ID) +} diff --git a/pkg/fftypes/batch_test.go b/pkg/fftypes/batch_test.go index f55644775c..76f8d5cb4d 100644 --- a/pkg/fftypes/batch_test.go +++ b/pkg/fftypes/batch_test.go @@ -44,8 +44,6 @@ func TestSQLSerializedManifest(t *testing.T) { }, } - assert.Equal(t, "", ((*BatchManifest)(nil)).String()) - mfString := batch.Manifest().String() var mf *BatchManifest err := json.Unmarshal([]byte(mfString), &mf) diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index bcd0fe5d0d..a65a9df358 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -52,13 +52,13 @@ type Data struct { ValueSize int64 `json:"-"` // Used internally for message size calcuation, without full payload retrieval } -func (br *BlobRef) BatchBlobRef(broadcast bool) *BlobRef { +func (br *BlobRef) BatchBlobRef(requiresSharedDataPayloadRefs bool) *BlobRef { if br == nil { return nil } // For broadcast data the blob reference contains the "public" (shared storage) reference, which // must have been allocated to this data item before sealing the batch. 
- if broadcast { + if requiresSharedDataPayloadRefs { return br } // For private we omit the "public" ref in all cases, to avoid an potential for the batch pay to change due @@ -72,7 +72,7 @@ func (br *BlobRef) BatchBlobRef(broadcast bool) *BlobRef { // BatchData is the fields in a data record that are assured to be consistent on all parties. // This is what is transferred and hashed in a batch payload between nodes. -func (d *Data) BatchData(broadcast bool) *Data { +func (d *Data) BatchData(requiresSharedDataPayloadRefs bool) *Data { return &Data{ ID: d.ID, Validator: d.Validator, @@ -80,7 +80,7 @@ func (d *Data) BatchData(broadcast bool) *Data { Hash: d.Hash, Datatype: d.Datatype, Value: d.Value, - Blob: d.Blob.BatchBlobRef(broadcast), + Blob: d.Blob.BatchBlobRef(requiresSharedDataPayloadRefs), } } From 9f55e22027abdd88459bbfd045b3563c0d6af5d5 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 4 Mar 2022 15:40:24 -0500 Subject: [PATCH 06/11] Move over the private batch dispatcher Signed-off-by: Peter Broadhurst --- ...00070_rename_batch_payload_column.down.sql | 4 +- ...00070_rename_batch_payload_column.down.sql | 2 - internal/batch/batch_manager.go | 10 +- internal/batch/batch_processor.go | 3 +- internal/broadcast/manager.go | 10 +- internal/broadcast/operations.go | 2 +- internal/broadcast/operations_test.go | 4 +- internal/data/data_manager.go | 6 +- internal/data/data_manager_test.go | 11 +- internal/database/sqlcommon/batch_sql_test.go | 4 +- internal/orchestrator/data_query.go | 6 +- internal/orchestrator/orchestrator.go | 4 +- internal/orchestrator/txn_status_test.go | 8 +- internal/privatemessaging/message_test.go | 76 +++-------- internal/privatemessaging/operations.go | 8 +- internal/privatemessaging/operations_test.go | 58 +++++++- internal/privatemessaging/privatemessaging.go | 26 ++-- .../privatemessaging/privatemessaging_test.go | 125 +++++++++--------- mocks/datamocks/manager.go | 14 +- pkg/fftypes/batch.go | 17 ++- pkg/fftypes/data.go | 
28 ++-- pkg/fftypes/data_test.go | 6 +- 22 files changed, 235 insertions(+), 197 deletions(-) diff --git a/db/migrations/postgres/000070_rename_batch_payload_column.down.sql b/db/migrations/postgres/000070_rename_batch_payload_column.down.sql index b186daad1d..8259011d2a 100644 --- a/db/migrations/postgres/000070_rename_batch_payload_column.down.sql +++ b/db/migrations/postgres/000070_rename_batch_payload_column.down.sql @@ -1 +1,3 @@ -ALTER TABLE batches RENAME COLUMN manifest TO payload; \ No newline at end of file +BEGIN; +ALTER TABLE batches RENAME COLUMN manifest TO payload; +COMMIT; \ No newline at end of file diff --git a/db/migrations/sqlite/000070_rename_batch_payload_column.down.sql b/db/migrations/sqlite/000070_rename_batch_payload_column.down.sql index 8259011d2a..5fc109c3c9 100644 --- a/db/migrations/sqlite/000070_rename_batch_payload_column.down.sql +++ b/db/migrations/sqlite/000070_rename_batch_payload_column.down.sql @@ -1,3 +1 @@ -BEGIN; ALTER TABLE batches RENAME COLUMN manifest TO payload; -COMMIT; \ No newline at end of file diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index d3ab1dbd35..145a13fbe7 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -99,11 +99,11 @@ type batchManager struct { type DispatchHandler func(context.Context, *DispatchState) error type DispatcherOptions struct { - RequiresSharedDataPayloadRefs bool - BatchMaxSize uint - BatchMaxBytes int64 - BatchTimeout time.Duration - DisposeTimeout time.Duration + BatchType fftypes.BatchType + BatchMaxSize uint + BatchMaxBytes int64 + BatchTimeout time.Duration + DisposeTimeout time.Duration } type dispatcher struct { diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 0525e5978b..50307c1f7b 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -390,6 +390,7 @@ func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWor 
Persisted: fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ ID: id, + Type: bp.conf.DispatcherOptions.BatchType, Namespace: bp.conf.namespace, SignerRef: bp.conf.identity, Group: bp.conf.group, @@ -403,7 +404,7 @@ func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWor state.Payload.Messages = append(state.Payload.Messages, w.msg.BatchMessage()) } for _, d := range w.data { - state.Payload.Data = append(state.Payload.Data, d.BatchData(bp.conf.RequiresSharedDataPayloadRefs)) + state.Payload.Data = append(state.Payload.Data, d.BatchData(state.Persisted.Type)) } } state.Manifest = state.Payload.Manifest(id) diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go index 90e453afec..b5519ff910 100644 --- a/internal/broadcast/manager.go +++ b/internal/broadcast/manager.go @@ -95,11 +95,11 @@ func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Ma } bo := batch.DispatcherOptions{ - RequiresSharedDataPayloadRefs: true, - BatchMaxSize: config.GetUint(config.BroadcastBatchSize), - BatchMaxBytes: bm.maxBatchPayloadLength, - BatchTimeout: config.GetDuration(config.BroadcastBatchTimeout), - DisposeTimeout: config.GetDuration(config.BroadcastBatchAgentTimeout), + BatchType: fftypes.BatchTypeBroadcast, + BatchMaxSize: config.GetUint(config.BroadcastBatchSize), + BatchMaxBytes: bm.maxBatchPayloadLength, + BatchTimeout: config.GetDuration(config.BroadcastBatchTimeout), + DisposeTimeout: config.GetDuration(config.BroadcastBatchAgentTimeout), } ba.RegisterDispatcher(broadcastDispatcherName, diff --git a/internal/broadcast/operations.go b/internal/broadcast/operations.go index 2c395ff855..74c7fe2b90 100644 --- a/internal/broadcast/operations.go +++ b/internal/broadcast/operations.go @@ -53,7 +53,7 @@ func (bm *broadcastManager) PrepareOperation(ctx context.Context, op *fftypes.Op } else if bp == nil { return nil, i18n.NewError(ctx, i18n.Msg404NotFound) } - batch, err := bm.data.HydrateBatch(ctx, bp, true) + 
batch, err := bm.data.HydrateBatch(ctx, bp) if err != nil { return nil, err } diff --git a/internal/broadcast/operations_test.go b/internal/broadcast/operations_test.go index d38d55973f..06d6a7feaa 100644 --- a/internal/broadcast/operations_test.go +++ b/internal/broadcast/operations_test.go @@ -48,7 +48,7 @@ func TestPrepareAndRunBatchBroadcast(t *testing.T) { mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) mdi := bm.database.(*databasemocks.Plugin) mdm := bm.data.(*datamocks.Manager) - mdm.On("HydrateBatch", context.Background(), bp, true).Return(batch, nil) + mdm.On("HydrateBatch", context.Background(), bp).Return(batch, nil) mdi.On("GetBatchByID", context.Background(), bp.ID).Return(bp, nil) mps.On("PublishData", context.Background(), mock.Anything).Return("123", nil) mdi.On("UpdateBatch", context.Background(), bp.ID, mock.MatchedBy(func(update database.Update) bool { @@ -91,7 +91,7 @@ func TestPrepareAndRunBatchBroadcastHydrateFail(t *testing.T) { mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) mdi := bm.database.(*databasemocks.Plugin) mdm := bm.data.(*datamocks.Manager) - mdm.On("HydrateBatch", context.Background(), bp, true).Return(nil, fmt.Errorf("pop")) + mdm.On("HydrateBatch", context.Background(), bp).Return(nil, fmt.Errorf("pop")) mdi.On("GetBatchByID", context.Background(), bp.ID).Return(bp, nil) _, err := bm.PrepareOperation(context.Background(), op) diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index 62e31cca52..f88b86ff21 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -45,7 +45,7 @@ type Manager interface { UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) CopyBlobPStoDX(ctx context.Context, data *fftypes.Data) (blob *fftypes.Blob, err error) DownloadBLOB(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error) - HydrateBatch(ctx context.Context, persistedBatch 
*fftypes.BatchPersisted, requiresSharedDataPayloadRefs bool) (*fftypes.Batch, error) + HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) } type dataManager struct { @@ -349,7 +349,7 @@ func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData } // HydrateBatch fetches the full messages for a persited batch, ready for transmission -func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted, requiresSharedDataPayloadRefs bool) (*fftypes.Batch, error) { +func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { var manifest fftypes.BatchManifest err := json.Unmarshal([]byte(persistedBatch.Manifest), &manifest) @@ -381,7 +381,7 @@ func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes return nil, i18n.WrapError(ctx, err, i18n.MsgFailedToRetrieve, "data", dr.ID) } // BatchData removes any fields that could change after the batch was first assembled on the sender - batch.Payload.Data[i] = d.BatchData(requiresSharedDataPayloadRefs) + batch.Payload.Data[i] = d.BatchData(persistedBatch.Type) } return batch, nil diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index 7eef71495d..1168c5fb3c 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -687,6 +687,7 @@ func TestHydrateBatchOK(t *testing.T) { dataHash := fftypes.NewRandB32() bp := &fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ + Type: fftypes.BatchTypeBroadcast, ID: batchID, Namespace: "ns1", }, @@ -710,7 +711,7 @@ func TestHydrateBatchOK(t *testing.T) { Created: fftypes.Now(), }, nil) - batch, err := dm.HydrateBatch(ctx, bp, true) + batch, err := dm.HydrateBatch(ctx, bp) assert.NoError(t, err) assert.Equal(t, bp.BatchHeader, batch.BatchHeader) assert.Equal(t, bp.TX, batch.Payload.TX) @@ -736,6 +737,7 @@ func TestHydrateBatchDataFail(t 
*testing.T) { dataHash := fftypes.NewRandB32() bp := &fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ + Type: fftypes.BatchTypeBroadcast, ID: batchID, Namespace: "ns1", }, @@ -755,7 +757,7 @@ func TestHydrateBatchDataFail(t *testing.T) { }, nil) mdi.On("GetDataByID", ctx, dataID, true).Return(nil, fmt.Errorf("pop")) - _, err := dm.HydrateBatch(ctx, bp, true) + _, err := dm.HydrateBatch(ctx, bp) assert.Regexp(t, "FF10372.*pop", err) mdi.AssertExpectations(t) @@ -772,6 +774,7 @@ func TestHydrateBatchMsgNotFound(t *testing.T) { dataHash := fftypes.NewRandB32() bp := &fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ + Type: fftypes.BatchTypeBroadcast, ID: batchID, Namespace: "ns1", }, @@ -786,7 +789,7 @@ func TestHydrateBatchMsgNotFound(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetMessageByID", ctx, msgID).Return(nil, nil) - _, err := dm.HydrateBatch(ctx, bp, true) + _, err := dm.HydrateBatch(ctx, bp) assert.Regexp(t, "FF10372", err) mdi.AssertExpectations(t) @@ -800,6 +803,6 @@ func TestHydrateBatchMsgBadManifest(t *testing.T) { Manifest: `!json`, } - _, err := dm.HydrateBatch(ctx, bp, true) + _, err := dm.HydrateBatch(ctx, bp) assert.Regexp(t, "FF10151", err) } diff --git a/internal/database/sqlcommon/batch_sql_test.go b/internal/database/sqlcommon/batch_sql_test.go index f854c80b1b..80d652f0f2 100644 --- a/internal/database/sqlcommon/batch_sql_test.go +++ b/internal/database/sqlcommon/batch_sql_test.go @@ -41,7 +41,7 @@ func TestBatch2EWithDB(t *testing.T) { batch := &fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ ID: batchID, - Type: fftypes.MessageTypeBroadcast, + Type: fftypes.BatchTypeBroadcast, SignerRef: fftypes.SignerRef{ Key: "0x12345", Author: "did:firefly:org/abcd", @@ -83,7 +83,7 @@ func TestBatch2EWithDB(t *testing.T) { batchUpdated := &fftypes.BatchPersisted{ BatchHeader: fftypes.BatchHeader{ ID: batchID, - Type: fftypes.MessageTypeBroadcast, + Type: fftypes.BatchTypePrivate, SignerRef: 
fftypes.SignerRef{ Key: "0x12345", Author: "did:firefly:org/abcd", diff --git a/internal/orchestrator/data_query.go b/internal/orchestrator/data_query.go index 2251262d69..eac1d89a03 100644 --- a/internal/orchestrator/data_query.go +++ b/internal/orchestrator/data_query.go @@ -100,7 +100,7 @@ func (or *orchestrator) GetMessageByIDWithData(ctx context.Context, ns, id strin return or.fetchMessageData(ctx, msg) } -func (or *orchestrator) GetBatchByID(ctx context.Context, ns, id string) (*fftypes.Batch, error) { +func (or *orchestrator) GetBatchByID(ctx context.Context, ns, id string) (*fftypes.BatchPersisted, error) { u, err := or.verifyIDAndNamespace(ctx, ns, id) if err != nil { return nil, err @@ -209,7 +209,7 @@ func (or *orchestrator) getMessageTransactionID(ctx context.Context, ns, id stri if batch == nil { return nil, i18n.NewError(ctx, i18n.MsgBatchNotFound, msg.BatchID) } - txID = batch.Payload.TX.ID + txID = batch.TX.ID if txID == nil { return nil, i18n.NewError(ctx, i18n.MsgBatchTXNotSet, msg.BatchID) } @@ -253,7 +253,7 @@ func (or *orchestrator) GetMessageEvents(ctx context.Context, ns, id string, fil return or.database.GetEvents(ctx, filter) } -func (or *orchestrator) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Batch, *database.FilterResult, error) { +func (or *orchestrator) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) { filter = or.scopeNS(ns, filter) return or.database.GetBatches(ctx, filter) } diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index 1492f40cde..6e70a70f5f 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -107,8 +107,8 @@ type Orchestrator interface { GetMessageEvents(ctx context.Context, ns, id string, filter database.AndFilter) ([]*fftypes.Event, *database.FilterResult, error) GetMessageData(ctx context.Context, ns, id string) 
([]*fftypes.Data, error) GetMessagesForData(ctx context.Context, ns, dataID string, filter database.AndFilter) ([]*fftypes.Message, *database.FilterResult, error) - GetBatchByID(ctx context.Context, ns, id string) (*fftypes.Batch, error) - GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Batch, *database.FilterResult, error) + GetBatchByID(ctx context.Context, ns, id string) (*fftypes.BatchPersisted, error) + GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) GetDataByID(ctx context.Context, ns, id string) (*fftypes.Data, error) GetData(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Data, *database.FilterResult, error) GetDatatypeByID(ctx context.Context, ns, id string) (*fftypes.Datatype, error) diff --git a/internal/orchestrator/txn_status_test.go b/internal/orchestrator/txn_status_test.go index 56a36e1520..b6972ed2b3 100644 --- a/internal/orchestrator/txn_status_test.go +++ b/internal/orchestrator/txn_status_test.go @@ -60,10 +60,12 @@ func TestGetTransactionStatusBatchPinSuccess(t *testing.T) { Info: fftypes.JSONObject{"transactionHash": "0x100"}, }, } - batches := []*fftypes.Batch{ + batches := []*fftypes.BatchPersisted{ { - ID: fftypes.NewUUID(), - Type: fftypes.MessageTypeBroadcast, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.BatchTypeBroadcast, + }, Confirmed: fftypes.UnixTime(2), }, } diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index 0407059a45..a0e55abf17 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -21,6 +21,7 @@ import ( "fmt" "testing" + "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/dataexchangemocks" @@ -442,10 +443,12 @@ func 
TestSendUnpinnedMessageGroupLookupFail(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, groupID).Return(nil, fmt.Errorf("pop")).Once() - err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - ID: fftypes.NewUUID(), - Group: groupID, + err := pm.dispatchUnpinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ @@ -460,7 +463,7 @@ func TestSendUnpinnedMessageGroupLookupFail(t *testing.T) { }, }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -688,51 +691,6 @@ func TestRequestReplySuccess(t *testing.T) { assert.NoError(t, err) } -func TestDispatchedUnpinnedMessageMarshalFail(t *testing.T) { - - pm, cancel := newTestPrivateMessaging(t) - defer cancel() - - mim := pm.identity.(*identitymanagermocks.Manager) - mim.On("ResolveInputSigningIdentity", pm.ctx, "ns1", mock.MatchedBy(func(identity *fftypes.SignerRef) bool { - assert.Equal(t, "localorg", identity.Author) - return true - })).Return(nil) - - groupID := fftypes.NewRandB32() - node1 := newTestNode("node1", newTestOrg("localorg")) - node2 := newTestNode("node2", newTestOrg("remoteorg")) - - mdi := pm.database.(*databasemocks.Plugin) - mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{ - Hash: groupID, - GroupIdentity: fftypes.GroupIdentity{ - Members: fftypes.Members{ - {Node: node1.ID, Identity: "localorg"}, - {Node: node1.ID, Identity: "remoteorg"}, - }, - }, - }, nil).Once() - mdi.On("GetIdentityByID", pm.ctx, node1.ID).Return(node1, nil).Once() - mdi.On("GetIdentityByID", pm.ctx, node1.ID).Return(node2, nil).Once() - - err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - ID: fftypes.NewUUID(), - Group: groupID, - }, - Payload: fftypes.BatchPayload{ - Data: 
[]*fftypes.Data{ - {Value: fftypes.JSONAnyPtr("!Bad JSON")}, - }, - }, - }, []*fftypes.Bytes32{}) - assert.Regexp(t, "FF10137", err) - - mdi.AssertExpectations(t) - -} - func TestDispatchedUnpinnedMessageOK(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) @@ -773,10 +731,12 @@ func TestDispatchedUnpinnedMessageOK(t *testing.T) { return op.Type == fftypes.OpTypeDataExchangeBatchSend && *data.Node.ID == *node2.ID })).Return(nil) - err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - ID: fftypes.NewUUID(), - Group: groupID, + err := pm.dispatchUnpinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -795,7 +755,7 @@ func TestDispatchedUnpinnedMessageOK(t *testing.T) { }, }, }, - }, []*fftypes.Bytes32{}) + }) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -848,7 +808,7 @@ func TestSendDataTransferBlobsFail(t *testing.T) { }, }, }, - }, nodes) + }, nodes, "manifest-data") assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -895,7 +855,7 @@ func TestSendDataTransferFail(t *testing.T) { }, }, }, - }, nodes) + }, nodes, "manifest-data") assert.Regexp(t, "pop", err) mim.AssertExpectations(t) @@ -943,7 +903,7 @@ func TestSendDataTransferInsertOperationFail(t *testing.T) { }, }, }, - }, nodes) + }, nodes, "manifest-data") assert.Regexp(t, "pop", err) } diff --git a/internal/privatemessaging/operations.go b/internal/privatemessaging/operations.go index 38fa052698..53ea7ef4a5 100644 --- a/internal/privatemessaging/operations.go +++ b/internal/privatemessaging/operations.go @@ -110,12 +110,16 @@ func (pm *privateMessaging) PrepareOperation(ctx context.Context, op *fftypes.Op } else if group == nil { return nil, i18n.NewError(ctx, i18n.Msg404NotFound) } - batch, err := pm.database.GetBatchByID(ctx, batchID) + bp, err := pm.database.GetBatchByID(ctx, 
batchID) if err != nil { return nil, err - } else if batch == nil { + } else if bp == nil { return nil, i18n.NewError(ctx, i18n.Msg404NotFound) } + batch, err := pm.data.HydrateBatch(ctx, bp) + if err != nil { + return nil, err + } transport := &fftypes.TransportWrapper{Group: group, Batch: batch} return opBatchSend(op, node, transport), nil diff --git a/internal/privatemessaging/operations_test.go b/internal/privatemessaging/operations_test.go index 82423e5496..78b3f7fc1f 100644 --- a/internal/privatemessaging/operations_test.go +++ b/internal/privatemessaging/operations_test.go @@ -21,6 +21,7 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/dataexchangemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -91,16 +92,23 @@ func TestPrepareAndRunBatchSend(t *testing.T) { group := &fftypes.Group{ Hash: fftypes.NewRandB32(), } + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), + BatchHeader: bp.BatchHeader, } addBatchSendInputs(op, node.ID, group.Hash, batch.ID, "manifest-info") mdi := pm.database.(*databasemocks.Plugin) mdx := pm.exchange.(*dataexchangemocks.Plugin) + mdm := pm.data.(*datamocks.Manager) + mdm.On("HydrateBatch", context.Background(), bp).Return(batch, nil) mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) mdi.On("GetGroupByHash", context.Background(), group.Hash).Return(group, nil) - mdi.On("GetBatchByID", context.Background(), batch.ID).Return(batch, nil) + mdi.On("GetBatchByID", context.Background(), batch.ID).Return(bp, nil) mdx.On("SendMessage", context.Background(), op.ID, "peer1", mock.Anything).Return(nil) po, err := pm.PrepareOperation(context.Background(), op) @@ -116,6 +124,52 @@ func TestPrepareAndRunBatchSend(t *testing.T) { 
mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestPrepareAndRunBatchSendHydrateFail(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeBatchSend, + ID: fftypes.NewUUID(), + } + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + IdentityProfile: fftypes.IdentityProfile{ + Profile: fftypes.JSONObject{ + "id": "peer1", + }, + }, + } + group := &fftypes.Group{ + Hash: fftypes.NewRandB32(), + } + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + batch := &fftypes.Batch{ + BatchHeader: bp.BatchHeader, + } + addBatchSendInputs(op, node.ID, group.Hash, batch.ID, "manifest-info") + + mdi := pm.database.(*databasemocks.Plugin) + mdm := pm.data.(*datamocks.Manager) + mdm.On("HydrateBatch", context.Background(), bp).Return(nil, fmt.Errorf("pop")) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetGroupByHash", context.Background(), group.Hash).Return(group, nil) + mdi.On("GetBatchByID", context.Background(), batch.ID).Return(bp, nil) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestPrepareOperationNotSupported(t *testing.T) { diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index b1fe02e218..0cb7e54d0b 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -114,6 +114,7 @@ func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Ma ) bo := batch.DispatcherOptions{ + BatchType: fftypes.BatchTypePrivate, BatchMaxSize: config.GetUint(config.PrivateMessagingBatchSize), BatchMaxBytes: pm.maxBatchPayloadLength, BatchTimeout: 
config.GetDuration(config.PrivateMessagingBatchTimeout), @@ -152,21 +153,25 @@ func (pm *privateMessaging) Start() error { return pm.exchange.Start() } -func (pm *privateMessaging) dispatchPinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { - err := pm.dispatchBatchCommon(ctx, batch) +func (pm *privateMessaging) dispatchPinnedBatch(ctx context.Context, state *batch.DispatchState) error { + err := pm.dispatchBatchCommon(ctx, state) if err != nil { return err } - log.L(ctx).Infof("Pinning private batch %s with author=%s key=%s group=%s", batch.ID, batch.Author, batch.Key, batch.Group) - return pm.batchpin.SubmitPinnedBatch(ctx, batch, contexts) + log.L(ctx).Infof("Pinning private batch %s with author=%s key=%s group=%s", state.Persisted.ID, state.Persisted.Author, state.Persisted.Key, state.Persisted.Group) + return pm.batchpin.SubmitPinnedBatch(ctx, &state.Persisted, state.Pins) } -func (pm *privateMessaging) dispatchUnpinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { - return pm.dispatchBatchCommon(ctx, batch) +func (pm *privateMessaging) dispatchUnpinnedBatch(ctx context.Context, state *batch.DispatchState) error { + return pm.dispatchBatchCommon(ctx, state) } -func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, batch *fftypes.Batch) error { +func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, state *batch.DispatchState) error { + batch := &fftypes.Batch{ + BatchHeader: state.Persisted.BatchHeader, + Payload: state.Payload, + } tw := &fftypes.TransportWrapper{ Batch: batch, } @@ -183,7 +188,7 @@ func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, batch *ffty tw.Group = group } - return pm.sendData(ctx, tw, nodes) + return pm.sendData(ctx, tw, nodes, state.Persisted.Manifest) } func (pm *privateMessaging) transferBlobs(ctx context.Context, data []*fftypes.Data, txid *fftypes.UUID, node *fftypes.Identity) error { @@ -215,7 +220,7 @@ 
func (pm *privateMessaging) transferBlobs(ctx context.Context, data []*fftypes.D return nil } -func (pm *privateMessaging) sendData(ctx context.Context, tw *fftypes.TransportWrapper, nodes []*fftypes.Identity) (err error) { +func (pm *privateMessaging) sendData(ctx context.Context, tw *fftypes.TransportWrapper, nodes []*fftypes.Identity, manifest string) (err error) { l := log.L(ctx) batch := tw.Batch @@ -251,7 +256,8 @@ func (pm *privateMessaging) sendData(ctx context.Context, tw *fftypes.TransportW var groupHash *fftypes.Bytes32 if tw.Group != nil { groupHash = tw.Group.Hash - addBatchSendInputs(op, node.ID, groupHash, batch.ID, tw.Batch.Manifest().String()) + } + addBatchSendInputs(op, node.ID, groupHash, batch.ID, manifest) if err = pm.operations.AddOrReuseOperation(ctx, op); err != nil { return err } diff --git a/internal/privatemessaging/privatemessaging_test.go b/internal/privatemessaging/privatemessaging_test.go index 5b6388e14d..b609f1aa3c 100644 --- a/internal/privatemessaging/privatemessaging_test.go +++ b/internal/privatemessaging/privatemessaging_test.go @@ -21,6 +21,7 @@ import ( "fmt" "testing" + "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/mocks/batchmocks" "github.com/hyperledger/firefly/mocks/batchpinmocks" @@ -176,15 +177,17 @@ func TestDispatchBatchWithBlobs(t *testing.T) { mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(nil) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - ID: batchID, - SignerRef: fftypes.SignerRef{ - Author: "org1", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, + Group: groupID, + Namespace: "ns1", + Hash: batchHash, }, - Group: groupID, - Namespace: "ns1", - Hash: batchHash, }, Payload: fftypes.BatchPayload{ TX: 
fftypes.TransactionRef{ @@ -194,7 +197,8 @@ func TestDispatchBatchWithBlobs(t *testing.T) { {ID: dataID1, Blob: &fftypes.BlobRef{Hash: blob1}}, }, }, - }, []*fftypes.Bytes32{pin1, pin2}) + Pins: []*fftypes.Bytes32{pin1, pin2}, + }) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -209,27 +213,6 @@ func TestNewPrivateMessagingMissingDeps(t *testing.T) { assert.Regexp(t, "FF10128", err) } -func TestDispatchBatchBadData(t *testing.T) { - pm, cancel := newTestPrivateMessaging(t) - defer cancel() - - groupID := fftypes.NewRandB32() - mdi := pm.database.(*databasemocks.Plugin) - mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{}, nil) - - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - Group: groupID, - }, - Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ - {Value: fftypes.JSONAnyPtr(`{!json}`)}, - }, - }, - }, []*fftypes.Bytes32{}) - assert.Regexp(t, "FF10137", err) -} - func TestDispatchErrorFindingGroup(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) defer cancel() @@ -237,7 +220,7 @@ func TestDispatchErrorFindingGroup(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{}, []*fftypes.Bytes32{}) + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{}) assert.Regexp(t, "pop", err) } @@ -251,13 +234,15 @@ func TestSendAndSubmitBatchBadID(t *testing.T) { mbp := pm.batchpin.(*batchpinmocks.Submitter) mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - SignerRef: fftypes.SignerRef{ - Author: "badauthor", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + SignerRef: fftypes.SignerRef{ + Author: "badauthor", + }, }, }, - }, 
[]*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -288,14 +273,16 @@ func TestSendAndSubmitBatchUnregisteredNode(t *testing.T) { mim := pm.identity.(*identitymanagermocks.Manager) mim.On("GetNodeOwnerOrg", pm.ctx).Return(nil, fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "badauthor", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "badauthor", + }, }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -309,13 +296,15 @@ func TestSendImmediateFail(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - SignerRef: fftypes.SignerRef{ - Author: "org1", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -349,11 +338,13 @@ func TestSendSubmitInsertOperationFail(t *testing.T) { mom := pm.operations.(*operationmocks.Manager) mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, }, Payload: fftypes.BatchPayload{ @@ -361,7 +352,7 @@ func 
TestSendSubmitInsertOperationFail(t *testing.T) { ID: fftypes.NewUUID(), }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -407,11 +398,13 @@ func TestSendSubmitBlobTransferFail(t *testing.T) { return op.Type == fftypes.OpTypeDataExchangeBlobSend && *data.Node.ID == *node2.ID })).Return(fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, }, Payload: fftypes.BatchPayload{ @@ -419,7 +412,7 @@ func TestSendSubmitBlobTransferFail(t *testing.T) { {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{Hash: blob1}}, }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -479,11 +472,13 @@ func TestWriteTransactionSubmitBatchPinFail(t *testing.T) { mbp := pm.batchpin.(*batchpinmocks.Submitter) mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - BatchHeader: fftypes.BatchHeader{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, }, Payload: fftypes.BatchPayload{ @@ -491,7 +486,7 @@ func TestWriteTransactionSubmitBatchPinFail(t *testing.T) { {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{Hash: blob1}}, }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index 7cc0c56962..954dbeff99 100644 --- a/mocks/datamocks/manager.go +++ 
b/mocks/datamocks/manager.go @@ -116,13 +116,13 @@ func (_m *Manager) GetMessageData(ctx context.Context, msg *fftypes.Message, wit return r0, r1, r2 } -// HydrateBatch provides a mock function with given fields: ctx, persistedBatch, requiresSharedDataPayloadRefs -func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted, requiresSharedDataPayloadRefs bool) (*fftypes.Batch, error) { - ret := _m.Called(ctx, persistedBatch, requiresSharedDataPayloadRefs) +// HydrateBatch provides a mock function with given fields: ctx, persistedBatch +func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { + ret := _m.Called(ctx, persistedBatch) var r0 *fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted, bool) *fftypes.Batch); ok { - r0 = rf(ctx, persistedBatch, requiresSharedDataPayloadRefs) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted) *fftypes.Batch); ok { + r0 = rf(ctx, persistedBatch) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*fftypes.Batch) @@ -130,8 +130,8 @@ func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.Bat } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *fftypes.BatchPersisted, bool) error); ok { - r1 = rf(ctx, persistedBatch, requiresSharedDataPayloadRefs) + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.BatchPersisted) error); ok { + r1 = rf(ctx, persistedBatch) } else { r1 = ret.Error(1) } diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index 01b1387255..442b798b34 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -21,11 +21,22 @@ import ( "encoding/json" ) +// BatchType is the type of a batch +type BatchType = FFEnum + +var ( + // BatchTypeBroadcast is a batch that is broadcast via the shared data interface + BatchTypeBroadcast BatchType = ffEnum("batchtype", "broadcast") + // BatchTypePrivate is a batch that is sent privately 
to a group + BatchTypePrivate BatchType = ffEnum("batchtype", "private") +) + // BatchHeader is the common fields between the serialized batch, and the batch manifest type BatchHeader struct { - ID *UUID `json:"id"` - Namespace string `json:"namespace"` - Node *UUID `json:"node,omitempty"` + ID *UUID `json:"id"` + Type BatchType `json:"type" ffenum:"batchtype"` + Namespace string `json:"namespace"` + Node *UUID `json:"node,omitempty"` SignerRef Group *Bytes32 `jdon:"group,omitempty"` Hash *Bytes32 `json:"hash"` diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index a65a9df358..8c78ac1f6c 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -52,27 +52,29 @@ type Data struct { ValueSize int64 `json:"-"` // Used internally for message size calcuation, without full payload retrieval } -func (br *BlobRef) BatchBlobRef(requiresSharedDataPayloadRefs bool) *BlobRef { +func (br *BlobRef) BatchBlobRef(batchType BatchType) *BlobRef { if br == nil { return nil } - // For broadcast data the blob reference contains the "public" (shared storage) reference, which - // must have been allocated to this data item before sealing the batch. - if requiresSharedDataPayloadRefs { + switch batchType { + case BatchTypePrivate: + // For private we omit the "public" ref in all cases, to avoid an potential for the batch pay to change due + // to the same data being allocated by the same data being sent in a broadcast batch (thus assigining a public ref). + return &BlobRef{ + Hash: br.Hash, + Size: br.Size, + Name: br.Name, + } + default: + // For broadcast data the blob reference contains the "public" (shared storage) reference, which + // must have been allocated to this data item before sealing the batch. return br } - // For private we omit the "public" ref in all cases, to avoid an potential for the batch pay to change due - // to the same data being allocated by the same data being sent in a broadcast batch (thus assigining a public ref). 
- return &BlobRef{ - Hash: br.Hash, - Size: br.Size, - Name: br.Name, - } } // BatchData is the fields in a data record that are assured to be consistent on all parties. // This is what is transferred and hashed in a batch payload between nodes. -func (d *Data) BatchData(requiresSharedDataPayloadRefs bool) *Data { +func (d *Data) BatchData(batchType BatchType) *Data { return &Data{ ID: d.ID, Validator: d.Validator, @@ -80,7 +82,7 @@ func (d *Data) BatchData(requiresSharedDataPayloadRefs bool) *Data { Hash: d.Hash, Datatype: d.Datatype, Value: d.Value, - Blob: d.Blob.BatchBlobRef(requiresSharedDataPayloadRefs), + Blob: d.Blob.BatchBlobRef(batchType), } } diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index a6eb65ee59..da20613243 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -215,7 +215,7 @@ func TestDataImmutable(t *testing.T) { Hash: NewRandB32(), Created: Now(), } - assert.True(t, data.Hash.Equals(data.BatchData(true).Hash)) + assert.True(t, data.Hash.Equals(data.BatchData(BatchTypeBroadcast).Hash)) data.Blob = &BlobRef{ Hash: NewRandB32(), @@ -223,6 +223,6 @@ func TestDataImmutable(t *testing.T) { Name: "name.txt", Public: "sharedStorageRef", } - assert.Equal(t, data.Blob, data.BatchData(true).Blob) - assert.Empty(t, data.BatchData(false).Blob.Public) + assert.Equal(t, data.Blob, data.BatchData(BatchTypeBroadcast).Blob) + assert.Empty(t, data.BatchData(BatchTypePrivate).Blob.Public) } From fa48b6b78b42e3ec2745df83d9ed1fdfd044b124 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Sun, 6 Mar 2022 21:59:35 -0500 Subject: [PATCH 07/11] Implement message cache Signed-off-by: Peter Broadhurst --- internal/batch/batch_processor.go | 2 +- internal/config/config.go | 6 ++ internal/data/data_manager.go | 100 ++++++++++++++++- internal/data/data_manager_test.go | 148 +++++++++++++++++++++++--- internal/events/dx_callbacks.go | 14 +-- internal/events/persist_batch.go | 54 ++++++---- internal/events/persist_batch_test.go | 10 +- 
pkg/fftypes/batch.go | 5 - pkg/fftypes/batch_test.go | 2 +- pkg/fftypes/bytetypes.go | 7 ++ 10 files changed, 292 insertions(+), 56 deletions(-) diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 50307c1f7b..2bc349ebd5 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -509,7 +509,7 @@ func (bp *batchProcessor) sealBatch(state *DispatchState) (err error) { // The hash of the batch, is the hash of the manifest to minimize the compute cost. // Note in v0.13 and before, it was the hash of the payload - so the inbound route has a fallback to accepting the full payload hash state.Persisted.Manifest = state.Manifest.String() - state.Persisted.Hash = state.Manifest.Hash() + state.Persisted.Hash = fftypes.HashString(state.Persisted.Manifest) log.L(ctx).Debugf("Batch %s sealed. Hash=%s", state.Persisted.ID, state.Persisted.Hash) diff --git a/internal/config/config.go b/internal/config/config.go index 0bc94c44ca..3ca8ebe99e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -184,6 +184,10 @@ var ( LogMaxAge = rootKey("log.maxAge") // LogCompress sets whether to compress backups LogCompress = rootKey("log.compress") + // MessageCacheSize + MessageCacheSize = rootKey("message.cache.size") + // MessageCacheTTL + MessageCacheTTL = rootKey("message.cache.ttl") // MetricsEnabled determines whether metrics will be instrumented and if the metrics server will be enabled or not MetricsEnabled = rootKey("metrics.enabled") // MetricsPath determines what path to serve the Prometheus metrics from @@ -353,6 +357,8 @@ func Reset() { viper.SetDefault(string(UIEnabled), true) viper.SetDefault(string(ValidatorCacheSize), "1Mb") viper.SetDefault(string(ValidatorCacheTTL), "1h") + viper.SetDefault(string(MessageCacheSize), "10Mb") + viper.SetDefault(string(MessageCacheTTL), "5m") viper.SetDefault(string(IdentityManagerCacheLimit), 100 /* items */) viper.SetDefault(string(IdentityManagerCacheTTL), 
"1h") diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index f88b86ff21..243ecc0426 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -36,7 +36,9 @@ import ( type Manager interface { CheckDatatype(ctx context.Context, ns string, datatype *fftypes.Datatype) error ValidateAll(ctx context.Context, data []*fftypes.Data) (valid bool, err error) - GetMessageData(ctx context.Context, msg *fftypes.Message, withValue bool) (data []*fftypes.Data, foundAll bool, err error) + GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data []*fftypes.Data, foundAllData bool, err error) + GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data []*fftypes.Data, foundAll bool, err error) + UpdateMessageCache(msg *fftypes.Message, data []*fftypes.Data) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, error) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, []*fftypes.DataAndBlob, error) VerifyNamespaceExists(ctx context.Context, ns string) error @@ -54,8 +56,27 @@ type dataManager struct { exchange dataexchange.Plugin validatorCache *ccache.Cache validatorCacheTTL time.Duration + messageCache *ccache.Cache + messageCacheTTL time.Duration } +type messageCacheEntry struct { + msg *fftypes.Message + data []*fftypes.Data + size int64 +} + +func (mce *messageCacheEntry) Size() int64 { + return mce.size +} + +type CacheReadOption int + +const ( + CRONone = iota + CRORequirePublicBlobRefs +) + func NewDataManager(ctx context.Context, di database.Plugin, pi sharedstorage.Plugin, dx dataexchange.Plugin) (Manager, error) { if di == nil || pi == nil || dx == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) @@ -76,6 +97,11 @@ func NewDataManager(ctx context.Context, di database.Plugin, pi sharedstorage.Pl 
ccache.Configure(). MaxSize(config.GetByteSize(config.ValidatorCacheSize)), ) + dm.messageCache = ccache.New( + // We use an LRU cache with a size-aware max + ccache.Configure(). + MaxSize(config.GetByteSize(config.MessageCacheSize)), + ) return dm, nil } @@ -133,15 +159,81 @@ func (dm *dataManager) getValidatorForDatatype(ctx context.Context, ns string, v return v, err } -// GetMessageData looks for all the data attached to the message. +// GetMessageWithDataCached performs a cached lookup of a message with all of the associated data. +// - Use this in performance sensitive code, but note mutable fields like the status of the +// message are NOT returned as they cannot be relied upon (due to the caching). +func (dm *dataManager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data []*fftypes.Data, foundAllData bool, err error) { + if mce := dm.queryMessageCache(ctx, msgID, options...); mce != nil { + return mce.msg, mce.data, true, nil + } + msg, err = dm.database.GetMessageByID(ctx, msgID) + if err != nil || msg == nil { + return nil, nil, false, err + } + data, foundAllData, err = dm.dataLookupAndCache(ctx, msg) + return msg, data, foundAllData, err +} + +// GetMessageDataCached looks for all the data attached to the message, including caching. +// It only returns persistence errors. 
// For all cases where the data is not found (or the hashes mismatch) -func (dm *dataManager) GetMessageData(ctx context.Context, msg *fftypes.Message, withValue bool) (data []*fftypes.Data, foundAll bool, err error) { +func (dm *dataManager) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data []*fftypes.Data, foundAll bool, err error) { + if mce := dm.queryMessageCache(ctx, msg.Header.ID, options...); mce != nil { + return mce.data, true, nil + } + return dm.dataLookupAndCache(ctx, msg) +} + +// dataLookupAndCache is the common function that can lookup and cache a message with its data +func (dm *dataManager) dataLookupAndCache(ctx context.Context, msg *fftypes.Message) (data []*fftypes.Data, foundAllData bool, err error) { + data, foundAllData, err = dm.getMessageData(ctx, msg) + if err != nil { + return nil, false, err + } + if !foundAllData { + return data, false, err + } + dm.UpdateMessageCache(msg, data) + return data, true, nil +} + +func (dm *dataManager) queryMessageCache(ctx context.Context, id *fftypes.UUID, options ...CacheReadOption) *messageCacheEntry { + cached := dm.messageCache.Get(id.String()) + if cached == nil { + return nil + } + mce := cached.Value().(*messageCacheEntry) + for _, opt := range options { + if opt == CRORequirePublicBlobRefs { + for idx, d := range mce.data { + if d.Blob != nil && d.Blob.Public == "" { + log.L(ctx).Debugf("Cache entry for data %d (%s) in message %s is missing public blob ref", idx, d.ID, mce.msg.Header.ID) + return nil + } + } + } + } + log.L(ctx).Debugf("Returning msg %s from cache", id) + return mce +} + +// UpdateMessageCache pushes an entry to the message cache. It is exposed out of the package, so that +// code which generates (or augments) message/data can populate the cache. 
+func (dm *dataManager) UpdateMessageCache(msg *fftypes.Message, data []*fftypes.Data) { + cacheEntry := &messageCacheEntry{ + msg: msg, + data: data, + size: msg.EstimateSize(true), + } + dm.messageCache.Set(msg.Header.ID.String(), cacheEntry, dm.messageCacheTTL) +} + +func (dm *dataManager) getMessageData(ctx context.Context, msg *fftypes.Message) (data []*fftypes.Data, foundAll bool, err error) { // Load all the data - must all be present for us to send data = make([]*fftypes.Data, 0, len(msg.Data)) foundAll = true for i, dataRef := range msg.Data { - d, err := dm.resolveRef(ctx, msg.Header.Namespace, dataRef, withValue) + d, err := dm.resolveRef(ctx, msg.Header.Namespace, dataRef, true) if err != nil { return nil, false, err } diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index 1168c5fb3c..b9ee402fdd 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -160,10 +160,10 @@ func TestGetMessageDataDBError(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(nil, fmt.Errorf("pop")) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ + data, foundAll, err := dm.GetMessageDataCached(ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Data: fftypes.DataRefs{{ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}}, - }, true) + }) assert.Nil(t, data) assert.False(t, foundAll) assert.EqualError(t, err, "pop") @@ -176,10 +176,10 @@ func TestGetMessageDataNilEntry(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(nil, nil) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ + data, foundAll, err := dm.GetMessageDataCached(ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Data: fftypes.DataRefs{nil}, - }, true) + }) assert.Empty(t, data) 
assert.False(t, foundAll) assert.NoError(t, err) @@ -192,10 +192,10 @@ func TestGetMessageDataNotFound(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(nil, nil) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ + data, foundAll, err := dm.GetMessageDataCached(ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Data: fftypes.DataRefs{{ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}}, - }, true) + }) assert.Empty(t, data) assert.False(t, foundAll) assert.NoError(t, err) @@ -212,10 +212,10 @@ func TestGetMessageDataHashMismatch(t *testing.T) { ID: dataID, Hash: fftypes.NewRandB32(), }, nil) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ + data, foundAll, err := dm.GetMessageDataCached(ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Data: fftypes.DataRefs{{ID: dataID, Hash: fftypes.NewRandB32()}}, - }, true) + }) assert.Empty(t, data) assert.False(t, foundAll) assert.NoError(t, err) @@ -229,19 +229,29 @@ func TestGetMessageDataOk(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) dataID := fftypes.NewUUID() hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(&fftypes.Data{ ID: dataID, Hash: hash, - }, nil) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, - Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, - }, true) + }, nil).Once() + data, foundAll, err := dm.GetMessageDataCached(ctx, msg) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + // Check cache kicks in for second call + data, foundAll, err = dm.GetMessageDataCached(ctx, msg) assert.NotEmpty(t, 
data) assert.Equal(t, *dataID, *data[0].ID) assert.True(t, foundAll) assert.NoError(t, err) + mdi.AssertExpectations(t) } func TestCheckDatatypeVerifiesTheSchema(t *testing.T) { @@ -806,3 +816,115 @@ func TestHydrateBatchMsgBadManifest(t *testing.T) { _, err := dm.HydrateBatch(ctx, bp) assert.Regexp(t, "FF10151", err) } + +func TestGetMessageWithDataOk(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + mdi := dm.database.(*databasemocks.Plugin) + dataID := fftypes.NewUUID() + hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + + mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil).Once() + mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(&fftypes.Data{ + ID: dataID, + Hash: hash, + }, nil).Once() + msgRet, data, foundAll, err := dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Equal(t, msg, msgRet) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + // Check cache kicks in for second call + msgRet, data, foundAll, err = dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Equal(t, msg, msgRet) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestGetMessageWithDataCRORequirePublicBlobRefs(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + mdi := dm.database.(*databasemocks.Plugin) + dataID := fftypes.NewUUID() + hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + + mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil).Twice() + mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(&fftypes.Data{ + ID: dataID, + Hash: hash, + 
Blob: &fftypes.BlobRef{ + Hash: fftypes.NewRandB32(), + }, + }, nil).Twice() + msgRet, data, foundAll, err := dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Equal(t, msg, msgRet) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + // Check cache does not kick in as we have missing blob ref + msgRet, data, foundAll, err = dm.GetMessageWithDataCached(ctx, msg.Header.ID, CRORequirePublicBlobRefs) + assert.Equal(t, msg, msgRet) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestGetMessageWithDataReadDataFail(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + mdi := dm.database.(*databasemocks.Plugin) + dataID := fftypes.NewUUID() + hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + + mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil) + mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(nil, fmt.Errorf("pop")) + _, _, _, err := dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) +} + +func TestGetMessageWithDataReadMessageFail(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + mdi := dm.database.(*databasemocks.Plugin) + dataID := fftypes.NewUUID() + hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + + mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, fmt.Errorf("pop")) + _, _, _, err := dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) +} diff --git a/internal/events/dx_callbacks.go b/internal/events/dx_callbacks.go index 
f915fd0ca2..34d3104d48 100644 --- a/internal/events/dx_callbacks.go +++ b/internal/events/dx_callbacks.go @@ -56,12 +56,8 @@ func (em *eventManager) MessageReceived(dx dataexchange.Plugin, peerID string, d } } - mf, err := em.privateBatchReceived(peerID, wrapper.Batch) - manifestBytes := []byte{} - if err == nil && mf != nil { - manifestBytes, err = json.Marshal(&mf) - } - return string(manifestBytes), err + manifestString, err := em.privateBatchReceived(peerID, wrapper.Batch) + return manifestString, err } // Check data exchange peer the data came from, has been registered to the org listed in the batch. @@ -113,7 +109,7 @@ func (em *eventManager) checkReceivedOffchainIdentity(ctx context.Context, peerI return node, nil } -func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch) (manifest *fftypes.Manifest, err error) { +func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch) (manifest string, err error) { // Retry for persistence errors (not validation errors) err = em.retry.Do(em.ctx, "private batch received", func(attempt int) (bool, error) { @@ -129,7 +125,7 @@ func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch return nil } - valid, err := em.persistBatch(ctx, batch) + persistedBatch, valid, err := em.persistBatch(ctx, batch) if err != nil || !valid { l.Errorf("Batch received from org=%s node=%s processing failed valid=%t: %s", node.Parent, node.Name, valid, err) return err // retry - persistBatch only returns retryable errors @@ -144,7 +140,7 @@ func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch return err } } - manifest = batch.Manifest() + manifest = persistedBatch.Manifest return nil }) }) diff --git a/internal/events/persist_batch.go b/internal/events/persist_batch.go index 3357e012d1..58fd93f20a 100644 --- a/internal/events/persist_batch.go +++ b/internal/events/persist_batch.go @@ -31,18 +31,19 @@ func (em *eventManager) 
persistBatchFromBroadcast(ctx context.Context /* db TX c return false, nil // This is not retryable. skip this batch } - return em.persistBatch(ctx, batch) + _, valid, err = em.persistBatch(ctx, batch) + return valid, err } // persistBatch performs very simple validation on each message/data element (hashes) and either persists // or discards them. Errors are returned only in the case of database failures, which should be retried. -func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, batch *fftypes.Batch) (valid bool, err error) { +func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, batch *fftypes.Batch) (persistedBatch *fftypes.BatchPersisted, valid bool, err error) { l := log.L(ctx) now := fftypes.Now() if batch.ID == nil || batch.Payload.TX.ID == nil { l.Errorf("Invalid batch '%s'. Missing ID or transaction ID (%v)", batch.ID, batch.Payload.TX.ID) - return false, nil // This is not retryable. skip this batch + return nil, false, nil // This is not retryable. skip this batch } switch batch.Payload.TX.Type { @@ -50,28 +51,43 @@ func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, bat case fftypes.TransactionTypeUnpinned: default: l.Errorf("Invalid batch '%s'. Invalid transaction type: %s", batch.ID, batch.Payload.TX.Type) - return false, nil // This is not retryable. skip this batch - } - - // Verify the hash calculation - hash := batch.Payload.Hash() - if batch.Hash == nil || *batch.Hash != *hash { - l.Errorf("Invalid batch '%s'. Hash does not match payload. Found=%s Expected=%s", batch.ID, hash, batch.Hash) - return false, nil // This is not retryable. skip this batch + return nil, false, nil // This is not retryable. skip this batch + } + + // Re-generate the manifest + manifest := batch.Manifest() + manifestString := manifest.String() + manifestHash := fftypes.HashString(manifestString) + + // Verify the hash calculation. 
+ if !manifestHash.Equals(batch.Hash) { + // To cope with existing batches written by v0.13 and older environments, we have to do a more expensive + // hashing of the whole payload before we reject. + payloadHash := batch.Payload.Hash() + if payloadHash.Equals(batch.Hash) { + l.Infof("Persisting migrated batch '%s'. Hash is a payload hash: %s", batch.ID, batch.Hash) + } else { + l.Errorf("Invalid batch '%s'. Hash does not match payload. Found=%s Expected=%s", batch.ID, manifestHash, batch.Hash) + return nil, false, nil // This is not retryable. skip this batch + } } // Set confirmed on the batch (the messages should not be confirmed at this point - that's the aggregator's job) - batch.Confirmed = now + persistedBatch = &fftypes.BatchPersisted{ + BatchHeader: batch.BatchHeader, + Manifest: manifestString, + Confirmed: now, + } - // Upsert the batch itself, ensuring the hash does not change - err = em.database.UpsertBatch(ctx, batch) + // Upsert the batch + err = em.database.UpsertBatch(ctx, persistedBatch) if err != nil { if err == database.HashMismatch { l.Errorf("Invalid batch '%s'. Batch hash mismatch with existing record", batch.ID) - return false, nil // This is not retryable. skip this batch + return nil, false, nil // This is not retryable. 
skip this batch } l.Errorf("Failed to insert batch '%s': %s", batch.ID, err) - return false, err // a persistence failure here is considered retryable (so returned) + return nil, false, err // a persistence failure here is considered retryable (so returned) } optimization := em.getOptimization(ctx, batch) @@ -79,18 +95,18 @@ func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, bat // Insert the data entries for i, data := range batch.Payload.Data { if err = em.persistBatchData(ctx, batch, i, data, optimization); err != nil { - return false, err + return nil, false, err } } // Insert the message entries for i, msg := range batch.Payload.Messages { if valid, err = em.persistBatchMessage(ctx, batch, i, msg, optimization); !valid || err != nil { - return valid, err + return nil, valid, err } } - return true, nil + return persistedBatch, true, nil } func (em *eventManager) getOptimization(ctx context.Context, batch *fftypes.Batch) database.UpsertOptimization { diff --git a/internal/events/persist_batch_test.go b/internal/events/persist_batch_test.go index bb90e00ddb..b75c1390a7 100644 --- a/internal/events/persist_batch_test.go +++ b/internal/events/persist_batch_test.go @@ -45,10 +45,12 @@ func TestPersistBatchFromBroadcast(t *testing.T) { } batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "did:firefly:org/12345", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "did:firefly:org/12345", + Key: "0x12345", + }, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index 442b798b34..e46a6154b6 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -87,11 +87,6 @@ func (bm *BatchManifest) String() string { return string(b) } -func (bm *BatchManifest) Hash() *Bytes32 { - var b32 Bytes32 = sha256.Sum256([]byte(bm.String())) - return &b32 -} - func (ma *BatchPayload) 
Hash() *Bytes32 { b, _ := json.Marshal(&ma) var b32 Bytes32 = sha256.Sum256(b) diff --git a/pkg/fftypes/batch_test.go b/pkg/fftypes/batch_test.go index 76f8d5cb4d..43ec0e6a7b 100644 --- a/pkg/fftypes/batch_test.go +++ b/pkg/fftypes/batch_test.go @@ -51,7 +51,7 @@ func TestSQLSerializedManifest(t *testing.T) { assert.Equal(t, msgID1, mf.Messages[0].ID) assert.Equal(t, msgID2, mf.Messages[1].ID) mfHash := sha256.Sum256([]byte(mfString)) - assert.Equal(t, batch.Manifest().Hash().String(), hex.EncodeToString(mfHash[:])) + assert.Equal(t, HashString(batch.Manifest().String()).String(), hex.EncodeToString(mfHash[:])) assert.NotEqual(t, batch.Payload.Hash().String(), hex.EncodeToString(mfHash[:])) diff --git a/pkg/fftypes/bytetypes.go b/pkg/fftypes/bytetypes.go index 0e12e43243..9be2610c5f 100644 --- a/pkg/fftypes/bytetypes.go +++ b/pkg/fftypes/bytetypes.go @@ -19,6 +19,7 @@ package fftypes import ( "context" "crypto/rand" + "crypto/sha256" "database/sql/driver" "encoding/hex" "hash" @@ -43,6 +44,12 @@ func HashResult(hash hash.Hash) *Bytes32 { return &b32 } +func HashString(s string) *Bytes32 { + hash := sha256.New() + hash.Write([]byte(s)) + return HashResult(hash) +} + func (b32 Bytes32) MarshalText() ([]byte, error) { hexstr := make([]byte, 64) hex.Encode(hexstr, b32[0:32]) From 6661b22409bab8437f1515da8c820dd1924d4777 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Mon, 7 Mar 2022 17:29:45 -0500 Subject: [PATCH 08/11] Move resolve data interface to return full data, so we can populate cache on upsert Signed-off-by: Peter Broadhurst --- internal/batch/batch_manager.go | 33 +++---- internal/batch/batch_manager_test.go | 42 ++++++--- internal/batch/batch_processor.go | 4 +- internal/batch/batch_processor_test.go | 2 +- internal/broadcast/datatype_test.go | 1 + internal/broadcast/manager_test.go | 6 +- internal/broadcast/message.go | 8 +- internal/broadcast/message_test.go | 15 +-- internal/broadcast/namespace_test.go | 1 + internal/broadcast/tokenpool_test.go | 1 
+ internal/data/data_manager.go | 59 +++++------- internal/data/data_manager_test.go | 60 ++++++------ internal/orchestrator/data_query.go | 4 +- internal/privatemessaging/groupmanager.go | 2 +- .../privatemessaging/groupmanager_test.go | 12 +-- internal/privatemessaging/message.go | 18 ++-- internal/privatemessaging/message_test.go | 19 ++-- internal/syncasync/sync_async_bridge.go | 2 +- internal/syncasync/sync_async_bridge_test.go | 8 +- mocks/datamocks/manager.go | 93 +++++++++++++++---- pkg/fftypes/data.go | 14 +++ pkg/fftypes/data_test.go | 26 ++++++ 22 files changed, 277 insertions(+), 153 deletions(-) diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 145a13fbe7..0d1e616b90 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -142,7 +142,7 @@ func (bm *batchManager) NewMessages() chan<- int64 { return bm.newMessages } -func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fftypes.MessageType, group *fftypes.Bytes32, namespace string, identity *fftypes.SignerRef) (*batchProcessor, error) { +func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fftypes.MessageType, group *fftypes.Bytes32, namespace string, signer *fftypes.SignerRef) (*batchProcessor, error) { bm.dispatcherMux.Lock() defer bm.dispatcherMux.Unlock() @@ -151,7 +151,7 @@ func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fft if !ok { return nil, i18n.NewError(bm.ctx, i18n.MsgUnregisteredBatchType, dispatcherKey) } - name := bm.getProcessorKey(namespace, identity, group) + name := bm.getProcessorKey(namespace, signer, group) processor, ok := dispatcher.processors[name] if !ok { processor = newBatchProcessor( @@ -164,7 +164,7 @@ func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fft txType: txType, dispatcherName: dispatcher.name, namespace: namespace, - identity: *identity, + signer: *signer, group: group, dispatch: dispatcher.handler, 
}, @@ -176,10 +176,14 @@ func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fft return processor, nil } -func (bm *batchManager) assembleMessageData(msg *fftypes.Message) (data []*fftypes.Data, err error) { +func (bm *batchManager) assembleMessageData(processor *batchProcessor, msg *fftypes.Message) (retData []*fftypes.Data, err error) { + var cro []data.CacheReadOption + if processor.conf.DispatcherOptions.BatchType == fftypes.BatchTypeBroadcast { + cro = append(cro, data.CRORequirePublicBlobRefs) + } var foundAll = false err = bm.retry.Do(bm.ctx, fmt.Sprintf("assemble message %s data", msg.Header.ID), func(attempt int) (retry bool, err error) { - data, foundAll, err = bm.data.GetMessageData(bm.ctx, msg, true) + retData, foundAll, err = bm.data.GetMessageDataCached(bm.ctx, msg, cro...) // continual retry for persistence error (distinct from not-found) return true, err }) @@ -189,7 +193,7 @@ func (bm *batchManager) assembleMessageData(msg *fftypes.Message) (data []*fftyp if !foundAll { return nil, i18n.NewError(bm.ctx, i18n.MsgDataNotFound, msg.Header.ID) } - return data, nil + return retData, nil } func (bm *batchManager) readPage() ([]*fftypes.Message, error) { @@ -225,17 +229,19 @@ func (bm *batchManager) messageSequencer() { if len(msgs) > 0 { for _, msg := range msgs { - data, err := bm.assembleMessageData(msg) + processor, err := bm.getProcessor(msg.Header.TxType, msg.Header.Type, msg.Header.Group, msg.Header.Namespace, &msg.Header.SignerRef) if err != nil { - l.Errorf("Failed to retrieve message data for %s: %s", msg.Header.ID, err) + l.Errorf("Failed to dispatch message %s: %s", msg.Header.ID, err) continue } - err = bm.dispatchMessage(msg, data...) + data, err := bm.assembleMessageData(processor, msg) if err != nil { - l.Errorf("Failed to dispatch message %s: %s", msg.Header.ID, err) + l.Errorf("Failed to retrieve message data for %s: %s", msg.Header.ID, err) continue } + + bm.dispatchMessage(processor, msg, data...) 
} // Next time round only read after the messages we just processed (unless we get a tap to rewind) @@ -296,19 +302,14 @@ func (bm *batchManager) waitForNewMessages() (done bool) { } } -func (bm *batchManager) dispatchMessage(msg *fftypes.Message, data ...*fftypes.Data) error { +func (bm *batchManager) dispatchMessage(processor *batchProcessor, msg *fftypes.Message, data ...*fftypes.Data) { l := log.L(bm.ctx) - processor, err := bm.getProcessor(msg.Header.TxType, msg.Header.Type, msg.Header.Group, msg.Header.Namespace, &msg.Header.SignerRef) - if err != nil { - return err - } l.Debugf("Dispatching message %s to %s batch processor %s", msg.Header.ID, msg.Header.Type, processor.conf.name) work := &batchWork{ msg: msg, data: data, } processor.newWork <- work - return nil } func (bm *batchManager) reapQuiescing() { diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index c38fd5ca32..742e9c8b96 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" @@ -97,7 +98,7 @@ func TestE2EDispatchBroadcast(t *testing.T) { ID: dataID1, Hash: dataHash, } - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{data}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{data}, true, nil) mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -214,7 +215,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { ID: dataID1, Hash: dataHash, } - 
mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{data}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{data}, true, nil) mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) // pins @@ -271,7 +272,7 @@ func TestDispatchUnknownType(t *testing.T) { msg := &fftypes.Message{} mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{}, true, nil) err := bm.Start() assert.NoError(t, err) @@ -293,8 +294,7 @@ func TestGetInvalidBatchTypeMsg(t *testing.T) { mni := &sysmessagingmocks.LocalNodeInfo{} bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) defer bm.Close() - msg := &fftypes.Message{Header: fftypes.MessageHeader{}} - err := bm.(*batchManager).dispatchMessage(msg) + _, err := bm.(*batchManager).getProcessor(fftypes.BatchTypeBroadcast, "wrong", nil, "ns1", &fftypes.SignerRef{}) assert.Regexp(t, "FF10126", err) } @@ -317,6 +317,12 @@ func TestMessageSequencerMissingMessageData(t *testing.T) { mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeNone, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, + func(c context.Context, state *DispatchState) error { + return nil + }, + DispatcherOptions{BatchType: fftypes.BatchTypeBroadcast}, + ) dataID := fftypes.NewUUID() mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything). 
@@ -324,7 +330,9 @@ func TestMessageSequencerMissingMessageData(t *testing.T) { { Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), + Type: fftypes.MessageTypeBroadcast, Namespace: "ns1", + TxType: fftypes.TransactionTypeNone, }, Data: []*fftypes.DataRef{ {ID: dataID}, @@ -335,7 +343,7 @@ func TestMessageSequencerMissingMessageData(t *testing.T) { }). Once() mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything, data.CRORequirePublicBlobRefs).Return([]*fftypes.Data{}, false, nil) bm.(*batchManager).messageSequencer() @@ -372,7 +380,7 @@ func TestMessageSequencerUpdateMessagesFail(t *testing.T) { {ID: dataID}, }}, }, nil, nil) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{{ID: dataID}}, true, nil) mdi.On("InsertTransaction", mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) // transaction submit mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -425,7 +433,7 @@ func TestMessageSequencerDispatchFail(t *testing.T) { {ID: dataID}, }}, }, nil, nil) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{{ID: dataID}}, true, nil) mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything).Return(nil) bm.(*batchManager).messageSequencer() @@ -463,7 +471,7 @@ func TestMessageSequencerUpdateBatchFail(t *testing.T) { {ID: dataID}, }}, }, nil, nil) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + mdm.On("GetMessageDataCached", 
mock.Anything, mock.Anything).Return([]*fftypes.Data{{ID: dataID}}, true, nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("fizzle")) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything) rag.RunFn = func(a mock.Arguments) { @@ -515,8 +523,9 @@ func TestAssembleMessageDataNilData(t *testing.T) { mni := &sysmessagingmocks.LocalNodeInfo{} bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) bm.Close() - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, nil) - _, err := bm.(*batchManager).assembleMessageData(&fftypes.Message{ + _, bp := newTestBatchProcessor(func(c context.Context, ds *DispatchState) error { return nil }) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, nil) + _, err := bm.(*batchManager).assembleMessageData(bp, &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), }, @@ -530,9 +539,10 @@ func TestGetMessageDataFail(t *testing.T) { mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, fmt.Errorf("pop")) bm.Close() - _, _ = bm.(*batchManager).assembleMessageData(&fftypes.Message{ + _, bp := newTestBatchProcessor(func(c context.Context, ds *DispatchState) error { return nil }) + _, _ = bm.(*batchManager).assembleMessageData(bp, &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), }, @@ -548,9 +558,11 @@ func TestGetMessageNotFound(t *testing.T) { mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything, 
data.CRORequirePublicBlobRefs).Return(nil, false, nil) bm.Close() - _, err := bm.(*batchManager).assembleMessageData(&fftypes.Message{ + _, bp := newTestBatchProcessor(func(c context.Context, ds *DispatchState) error { return nil }) + bp.conf.DispatcherOptions.BatchType = fftypes.BatchTypeBroadcast + _, err := bm.(*batchManager).assembleMessageData(bp, &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), }, diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 2bc349ebd5..50f8a7f3bb 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -45,7 +45,7 @@ type batchProcessorConf struct { dispatcherName string txType fftypes.TransactionType namespace string - identity fftypes.SignerRef + signer fftypes.SignerRef group *fftypes.Bytes32 dispatch DispatchHandler } @@ -392,7 +392,7 @@ func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWor ID: id, Type: bp.conf.DispatcherOptions.BatchType, Namespace: bp.conf.namespace, - SignerRef: bp.conf.identity, + SignerRef: bp.conf.signer, Group: bp.conf.group, Node: bp.ni.GetNodeUUID(bp.ctx), }, diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index 7c69fc61e1..e22256a694 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -38,7 +38,7 @@ func newTestBatchProcessor(dispatch DispatchHandler) (*databasemocks.Plugin, *ba bp := newBatchProcessor(context.Background(), mni, mdi, &batchProcessorConf{ namespace: "ns1", txType: fftypes.TransactionTypeBatchPin, - identity: fftypes.SignerRef{Author: "did:firefly:org/abcd", Key: "0x12345"}, + signer: fftypes.SignerRef{Author: "did:firefly:org/abcd", Key: "0x12345"}, dispatch: dispatch, DispatcherOptions: DispatcherOptions{ BatchMaxSize: 10, diff --git a/internal/broadcast/datatype_test.go b/internal/broadcast/datatype_test.go index 97381682ba..fe9b35300d 100644 --- 
a/internal/broadcast/datatype_test.go +++ b/internal/broadcast/datatype_test.go @@ -145,6 +145,7 @@ func TestBroadcastOk(t *testing.T) { mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) mdm.On("VerifyNamespaceExists", mock.Anything, "ns1").Return(nil) mdm.On("CheckDatatype", mock.Anything, "ns1", mock.Anything).Return(nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) _, err := bm.BroadcastDatatype(context.Background(), "ns1", &fftypes.Datatype{ diff --git a/internal/broadcast/manager_test.go b/internal/broadcast/manager_test.go index b9fc0f63b6..828361ebc5 100644 --- a/internal/broadcast/manager_test.go +++ b/internal/broadcast/manager_test.go @@ -109,7 +109,10 @@ func TestBroadcastMessageGood(t *testing.T) { defer cancel() msg := &fftypes.MessageInOut{} - bm.database.(*databasemocks.Plugin).On("UpsertMessage", mock.Anything, &msg.Message, database.UpsertOptimizationNew).Return(nil) + mdi := bm.database.(*databasemocks.Plugin) + mdi.On("UpsertMessage", mock.Anything, &msg.Message, database.UpsertOptimizationNew).Return(nil) + mdm := bm.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() broadcast := broadcastSender{ mgr: bm, @@ -120,6 +123,7 @@ func TestBroadcastMessageGood(t *testing.T) { bm.Start() bm.WaitStop() + mdi.AssertExpectations(t) } func TestBroadcastMessageBad(t *testing.T) { diff --git a/internal/broadcast/message.go b/internal/broadcast/message.go index e26da5cb7a..3cf4d2db31 100644 --- a/internal/broadcast/message.go +++ b/internal/broadcast/message.go @@ -53,6 +53,7 @@ type broadcastSender struct { mgr *broadcastManager namespace string msg *fftypes.MessageInOut + data []*fftypes.Data resolved bool } @@ -141,8 +142,9 @@ func (s *broadcastSender) resolve(ctx context.Context) ([]*fftypes.DataAndBlob, } // The data manager is responsible for 
the heavy lifting of storing/validating all our in-line data elements - dataRefs, dataToPublish, err := s.mgr.data.ResolveInlineDataBroadcast(ctx, s.namespace, s.msg.InlineData) - s.msg.Message.Data = dataRefs + data, dataToPublish, err := s.mgr.data.ResolveInlineDataBroadcast(ctx, s.namespace, s.msg.InlineData) + s.data = data + s.msg.Message.Data = data.Refs() return dataToPublish, err } @@ -167,6 +169,8 @@ func (s *broadcastSender) sendInternal(ctx context.Context, method sendMethod) ( if err := s.mgr.database.UpsertMessage(ctx, &s.msg.Message, database.UpsertOptimizationNew); err != nil { return err } + s.mgr.data.UpdateMessageCache(&s.msg.Message, s.data) + s.data = nil // no need to keep hold of this log.L(ctx).Infof("Sent broadcast message %s:%s sequence=%d", s.msg.Header.Namespace, s.msg.Header.ID, s.msg.Sequence) return err diff --git a/internal/broadcast/message_test.go b/internal/broadcast/message_test.go index b0066517fb..bced577943 100644 --- a/internal/broadcast/message_test.go +++ b/internal/broadcast/message_test.go @@ -50,9 +50,10 @@ func TestBroadcastMessageOk(t *testing.T) { var fn = a[1].(func(context.Context) error) rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, }, []*fftypes.DataAndBlob{}, nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() mdi.On("UpsertMessage", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) @@ -92,9 +93,10 @@ func TestBroadcastMessageWaitConfirmOk(t *testing.T) { var fn = a[1].(func(context.Context) error) rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", 
mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, }, []*fftypes.DataAndBlob{}, nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) replyMsg := &fftypes.Message{ @@ -150,7 +152,7 @@ func TestBroadcastMessageWithBlobsOk(t *testing.T) { var fn = a[1].(func(context.Context) error) rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Hash: fftypes.NewRandB32()}, }, []*fftypes.DataAndBlob{ { @@ -166,6 +168,7 @@ func TestBroadcastMessageWithBlobsOk(t *testing.T) { }, }, }, nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() mdx.On("DownloadBLOB", ctx, "blob/1").Return(ioutil.NopCloser(bytes.NewReader([]byte(`some data`))), nil) mps.On("PublishData", ctx, mock.MatchedBy(func(reader io.ReadCloser) bool { b, err := ioutil.ReadAll(reader) @@ -215,7 +218,7 @@ func TestBroadcastMessageTooLarge(t *testing.T) { var fn = a[1].(func(context.Context) error) rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), ValueSize: 1000001}, }, []*fftypes.DataAndBlob{}, nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) @@ -302,7 +305,7 @@ func TestPublishBlobsSendMessageFail(t *testing.T) { rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} } mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) - 
mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Hash: fftypes.NewRandB32()}, }, []*fftypes.DataAndBlob{ { @@ -356,7 +359,7 @@ func TestBroadcastPrepare(t *testing.T) { var fn = a[1].(func(context.Context) error) rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, }, []*fftypes.DataAndBlob{}, nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) diff --git a/internal/broadcast/namespace_test.go b/internal/broadcast/namespace_test.go index bcf5066b84..cd00abcd6d 100644 --- a/internal/broadcast/namespace_test.go +++ b/internal/broadcast/namespace_test.go @@ -71,6 +71,7 @@ func TestBroadcastNamespaceBroadcastOk(t *testing.T) { mdi.On("GetNamespace", mock.Anything, mock.Anything).Return(&fftypes.Namespace{Name: "ns1"}, nil) mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) mdm.On("CheckDatatype", mock.Anything, "ns1", mock.Anything).Return(nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) buff := strings.Builder{} buff.Grow(4097) diff --git a/internal/broadcast/tokenpool_test.go b/internal/broadcast/tokenpool_test.go index 1b81ef3f88..c5611b04ed 100644 --- a/internal/broadcast/tokenpool_test.go +++ b/internal/broadcast/tokenpool_test.go @@ -129,6 +129,7 @@ func TestBroadcastTokenPoolOk(t *testing.T) { mim.On("ResolveInputSigningIdentity", mock.Anything, "ns1", mock.Anything).Return(nil) mdm.On("VerifyNamespaceExists", mock.Anything, "ns1").Return(nil) + mdm.On("UpdateMessageCache", 
mock.Anything, mock.Anything).Return() mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index 243ecc0426..d6f780b8dc 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -39,8 +39,8 @@ type Manager interface { GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data []*fftypes.Data, foundAllData bool, err error) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data []*fftypes.Data, foundAll bool, err error) UpdateMessageCache(msg *fftypes.Message, data []*fftypes.Data) - ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, error) - ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, []*fftypes.DataAndBlob, error) + ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, error) + ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, []*fftypes.DataAndBlob, error) VerifyNamespaceExists(ctx context.Context, ns string) error UploadJSON(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (*fftypes.Data, error) @@ -73,8 +73,7 @@ func (mce *messageCacheEntry) Size() int64 { type CacheReadOption int const ( - CRONone = iota - CRORequirePublicBlobRefs + CRORequirePublicBlobRefs CacheReadOption = iota ) func NewDataManager(ctx context.Context, di database.Plugin, pi sharedstorage.Plugin, dx dataexchange.Plugin) (Manager, error) { @@ -161,7 +160,7 @@ func (dm *dataManager) getValidatorForDatatype(ctx context.Context, ns string, v // GetMessageWithData performs a cached lookup of a message with all of the 
associated data. // - Use this in performance sensitive code, but note mutable fields like the status of the -// message are NOT returned as they cannot be relied upon (due to the caching). +// message CANNOT be relied upon (due to the caching). func (dm *dataManager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data []*fftypes.Data, foundAllData bool, err error) { if mce := dm.queryMessageCache(ctx, msgID, options...); mce != nil { return mce.msg, mce.data, true, nil @@ -233,7 +232,7 @@ func (dm *dataManager) getMessageData(ctx context.Context, msg *fftypes.Message) data = make([]*fftypes.Data, 0, len(msg.Data)) foundAll = true for i, dataRef := range msg.Data { - d, err := dm.resolveRef(ctx, msg.Header.Namespace, dataRef, true) + d, err := dm.resolveRef(ctx, msg.Header.Namespace, dataRef) if err != nil { return nil, false, err } @@ -267,12 +266,12 @@ func (dm *dataManager) ValidateAll(ctx context.Context, data []*fftypes.Data) (v return true, nil } -func (dm *dataManager) resolveRef(ctx context.Context, ns string, dataRef *fftypes.DataRef, withValue bool) (*fftypes.Data, error) { +func (dm *dataManager) resolveRef(ctx context.Context, ns string, dataRef *fftypes.DataRef) (*fftypes.Data, error) { if dataRef == nil || dataRef.ID == nil { log.L(ctx).Warnf("data is nil") return nil, nil } - d, err := dm.database.GetDataByID(ctx, dataRef.ID, withValue) + d, err := dm.database.GetDataByID(ctx, dataRef.ID, true) if err != nil { return nil, err } @@ -359,18 +358,14 @@ func (dm *dataManager) validateAndStore(ctx context.Context, ns string, validato return data, blob, nil } -func (dm *dataManager) validateAndStoreInlined(ctx context.Context, ns string, value *fftypes.DataRefOrValue) (*fftypes.Data, *fftypes.Blob, *fftypes.DataRef, error) { +func (dm *dataManager) validateAndStoreInlined(ctx context.Context, ns string, value *fftypes.DataRefOrValue) (*fftypes.Data, *fftypes.Blob, error) { data, blob, 
err := dm.validateAndStore(ctx, ns, value.Validator, value.Datatype, value.Value, value.Blob) if err != nil { - return nil, nil, nil, err + return nil, nil, err } // Return a ref to the newly saved data - return data, blob, &fftypes.DataRef{ - ID: data.ID, - Hash: data.Hash, - ValueSize: data.ValueSize, - }, nil + return data, blob, nil } func (dm *dataManager) UploadJSON(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (*fftypes.Data, error) { @@ -378,49 +373,45 @@ func (dm *dataManager) UploadJSON(ctx context.Context, ns string, inData *fftype return data, err } -func (dm *dataManager) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (refs fftypes.DataRefs, err error) { - refs, _, err = dm.resolveInlineData(ctx, ns, inData, false) - return refs, err +func (dm *dataManager) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, error) { + data, _, err := dm.resolveInlineData(ctx, ns, inData, false) + return data, err } // ResolveInlineDataBroadcast ensures the data object are stored, and returns a list of any data that does not currently // have a shared storage reference, and hence must be published to sharedstorage before a broadcast message can be sent. // We deliberately do NOT perform those publishes inside of this action, as we expect to be in a RunAsGroup (trnasaction) // at this point, and hence expensive things like a multi-megabyte upload should be decoupled by our caller. 
-func (dm *dataManager) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (refs fftypes.DataRefs, dataToPublish []*fftypes.DataAndBlob, err error) { +func (dm *dataManager) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (data fftypes.DataArray, dataToPublish []*fftypes.DataAndBlob, err error) { return dm.resolveInlineData(ctx, ns, inData, true) } -func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData fftypes.InlineData, broadcast bool) (refs fftypes.DataRefs, dataToPublish []*fftypes.DataAndBlob, err error) { +func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData fftypes.InlineData, broadcast bool) (data fftypes.DataArray, dataToPublish []*fftypes.DataAndBlob, err error) { - refs = make(fftypes.DataRefs, len(inData)) + data = make(fftypes.DataArray, len(inData)) if broadcast { dataToPublish = make([]*fftypes.DataAndBlob, 0, len(inData)) } for i, dataOrValue := range inData { - var data *fftypes.Data + var d *fftypes.Data var blob *fftypes.Blob switch { case dataOrValue.ID != nil: // If an ID is supplied, then it must be a reference to existing data - data, err = dm.resolveRef(ctx, ns, &dataOrValue.DataRef, false /* do not need the value */) + d, err = dm.resolveRef(ctx, ns, &dataOrValue.DataRef) if err != nil { return nil, nil, err } - if data == nil { + if d == nil { return nil, nil, i18n.NewError(ctx, i18n.MsgDataReferenceUnresolvable, i) } - refs[i] = &fftypes.DataRef{ - ID: data.ID, - Hash: data.Hash, - ValueSize: data.ValueSize, - } - if blob, err = dm.resolveBlob(ctx, data.Blob); err != nil { + data[i] = d + if blob, err = dm.resolveBlob(ctx, d.Blob); err != nil { return nil, nil, err } case dataOrValue.Value != nil || dataOrValue.Blob != nil: // We've got a Value, so we can validate + store it - if data, blob, refs[i], err = dm.validateAndStoreInlined(ctx, ns, dataOrValue); err != nil { + if data[i], blob, err = 
dm.validateAndStoreInlined(ctx, ns, dataOrValue); err != nil { return nil, nil, err } default: @@ -430,14 +421,14 @@ func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData // If the data is being resolved for public broadcast, and there is a blob attachment, that blob // needs to be published by our calller - if broadcast && blob != nil && data.Blob.Public == "" { + if broadcast && blob != nil && d.Blob.Public == "" { dataToPublish = append(dataToPublish, &fftypes.DataAndBlob{ - Data: data, + Data: d, Blob: blob, }) } } - return refs, dataToPublish, nil + return data, dataToPublish, nil } // HydrateBatch fetches the full messages for a persited batch, ready for transmission diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index b9ee402fdd..3ac8a45d49 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -266,9 +266,9 @@ func TestResolveInlineDataEmpty(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{}) + data, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{}) assert.NoError(t, err) - assert.Empty(t, refs) + assert.Empty(t, data) } @@ -280,19 +280,19 @@ func TestResolveInlineDataRefIDOnlyOK(t *testing.T) { dataID := fftypes.NewUUID() dataHash := fftypes.NewRandB32() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns1", Hash: dataHash, }, nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + data, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ {DataRef: fftypes.DataRef{ID: dataID}}, }) assert.NoError(t, err) - assert.Len(t, refs, 1) - assert.Equal(t, dataID, refs[0].ID) - assert.Equal(t, dataHash, refs[0].Hash) + assert.Len(t, data, 1) + assert.Equal(t, dataID, data[0].ID) + assert.Equal(t, dataHash, 
data[0].Hash) } func TestResolveInlineDataBroadcastDataToPublish(t *testing.T) { @@ -304,7 +304,7 @@ func TestResolveInlineDataBroadcastDataToPublish(t *testing.T) { dataHash := fftypes.NewRandB32() blobHash := fftypes.NewRandB32() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns1", Hash: dataHash, @@ -338,7 +338,7 @@ func TestResolveInlineDataBroadcastResolveBlobFail(t *testing.T) { dataHash := fftypes.NewRandB32() blobHash := fftypes.NewRandB32() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns1", Hash: dataHash, @@ -362,17 +362,17 @@ func TestResolveInlineDataRefBadNamespace(t *testing.T) { dataID := fftypes.NewUUID() dataHash := fftypes.NewRandB32() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns2", Hash: dataHash, }, nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + data, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ {DataRef: fftypes.DataRef{ID: dataID, Hash: dataHash}}, }) assert.Regexp(t, "FF10204", err) - assert.Empty(t, refs) + assert.Empty(t, data) } func TestResolveInlineDataRefBadHash(t *testing.T) { @@ -383,17 +383,17 @@ func TestResolveInlineDataRefBadHash(t *testing.T) { dataID := fftypes.NewUUID() dataHash := fftypes.NewRandB32() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns2", Hash: dataHash, }, nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + data, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ {DataRef: fftypes.DataRef{ID: dataID, Hash: fftypes.NewRandB32()}}, }) assert.Regexp(t, "FF10204", err) - 
assert.Empty(t, refs) + assert.Empty(t, data) } func TestResolveInlineDataRefLookkupFail(t *testing.T) { @@ -403,7 +403,7 @@ func TestResolveInlineDataRefLookkupFail(t *testing.T) { dataID := fftypes.NewUUID() - mdi.On("GetDataByID", ctx, dataID, false).Return(nil, fmt.Errorf("pop")) + mdi.On("GetDataByID", ctx, dataID, true).Return(nil, fmt.Errorf("pop")) _, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ {DataRef: fftypes.DataRef{ID: dataID, Hash: fftypes.NewRandB32()}}, @@ -418,13 +418,13 @@ func TestResolveInlineDataValueNoValidatorOK(t *testing.T) { mdi.On("UpsertData", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + data, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ {Value: fftypes.JSONAnyPtr(`{"some":"json"}`)}, }) assert.NoError(t, err) - assert.Len(t, refs, 1) - assert.NotNil(t, refs[0].ID) - assert.NotNil(t, refs[0].Hash) + assert.Len(t, data, 1) + assert.NotNil(t, data[0].ID) + assert.NotNil(t, data[0].Hash) } func TestResolveInlineDataValueNoValidatorStoreFail(t *testing.T) { @@ -462,7 +462,7 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { }`), }, nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + data, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ { Datatype: &fftypes.DatatypeRef{ Name: "customer", @@ -472,9 +472,9 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { }, }) assert.NoError(t, err) - assert.Len(t, refs, 1) - assert.NotNil(t, refs[0].ID) - assert.NotNil(t, refs[0].Hash) + assert.Len(t, data, 1) + assert.NotNil(t, data[0].ID) + assert.NotNil(t, data[0].Hash) _, err = dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ { @@ -517,7 +517,7 @@ func TestValidateAndStoreLoadNilRef(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", 
&fftypes.DataRefOrValue{ + _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ Validator: fftypes.ValidatorTypeJSON, Datatype: nil, }) @@ -530,7 +530,7 @@ func TestValidateAndStoreLoadValidatorUnknown(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ Validator: "wrong!", Datatype: &fftypes.DatatypeRef{ Name: "customer", @@ -547,7 +547,7 @@ func TestValidateAndStoreLoadBadRef(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ Datatype: &fftypes.DatatypeRef{ // Missing name }, @@ -561,7 +561,7 @@ func TestValidateAndStoreNotFound(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ Datatype: &fftypes.DatatypeRef{ Name: "customer", Version: "0.0.1", @@ -577,7 +577,7 @@ func TestValidateAndStoreBlobError(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) blobHash := fftypes.NewRandB32() mdi.On("GetBlobMatchingHash", mock.Anything, blobHash).Return(nil, fmt.Errorf("pop")) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ Blob: &fftypes.BlobRef{ Hash: blobHash, }, @@ -592,7 +592,7 @@ func TestValidateAndStoreBlobNotFound(t 
*testing.T) { mdi := dm.database.(*databasemocks.Plugin) blobHash := fftypes.NewRandB32() mdi.On("GetBlobMatchingHash", mock.Anything, blobHash).Return(nil, nil) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ Blob: &fftypes.BlobRef{ Hash: blobHash, }, diff --git a/internal/orchestrator/data_query.go b/internal/orchestrator/data_query.go index eac1d89a03..3a30e8ae2f 100644 --- a/internal/orchestrator/data_query.go +++ b/internal/orchestrator/data_query.go @@ -84,7 +84,7 @@ func (or *orchestrator) fetchMessageData(ctx context.Context, msg *fftypes.Messa Message: *msg, } // Lookup the full data - data, _, err := or.data.GetMessageData(ctx, msg, true) + data, _, err := or.data.GetMessageDataCached(ctx, msg) if err != nil { return nil, err } @@ -188,7 +188,7 @@ func (or *orchestrator) GetMessageData(ctx context.Context, ns, id string) ([]*f if err != nil || msg == nil { return nil, err } - data, _, err := or.data.GetMessageData(ctx, msg, true) + data, _, err := or.data.GetMessageDataCached(ctx, msg) return data, err } diff --git a/internal/privatemessaging/groupmanager.go b/internal/privatemessaging/groupmanager.go index 4b46e2fa9e..10d260ba61 100644 --- a/internal/privatemessaging/groupmanager.go +++ b/internal/privatemessaging/groupmanager.go @@ -206,7 +206,7 @@ func (gm *groupManager) getGroupNodes(ctx context.Context, groupHash *fftypes.By func (gm *groupManager) ResolveInitGroup(ctx context.Context, msg *fftypes.Message) (*fftypes.Group, error) { if msg.Header.Tag == fftypes.SystemTagDefineGroup { // Store the new group - data, foundAll, err := gm.data.GetMessageData(ctx, msg, true) + data, foundAll, err := gm.data.GetMessageDataCached(ctx, msg) if err != nil || !foundAll || len(data) == 0 { log.L(ctx).Warnf("Group %s definition in message %s invalid: missing data", msg.Header.Group, msg.Header.ID) return nil, err diff --git 
a/internal/privatemessaging/groupmanager_test.go b/internal/privatemessaging/groupmanager_test.go index 7f2dc26ccf..31fcd76fd9 100644 --- a/internal/privatemessaging/groupmanager_test.go +++ b/internal/privatemessaging/groupmanager_test.go @@ -86,7 +86,7 @@ func TestResolveInitGroupMissingData(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{}, false, nil) + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{}, false, nil) _, err := pm.ResolveInitGroup(pm.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ @@ -109,7 +109,7 @@ func TestResolveInitGroupBadData(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`!json`)}, }, true, nil) @@ -134,7 +134,7 @@ func TestResolveInitGroupBadValidation(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`{}`)}, }, true, nil) @@ -172,7 +172,7 @@ func TestResolveInitGroupBadGroupID(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) @@ -210,7 +210,7 @@ func TestResolveInitGroupUpsertFail(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, 
mock.Anything).Return([]*fftypes.Data{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) mdi := pm.database.(*databasemocks.Plugin) @@ -250,7 +250,7 @@ func TestResolveInitGroupNewOk(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) mdi := pm.database.(*databasemocks.Plugin) diff --git a/internal/privatemessaging/message.go b/internal/privatemessaging/message.go index b7a82ed685..ca91c52383 100644 --- a/internal/privatemessaging/message.go +++ b/internal/privatemessaging/message.go @@ -66,6 +66,7 @@ type messageSender struct { mgr *privateMessaging namespace string msg *fftypes.MessageInOut + data []*fftypes.Data resolved bool } @@ -115,7 +116,7 @@ func (s *messageSender) resolveAndSend(ctx context.Context, method sendMethod) e // We optimize the DB storage of all the parts of the message using transaction semantics (assuming those are supported by the DB plugin) err := s.mgr.database.RunAsGroup(ctx, func(ctx context.Context) (err error) { if !s.resolved { - if err := s.resolve(ctx); err != nil { + if s.data, err = s.resolve(ctx); err != nil { return err } msgSizeEstimate := s.msg.EstimateSize(true) @@ -140,21 +141,22 @@ func (s *messageSender) resolveAndSend(ctx context.Context, method sendMethod) e return s.sendInternal(ctx, method) } -func (s *messageSender) resolve(ctx context.Context) error { +func (s *messageSender) resolve(ctx context.Context) ([]*fftypes.Data, error) { // Resolve the sending identity if err := s.mgr.identity.ResolveInputSigningIdentity(ctx, s.msg.Header.Namespace, &s.msg.Header.SignerRef); err != nil { - return i18n.WrapError(ctx, err, i18n.MsgAuthorInvalid) + return nil, i18n.WrapError(ctx, err, i18n.MsgAuthorInvalid) } // Resolve the member list into a 
group if err := s.mgr.resolveRecipientList(ctx, s.msg); err != nil { - return err + return nil, err } // The data manager is responsible for the heavy lifting of storing/validating all our in-line data elements - dataRefs, err := s.mgr.data.ResolveInlineDataPrivate(ctx, s.namespace, s.msg.InlineData) - s.msg.Message.Data = dataRefs - return err + data, err := s.mgr.data.ResolveInlineDataPrivate(ctx, s.namespace, s.msg.InlineData) + s.msg.Message.Data = data.Refs() + s.data = data + return data, err } func (s *messageSender) sendInternal(ctx context.Context, method sendMethod) error { @@ -180,6 +182,8 @@ func (s *messageSender) sendInternal(ctx context.Context, method sendMethod) err if err := s.mgr.database.UpsertMessage(ctx, &s.msg.Message, database.UpsertOptimizationNew); err != nil { return err } + s.mgr.data.UpdateMessageCache(&s.msg.Message, s.data) + s.data = nil // no need to keep hold of this log.L(ctx).Infof("Sent private message %s:%s sequence=%d", s.msg.Header.Namespace, s.msg.Header.ID, s.msg.Sequence) return nil diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index a0e55abf17..9e3ef7e563 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -86,9 +86,10 @@ func TestSendConfirmMessageE2EOk(t *testing.T) { dataID := fftypes.NewUUID() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Hash: fftypes.NewRandB32()}, }, nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{}, nil, nil).Once() @@ -144,9 +145,10 @@ func TestSendUnpinnedMessageE2EOk(t *testing.T) { dataID := fftypes.NewUUID() groupID := fftypes.NewRandB32() mdm := 
pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Hash: fftypes.NewRandB32()}, }, nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() mdi := pm.database.(*databasemocks.Plugin) mdi.On("UpsertMessage", pm.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil).Once() @@ -248,7 +250,7 @@ func TestSendMessageFail(t *testing.T) { dataID := fftypes.NewUUID() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Hash: fftypes.NewRandB32()}, }, nil) @@ -309,7 +311,7 @@ func TestResolveAndSendBadInlineData(t *testing.T) { }, } - err := message.resolve(pm.ctx) + _, err := message.resolve(pm.ctx) assert.Regexp(t, "pop", err) mim.AssertExpectations(t) @@ -334,7 +336,7 @@ func TestSendUnpinnedMessageTooLarge(t *testing.T) { dataID := fftypes.NewUUID() groupID := fftypes.NewRandB32() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Hash: fftypes.NewRandB32(), ValueSize: 100001}, }, nil) @@ -408,7 +410,7 @@ func TestMessagePrepare(t *testing.T) { }, nil, nil).Once() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, }, nil) @@ -484,7 +486,7 @@ func TestSendUnpinnedMessageInsertFail(t *testing.T) { dataID := fftypes.NewUUID() groupID := fftypes.NewRandB32() mdm := pm.data.(*datamocks.Manager) - 
mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Hash: fftypes.NewRandB32()}, }, nil) @@ -667,9 +669,10 @@ func TestRequestReplySuccess(t *testing.T) { Return(nil, nil) mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ + mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, }, nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() groupID := fftypes.NewRandB32() diff --git a/internal/syncasync/sync_async_bridge.go b/internal/syncasync/sync_async_bridge.go index a4cd6f344e..9bc6e79047 100644 --- a/internal/syncasync/sync_async_bridge.go +++ b/internal/syncasync/sync_async_bridge.go @@ -452,7 +452,7 @@ func (sa *syncAsyncBridge) resolveReply(inflight *inflightRequest, msg *fftypes. 
log.L(sa.ctx).Debugf("Resolving reply request '%s' with message '%s'", inflight.id, msg.Header.ID) response := &fftypes.MessageInOut{Message: *msg} - data, _, err := sa.data.GetMessageData(sa.ctx, msg, true) + data, _, err := sa.data.GetMessageDataCached(sa.ctx, msg) if err != nil { log.L(sa.ctx).Errorf("Failed to read response data for message '%s' on request '%s': %s", msg.Header.ID, inflight.id, err) return diff --git a/internal/syncasync/sync_async_bridge_test.go b/internal/syncasync/sync_async_bridge_test.go index 4aacfa1674..93d212c5a0 100644 --- a/internal/syncasync/sync_async_bridge_test.go +++ b/internal/syncasync/sync_async_bridge_test.go @@ -69,7 +69,7 @@ func TestRequestReplyOk(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return([]*fftypes.Data{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) @@ -117,7 +117,7 @@ func TestAwaitConfirmationOk(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return([]*fftypes.Data{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) @@ -163,7 +163,7 @@ func TestAwaitConfirmationRejected(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return([]*fftypes.Data{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) @@ -701,7 +701,7 @@ func TestEventCallbackMsgDataLookupFail(t *testing.T) { defer cancel() mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) 
sa.resolveReply(&inflightRequest{}, &fftypes.Message{ Header: fftypes.MessageHeader{ diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index 954dbeff99..5ef755b06f 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -5,6 +5,7 @@ package datamocks import ( context "context" + data "github.com/hyperledger/firefly/internal/data" fftypes "github.com/hyperledger/firefly/pkg/fftypes" io "io" @@ -86,13 +87,20 @@ func (_m *Manager) DownloadBLOB(ctx context.Context, ns string, dataID string) ( return r0, r1, r2 } -// GetMessageData provides a mock function with given fields: ctx, msg, withValue -func (_m *Manager) GetMessageData(ctx context.Context, msg *fftypes.Message, withValue bool) ([]*fftypes.Data, bool, error) { - ret := _m.Called(ctx, msg, withValue) +// GetMessageDataCached provides a mock function with given fields: ctx, msg, options +func (_m *Manager) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...data.CacheReadOption) ([]*fftypes.Data, bool, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, msg) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Message, bool) []*fftypes.Data); ok { - r0 = rf(ctx, msg, withValue) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Message, ...data.CacheReadOption) []*fftypes.Data); ok { + r0 = rf(ctx, msg, options...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*fftypes.Data) @@ -100,15 +108,15 @@ func (_m *Manager) GetMessageData(ctx context.Context, msg *fftypes.Message, wit } var r1 bool - if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Message, bool) bool); ok { - r1 = rf(ctx, msg, withValue) + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Message, ...data.CacheReadOption) bool); ok { + r1 = rf(ctx, msg, options...) 
} else { r1 = ret.Get(1).(bool) } var r2 error - if rf, ok := ret.Get(2).(func(context.Context, *fftypes.Message, bool) error); ok { - r2 = rf(ctx, msg, withValue) + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.Message, ...data.CacheReadOption) error); ok { + r2 = rf(ctx, msg, options...) } else { r2 = ret.Error(2) } @@ -116,6 +124,52 @@ func (_m *Manager) GetMessageData(ctx context.Context, msg *fftypes.Message, wit return r0, r1, r2 } +// GetMessageWithDataCached provides a mock function with given fields: ctx, msgID, options +func (_m *Manager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...data.CacheReadOption) (*fftypes.Message, []*fftypes.Data, bool, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, msgID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *fftypes.Message + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) *fftypes.Message); ok { + r0 = rf(ctx, msgID, options...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.Message) + } + } + + var r1 []*fftypes.Data + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) []*fftypes.Data); ok { + r1 = rf(ctx, msgID, options...) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]*fftypes.Data) + } + } + + var r2 bool + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) bool); ok { + r2 = rf(ctx, msgID, options...) + } else { + r2 = ret.Get(2).(bool) + } + + var r3 error + if rf, ok := ret.Get(3).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) error); ok { + r3 = rf(ctx, msgID, options...) 
+ } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 +} + // HydrateBatch provides a mock function with given fields: ctx, persistedBatch func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { ret := _m.Called(ctx, persistedBatch) @@ -140,15 +194,15 @@ func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.Bat } // ResolveInlineDataBroadcast provides a mock function with given fields: ctx, ns, inData -func (_m *Manager) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, []*fftypes.DataAndBlob, error) { +func (_m *Manager) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, []*fftypes.DataAndBlob, error) { ret := _m.Called(ctx, ns, inData) - var r0 fftypes.DataRefs - if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.InlineData) fftypes.DataRefs); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.InlineData) fftypes.DataArray); ok { r0 = rf(ctx, ns, inData) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(fftypes.DataRefs) + r0 = ret.Get(0).(fftypes.DataArray) } } @@ -172,15 +226,15 @@ func (_m *Manager) ResolveInlineDataBroadcast(ctx context.Context, ns string, in } // ResolveInlineDataPrivate provides a mock function with given fields: ctx, ns, inData -func (_m *Manager) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, error) { +func (_m *Manager) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, error) { ret := _m.Called(ctx, ns, inData) - var r0 fftypes.DataRefs - if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.InlineData) fftypes.DataRefs); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.InlineData) fftypes.DataArray); ok { r0 = 
rf(ctx, ns, inData) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(fftypes.DataRefs) + r0 = ret.Get(0).(fftypes.DataArray) } } @@ -194,6 +248,11 @@ func (_m *Manager) ResolveInlineDataPrivate(ctx context.Context, ns string, inDa return r0, r1 } +// UpdateMessageCache provides a mock function with given fields: msg, _a1 +func (_m *Manager) UpdateMessageCache(msg *fftypes.Message, _a1 []*fftypes.Data) { + _m.Called(msg, _a1) +} + // UploadBLOB provides a mock function with given fields: ctx, ns, inData, blob, autoMeta func (_m *Manager) UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) { ret := _m.Called(ctx, ns, inData, blob, autoMeta) diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index 8c78ac1f6c..6c7539d907 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -111,6 +111,20 @@ func (d DataRefs) Hash() *Bytes32 { return &b32 } +type DataArray []*Data + +func (da DataArray) Refs() DataRefs { + dr := make(DataRefs, len(da)) + for i, d := range da { + dr[i] = &DataRef{ + ID: d.ID, + Hash: d.Hash, + ValueSize: d.ValueSize, + } + } + return dr +} + func CheckValidatorType(ctx context.Context, validator ValidatorType) error { switch validator { case ValidatorTypeJSON, ValidatorTypeNone, ValidatorTypeSystemDefinition: diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index da20613243..5958885aff 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -226,3 +226,29 @@ func TestDataImmutable(t *testing.T) { assert.Equal(t, data.Blob, data.BatchData(BatchTypeBroadcast).Blob) assert.Empty(t, data.BatchData(BatchTypePrivate).Blob.Public) } + +func TestDataArryToRefs(t *testing.T) { + data1 := &Data{ + ID: NewUUID(), + Validator: ValidatorTypeJSON, + Namespace: "ns1", + Hash: NewRandB32(), + Created: Now(), + ValueSize: 12345, + } + data2 := &Data{ + ID: NewUUID(), + Validator: ValidatorTypeJSON, + Namespace: "ns1", + Hash: NewRandB32(), 
+ Created: Now(), + ValueSize: 23456, + } + + da := DataArray{data1, data2} + assert.Equal(t, da.Refs(), DataRefs{ + {ID: data1.ID, Hash: data1.Hash, ValueSize: 12345}, + {ID: data2.ID, Hash: data2.Hash, ValueSize: 23456}, + }) + +} From 095c06876f2a95e1b3139e31f605620650fc7238 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 8 Mar 2022 08:57:02 -0500 Subject: [PATCH 09/11] Apply changes to batch structure to events Signed-off-by: Peter Broadhurst --- internal/batch/batch_processor.go | 2 +- internal/config/config.go | 2 +- internal/data/data_manager.go | 24 +- internal/database/sqlcommon/batch_sql_test.go | 14 +- .../definition_handler_identity_claim.go | 2 +- .../definition_handler_identity_claim_test.go | 16 +- ...efinition_handler_identity_verification.go | 2 +- ...tion_handler_identity_verification_test.go | 6 +- internal/events/aggregator.go | 105 +++- internal/events/aggregator_test.go | 588 +++++++++--------- internal/events/batch_pin_complete_test.go | 182 +++--- internal/events/dx_callbacks_test.go | 14 + internal/events/event_dispatcher.go | 8 +- internal/events/event_dispatcher_test.go | 2 +- internal/events/persist_batch.go | 56 +- mocks/datamocks/manager.go | 24 +- pkg/fftypes/batch.go | 66 +- pkg/fftypes/batch_test.go | 8 +- 18 files changed, 627 insertions(+), 494 deletions(-) diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 50f8a7f3bb..42144e8637 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -394,9 +394,9 @@ func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWor Namespace: bp.conf.namespace, SignerRef: bp.conf.signer, Group: bp.conf.group, + Created: fftypes.Now(), Node: bp.ni.GetNodeUUID(bp.ctx), }, - Created: fftypes.Now(), }, } for _, w := range flushWork { diff --git a/internal/config/config.go b/internal/config/config.go index 3ca8ebe99e..730ea1c3c4 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ 
-357,7 +357,7 @@ func Reset() { viper.SetDefault(string(UIEnabled), true) viper.SetDefault(string(ValidatorCacheSize), "1Mb") viper.SetDefault(string(ValidatorCacheTTL), "1h") - viper.SetDefault(string(MessageCacheSize), "10Mb") + viper.SetDefault(string(MessageCacheSize), "50Mb") viper.SetDefault(string(MessageCacheTTL), "5m") viper.SetDefault(string(IdentityManagerCacheLimit), 100 /* items */) viper.SetDefault(string(IdentityManagerCacheTTL), "1h") diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index d6f780b8dc..70e7f32626 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -35,10 +35,10 @@ import ( type Manager interface { CheckDatatype(ctx context.Context, ns string, datatype *fftypes.Datatype) error - ValidateAll(ctx context.Context, data []*fftypes.Data) (valid bool, err error) - GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data []*fftypes.Data, foundAllData bool, err error) - GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data []*fftypes.Data, foundAll bool, err error) - UpdateMessageCache(msg *fftypes.Message, data []*fftypes.Data) + ValidateAll(ctx context.Context, data fftypes.DataArray) (valid bool, err error) + GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data fftypes.DataArray, foundAllData bool, err error) + GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data fftypes.DataArray, foundAll bool, err error) + UpdateMessageCache(msg *fftypes.Message, data fftypes.DataArray) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, error) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, []*fftypes.DataAndBlob, error) VerifyNamespaceExists(ctx context.Context, ns 
string) error @@ -161,7 +161,7 @@ func (dm *dataManager) getValidatorForDatatype(ctx context.Context, ns string, v // GetMessageWithData performs a cached lookup of a message with all of the associated data. // - Use this in performance sensitive code, but note mutable fields like the status of the // message CANNOT be relied upon (due to the caching). -func (dm *dataManager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data []*fftypes.Data, foundAllData bool, err error) { +func (dm *dataManager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data fftypes.DataArray, foundAllData bool, err error) { if mce := dm.queryMessageCache(ctx, msgID, options...); mce != nil { return mce.msg, mce.data, true, nil } @@ -176,7 +176,7 @@ func (dm *dataManager) GetMessageWithDataCached(ctx context.Context, msgID *ffty // GetMessageData looks for all the data attached to the message, including caching. // It only returns persistence errors. 
// For all cases where the data is not found (or the hashes mismatch) -func (dm *dataManager) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data []*fftypes.Data, foundAll bool, err error) { +func (dm *dataManager) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data fftypes.DataArray, foundAll bool, err error) { if mce := dm.queryMessageCache(ctx, msg.Header.ID, options...); mce != nil { return mce.data, true, nil } @@ -184,7 +184,7 @@ func (dm *dataManager) GetMessageDataCached(ctx context.Context, msg *fftypes.Me } // cachedMessageAndDataLookup is the common function that can lookup and cache a message with its data -func (dm *dataManager) dataLookupAndCache(ctx context.Context, msg *fftypes.Message) (data []*fftypes.Data, foundAllData bool, err error) { +func (dm *dataManager) dataLookupAndCache(ctx context.Context, msg *fftypes.Message) (data fftypes.DataArray, foundAllData bool, err error) { data, foundAllData, err = dm.getMessageData(ctx, msg) if err != nil { return nil, false, err @@ -218,7 +218,7 @@ func (dm *dataManager) queryMessageCache(ctx context.Context, id *fftypes.UUID, // UpdateMessageCache pushes an entry to the message cache. It is exposed out of the package, so that // code which generates (or augments) message/data can populate the cache. -func (dm *dataManager) UpdateMessageCache(msg *fftypes.Message, data []*fftypes.Data) { +func (dm *dataManager) UpdateMessageCache(msg *fftypes.Message, data fftypes.DataArray) { cacheEntry := &messageCacheEntry{ msg: msg, data: data, @@ -227,9 +227,9 @@ func (dm *dataManager) UpdateMessageCache(msg *fftypes.Message, data []*fftypes. 
dm.messageCache.Set(msg.Header.ID.String(), cacheEntry, dm.messageCacheTTL) } -func (dm *dataManager) getMessageData(ctx context.Context, msg *fftypes.Message) (data []*fftypes.Data, foundAll bool, err error) { +func (dm *dataManager) getMessageData(ctx context.Context, msg *fftypes.Message) (data fftypes.DataArray, foundAll bool, err error) { // Load all the data - must all be present for us to send - data = make([]*fftypes.Data, 0, len(msg.Data)) + data = make(fftypes.DataArray, 0, len(msg.Data)) foundAll = true for i, dataRef := range msg.Data { d, err := dm.resolveRef(ctx, msg.Header.Namespace, dataRef) @@ -246,7 +246,7 @@ func (dm *dataManager) getMessageData(ctx context.Context, msg *fftypes.Message) return data, foundAll, nil } -func (dm *dataManager) ValidateAll(ctx context.Context, data []*fftypes.Data) (valid bool, err error) { +func (dm *dataManager) ValidateAll(ctx context.Context, data fftypes.DataArray) (valid bool, err error) { for _, d := range data { if d.Datatype != nil && d.Validator != fftypes.ValidatorTypeNone { v, err := dm.getValidatorForDatatype(ctx, d.Namespace, d.Validator, d.Datatype) @@ -446,7 +446,7 @@ func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes Payload: fftypes.BatchPayload{ TX: persistedBatch.TX, Messages: make([]*fftypes.Message, len(manifest.Messages)), - Data: make([]*fftypes.Data, len(manifest.Data)), + Data: make(fftypes.DataArray, len(manifest.Data)), }, } diff --git a/internal/database/sqlcommon/batch_sql_test.go b/internal/database/sqlcommon/batch_sql_test.go index 80d652f0f2..e8ec1fb8e1 100644 --- a/internal/database/sqlcommon/batch_sql_test.go +++ b/internal/database/sqlcommon/batch_sql_test.go @@ -49,14 +49,14 @@ func TestBatch2EWithDB(t *testing.T) { Namespace: "ns1", Hash: fftypes.NewRandB32(), Node: fftypes.NewUUID(), + Created: fftypes.Now(), }, - Created: fftypes.Now(), TX: fftypes.TransactionRef{ Type: fftypes.TransactionTypeUnpinned, }, Manifest: (&fftypes.BatchManifest{ - 
Messages: []fftypes.MessageRef{ - {ID: msgID1}, + Messages: []*fftypes.MessageManifestEntry{ + {MessageRef: fftypes.MessageRef{ID: msgID1}}, }, }).String(), } @@ -91,16 +91,16 @@ func TestBatch2EWithDB(t *testing.T) { Namespace: "ns1", Hash: fftypes.NewRandB32(), Node: fftypes.NewUUID(), + Created: fftypes.Now(), }, - Created: fftypes.Now(), TX: fftypes.TransactionRef{ ID: txid, Type: fftypes.TransactionTypeBatchPin, }, Manifest: (&fftypes.BatchManifest{ - Messages: []fftypes.MessageRef{ - {ID: msgID1}, - {ID: msgID2}, + Messages: []*fftypes.MessageManifestEntry{ + {MessageRef: fftypes.MessageRef{ID: msgID1}}, + {MessageRef: fftypes.MessageRef{ID: msgID2}}, }, }).String(), PayloadRef: payloadRef, diff --git a/internal/definitions/definition_handler_identity_claim.go b/internal/definitions/definition_handler_identity_claim.go index ec715357f1..c59b5f7e58 100644 --- a/internal/definitions/definition_handler_identity_claim.go +++ b/internal/definitions/definition_handler_identity_claim.go @@ -102,7 +102,7 @@ func (dh *definitionHandlers) confirmVerificationForClaim(ctx context.Context, s } } for _, candidate := range candidates { - data, foundAll, err := dh.data.GetMessageData(ctx, candidate, true) + data, foundAll, err := dh.data.GetMessageDataCached(ctx, candidate) if err != nil { return nil, err } diff --git a/internal/definitions/definition_handler_identity_claim_test.go b/internal/definitions/definition_handler_identity_claim_test.go index 7ba5e66d2c..5725b3d9d7 100644 --- a/internal/definitions/definition_handler_identity_claim_test.go +++ b/internal/definitions/definition_handler_identity_claim_test.go @@ -169,8 +169,8 @@ func TestHandleDefinitionIdentityClaimCustomWithExistingParentVerificationOk(t * })).Return(nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, false, nil).Once() - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, true, nil) + 
mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, false, nil).Once() + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg @@ -215,8 +215,8 @@ func TestHandleDefinitionIdentityClaimIdempotentReplay(t *testing.T) { })).Return(nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, false, nil).Once() - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, false, nil).Once() + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg @@ -250,7 +250,7 @@ func TestHandleDefinitionIdentityClaimFailInsertIdentity(t *testing.T) { mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg @@ -280,7 +280,7 @@ func TestHandleDefinitionIdentityClaimVerificationDataFail(t *testing.T) { mdi.On("GetMessages", ctx, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg @@ -310,7 +310,7 @@ func TestHandleDefinitionIdentityClaimVerificationMissingData(t *testing.T) { mdi.On("GetMessages", ctx, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdm := 
dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg @@ -341,7 +341,7 @@ func TestHandleDefinitionIdentityClaimFailInsertVerifier(t *testing.T) { mdi.On("UpsertVerifier", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg diff --git a/internal/definitions/definition_handler_identity_verification.go b/internal/definitions/definition_handler_identity_verification.go index e9ef8d8105..9324cdcc79 100644 --- a/internal/definitions/definition_handler_identity_verification.go +++ b/internal/definitions/definition_handler_identity_verification.go @@ -68,7 +68,7 @@ func (dh *definitionHandlers) handleIdentityVerificationBroadcast(ctx context.Co log.L(ctx).Warnf("Invalid verification message %s - hash mismatch claim=%s verification=%s", verifyMsg.Header.ID, claimMsg.Hash, verification.Claim.Hash) return HandlerResult{Action: ActionReject}, nil } - data, foundAll, err := dh.data.GetMessageData(ctx, claimMsg, true) + data, foundAll, err := dh.data.GetMessageDataCached(ctx, claimMsg) if err != nil { return HandlerResult{Action: ActionRetry}, err } diff --git a/internal/definitions/definition_handler_identity_verification_test.go b/internal/definitions/definition_handler_identity_verification_test.go index 8d4455671a..6c6e1b5929 100644 --- a/internal/definitions/definition_handler_identity_verification_test.go +++ b/internal/definitions/definition_handler_identity_verification_test.go @@ -62,7 +62,7 @@ func 
TestHandleDefinitionIdentityVerificationWithExistingClaimOk(t *testing.T) { })).Return(nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{claimData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{claimData}, true, nil) bs.pendingConfirms[*claimMsg.Header.ID] = claimMsg @@ -92,7 +92,7 @@ func TestHandleDefinitionIdentityVerificationIncompleteClaimData(t *testing.T) { mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(claimMsg, nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{}, false, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{}, false, nil) action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) @@ -118,7 +118,7 @@ func TestHandleDefinitionIdentityVerificationClaimDataFail(t *testing.T) { mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(claimMsg, nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) diff --git a/internal/events/aggregator.go b/internal/events/aggregator.go index cf42308142..4ae42efb82 100644 --- a/internal/events/aggregator.go +++ b/internal/events/aggregator.go @@ -20,6 +20,7 @@ import ( "context" "crypto/sha256" "database/sql/driver" + "encoding/json" "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/data" @@ -192,25 +193,60 @@ func (ag *aggregator) getPins(ctx context.Context, filter database.Filter) ([]ff return ls, err } -func (ag 
*aggregator) extractBatchMessagePin(batch *fftypes.Batch, requiredIndex int64) (totalBatchPins int64, msg *fftypes.Message, msgBaseIndex int64) { - for _, batchMsg := range batch.Payload.Messages { +func (ag *aggregator) extractBatchMessagePin(manifest *fftypes.BatchManifest, requiredIndex int64) (totalBatchPins int64, msgEntry *fftypes.MessageManifestEntry, msgBaseIndex int64) { + for _, batchMsg := range manifest.Messages { batchMsgBaseIdx := totalBatchPins - for i := 0; i < len(batchMsg.Header.Topics); i++ { + for i := 0; i < batchMsg.Topics; i++ { if totalBatchPins == requiredIndex { - msg = batchMsg + msgEntry = batchMsg msgBaseIndex = batchMsgBaseIdx } totalBatchPins++ } } - return totalBatchPins, msg, msgBaseIndex + return totalBatchPins, msgEntry, msgBaseIndex +} + +func (ag *aggregator) migrateManifest(ctx context.Context, persistedBatch *fftypes.BatchPersisted) *fftypes.BatchManifest { + // In version v0.13.x and earlier, we stored the full batch + var fullBatch fftypes.Batch + err := json.Unmarshal([]byte(persistedBatch.Manifest), &fullBatch) + if err != nil { + log.L(ctx).Errorf("Invalid migration persisted batch: %s", err) + return nil + } + if len(fullBatch.Payload.Messages) == 0 { + log.L(ctx).Errorf("Invalid migration persisted batch: no payload") + return nil + } + return fullBatch.Manifest() +} + +func (ag *aggregator) extractManifest(ctx context.Context, batch *fftypes.BatchPersisted) *fftypes.BatchManifest { + + var manifest fftypes.BatchManifest + err := json.Unmarshal([]byte(batch.Manifest), &manifest) + if err != nil { + log.L(ctx).Errorf("Invalid manifest: %s", err) + return nil + } + switch manifest.Version { + case fftypes.ManifestVersionUnset: + return ag.migrateManifest(ctx, batch) + case fftypes.ManifestVersion1: + return &manifest + default: + log.L(ctx).Errorf("Invalid manifest version: %d", manifest.Version) + return nil + } } func (ag *aggregator) processPins(ctx context.Context, pins []*fftypes.Pin, state *batchState) (err error) { 
l := log.L(ctx) // Keep a batch cache for this list of pins - var batch *fftypes.Batch + var batch *fftypes.BatchPersisted + var manifest *fftypes.BatchManifest // As messages can have multiple topics, we need to avoid processing the message twice in the same poll loop. // We must check all the contexts in the message, and mark them dispatched together. dupMsgCheck := make(map[fftypes.UUID]bool) @@ -225,27 +261,32 @@ func (ag *aggregator) processPins(ctx context.Context, pins []*fftypes.Pin, stat l.Debugf("Batch %s not available - pin %s is parked", pin.Batch, pin.Hash) continue } + manifest = ag.extractManifest(ctx, batch) + if manifest == nil { + l.Errorf("Batch %s manifest could not be extracted - pin %s is parked", pin.Batch, pin.Hash) + continue + } } // Extract the message from the batch - where the index is of a topic within a message - batchPinCount, msg, msgBaseIndex := ag.extractBatchMessagePin(batch, pin.Index) - if msg == nil { + batchPinCount, msgEntry, msgBaseIndex := ag.extractBatchMessagePin(manifest, pin.Index) + if msgEntry == nil { l.Errorf("Pin %.10d outside of range: batch=%s pinCount=%d pinIndex=%d hash=%s masked=%t", pin.Sequence, pin.Batch, batchPinCount, pin.Index, pin.Hash, pin.Masked) continue } - l.Debugf("Aggregating pin %.10d batch=%s msg=%s pinIndex=%d msgBaseIndex=%d hash=%s masked=%t", pin.Sequence, pin.Batch, msg.Header.ID, pin.Index, msgBaseIndex, pin.Hash, pin.Masked) - if msg.Header.ID == nil { + l.Debugf("Aggregating pin %.10d batch=%s msg=%s pinIndex=%d msgBaseIndex=%d hash=%s masked=%t", pin.Sequence, pin.Batch, msgEntry.ID, pin.Index, msgBaseIndex, pin.Hash, pin.Masked) + if msgEntry.ID == nil { l.Errorf("null message entry %d in batch '%s'", pin.Index, batch.ID) continue } - if dupMsgCheck[*msg.Header.ID] { + if dupMsgCheck[*msgEntry.ID] { continue } - dupMsgCheck[*msg.Header.ID] = true + dupMsgCheck[*msgEntry.ID] = true // Attempt to process the message (only returns errors for database persistence issues) - err := 
ag.processMessage(ctx, batch, pin, msgBaseIndex, msg, state) + err := ag.processMessage(ctx, manifest, pin, msgBaseIndex, msgEntry, state) if err != nil { return err } @@ -295,9 +336,18 @@ func (ag *aggregator) checkOnchainConsistency(ctx context.Context, msg *fftypes. return true, nil } -func (ag *aggregator) processMessage(ctx context.Context, batch *fftypes.Batch, pin *fftypes.Pin, msgBaseIndex int64, msg *fftypes.Message, state *batchState) (err error) { +func (ag *aggregator) processMessage(ctx context.Context, manifest *fftypes.BatchManifest, pin *fftypes.Pin, msgBaseIndex int64, msgEntry *fftypes.MessageManifestEntry, state *batchState) (err error) { l := log.L(ctx) + msg, data, dataAvailable, err := ag.data.GetMessageWithDataCached(ctx, msgEntry.ID) + if err != nil { + return err + } + if !dataAvailable { + l.Errorf("Message '%s' in batch '%s' is missing data", msgEntry.ID, manifest.ID) + return nil + } + // Check if it's ready to be processed unmaskedContexts := make([]*fftypes.Bytes32, 0, len(msg.Header.Topics)) nextPins := make([]*nextPinState, 0, len(msg.Header.Topics)) @@ -305,14 +355,14 @@ func (ag *aggregator) processMessage(ctx context.Context, batch *fftypes.Batch, // Private messages have one or more masked "pin" hashes that allow us to work // out if it's the next message in the sequence, given the previous messages if msg.Header.Group == nil || len(msg.Pins) == 0 || len(msg.Header.Topics) != len(msg.Pins) { - l.Errorf("Message '%s' in batch '%s' has invalid pin data pins=%v topics=%v", msg.Header.ID, batch.ID, msg.Pins, msg.Header.Topics) + l.Errorf("Message '%s' in batch '%s' has invalid pin data pins=%v topics=%v", msg.Header.ID, manifest.ID, msg.Pins, msg.Header.Topics) return nil } for i, pinStr := range msg.Pins { var msgContext fftypes.Bytes32 err := msgContext.UnmarshalText([]byte(pinStr)) if err != nil { - l.Errorf("Message '%s' in batch '%s' has invalid pin at index %d: '%s'", msg.Header.ID, batch.ID, i, pinStr) + l.Errorf("Message '%s' 
in batch '%s' has invalid pin at index %d: '%s'", msg.Header.ID, manifest.ID, i, pinStr) return nil } nextPin, err := state.CheckMaskedContextReady(ctx, msg, msg.Header.Topics[i], pin.Sequence, &msgContext) @@ -335,10 +385,13 @@ func (ag *aggregator) processMessage(ctx context.Context, batch *fftypes.Batch, } - l.Debugf("Attempt dispatch msg=%s broadcastContexts=%v privatePins=%v", msg.Header.ID, unmaskedContexts, msg.Pins) - dispatched, err := ag.attemptMessageDispatch(ctx, msg, batch.Payload.TX.ID, state, pin) - if err != nil { - return err + dispatched := false + if dataAvailable { + l.Debugf("Attempt dispatch msg=%s broadcastContexts=%v privatePins=%v", msg.Header.ID, unmaskedContexts, msg.Pins) + dispatched, err = ag.attemptMessageDispatch(ctx, msg, data, manifest.TX.ID, state, pin) + if err != nil { + return err + } } // Mark all message pins dispatched true/false @@ -348,7 +401,7 @@ func (ag *aggregator) processMessage(ctx context.Context, batch *fftypes.Batch, for _, np := range nextPins { np.IncrementNextPin(ctx) } - state.MarkMessageDispatched(ctx, batch.ID, msg, msgBaseIndex) + state.MarkMessageDispatched(ctx, manifest.ID, msg, msgBaseIndex) } else { for _, unmaskedContext := range unmaskedContexts { state.SetContextBlockedBy(ctx, *unmaskedContext, pin.Sequence) @@ -358,13 +411,7 @@ func (ag *aggregator) processMessage(ctx context.Context, batch *fftypes.Batch, return nil } -func (ag *aggregator) attemptMessageDispatch(ctx context.Context, msg *fftypes.Message, tx *fftypes.UUID, state *batchState, pin *fftypes.Pin) (bool, error) { - - // If we don't find all the data, then we don't dispatch - data, foundAll, err := ag.data.GetMessageData(ctx, msg, true) - if err != nil || !foundAll { - return false, err - } +func (ag *aggregator) attemptMessageDispatch(ctx context.Context, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID, state *batchState, pin *fftypes.Pin) (valid bool, err error) { // Check the pin signer is valid for the message if 
valid, err := ag.checkOnchainConsistency(ctx, msg, pin); err != nil || !valid { @@ -392,7 +439,7 @@ func (ag *aggregator) attemptMessageDispatch(ctx context.Context, msg *fftypes.M } // Validate the message data - valid := true + valid = true var customCorrelator *fftypes.UUID switch { case msg.Header.Type == fftypes.MessageTypeDefinition: diff --git a/internal/events/aggregator_test.go b/internal/events/aggregator_test.go index 7b30307079..3c8b893521 100644 --- a/internal/events/aggregator_test.go +++ b/internal/events/aggregator_test.go @@ -62,6 +62,62 @@ func newTestAggregator() (*aggregator, func()) { return newTestAggregatorCommon(false) } +func newTestManifest(mType fftypes.MessageType, groupID *fftypes.Bytes32) (*fftypes.Message, *fftypes.Message, *fftypes.Identity, *fftypes.BatchManifest) { + org1 := newTestOrg("org1") + + msg1 := &fftypes.Message{ + Header: fftypes.MessageHeader{ + Type: mType, + ID: fftypes.NewUUID(), + Namespace: "any", + Group: groupID, + Topics: fftypes.FFStringArray{"topic1"}, + SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, + }, + Data: fftypes.DataRefs{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, + }, + } + msg2 := &fftypes.Message{ + Header: fftypes.MessageHeader{ + Type: mType, + ID: fftypes.NewUUID(), + Group: groupID, + Namespace: "any", + Topics: fftypes.FFStringArray{"topic1"}, + SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, + }, + Data: fftypes.DataRefs{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, + }, + } + + return msg1, msg2, org1, &fftypes.BatchManifest{ + Version: 1, + ID: fftypes.NewUUID(), + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeBatchPin, + ID: fftypes.NewUUID(), + }, + Messages: []*fftypes.MessageManifestEntry{ + { + MessageRef: fftypes.MessageRef{ + ID: msg1.Header.ID, + Hash: msg1.Hash, + }, + Topics: len(msg1.Header.Topics), + }, + { + MessageRef: fftypes.MessageRef{ + ID: msg2.Header.ID, + Hash: msg2.Hash, + }, + Topics: 
len(msg2.Header.Topics), + }, + }, + } +} + func TestAggregationMaskedZeroNonceMatch(t *testing.T) { ag, cancel := newTestAggregatorWithMetrics() @@ -95,9 +151,10 @@ func TestAggregationMaskedZeroNonceMatch(t *testing.T) { Value: member2key, }).Return(member2org, nil) - // Get the batch - mdi.On("GetBatchByID", ag.ctx, batchID).Return(&fftypes.Batch{ - ID: batchID, + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { @@ -118,7 +175,11 @@ func TestAggregationMaskedZeroNonceMatch(t *testing.T) { }, }, }, - }, nil) + } + bp, _ := batch.Confirmed() + + // Get the batch + mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) // Look for existing nextpins - none found, first on context mdi.On("GetNextPins", ag.ctx, mock.Anything).Return([]*fftypes.NextPin{}, nil, nil).Once() // Get the group members @@ -144,7 +205,7 @@ func TestAggregationMaskedZeroNonceMatch(t *testing.T) { return *np.Hash == *member2NonceOne && np.Nonce == 1 })).Return(nil).Once() // Validate the message is ok - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) // Insert the confirmed event mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { @@ -232,9 +293,10 @@ func TestAggregationMaskedNextSequenceMatch(t *testing.T) { rag.ReturnArguments = mock.Arguments{a[1].(func(context.Context) error)(a[0].(context.Context))} } - // Get the batch - mdi.On("GetBatchByID", ag.ctx, batchID).Return(&fftypes.Batch{ - ID: batchID, + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { @@ -255,14 +317,18 @@ func TestAggregationMaskedNextSequenceMatch(t *testing.T) { 
}, }, }, - }, nil) + } + bp, _ := batch.Confirmed() + + // Get the batch + mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) // Look for existing nextpins - none found, first on context mdi.On("GetNextPins", ag.ctx, mock.Anything).Return([]*fftypes.NextPin{ {Context: contextUnmasked, Identity: member1org.DID, Hash: member1Nonce100, Nonce: 100, Sequence: 929}, {Context: contextUnmasked, Identity: member2org.DID, Hash: member2Nonce500, Nonce: 500, Sequence: 424}, }, nil, nil).Once() // Validate the message is ok - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) // Insert the confirmed event mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { @@ -330,9 +396,10 @@ func TestAggregationBroadcast(t *testing.T) { Value: member1key, }).Return(member1org, nil) - // Get the batch - mdi.On("GetBatchByID", ag.ctx, batchID).Return(&fftypes.Batch{ - ID: batchID, + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { @@ -351,11 +418,15 @@ func TestAggregationBroadcast(t *testing.T) { }, }, }, - }, nil) + } + bp, _ := batch.Confirmed() + + // Get the batch + mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) // Do not resolve any pins earlier mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) // Validate the message is ok - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) // Insert the confirmed event mdi.On("InsertEvent", ag.ctx, 
mock.MatchedBy(func(e *fftypes.Event) bool { @@ -460,15 +531,20 @@ func TestProcessPinsMissingNoMsg(t *testing.T) { defer cancel() bs := newBatchState(ag) - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(&fftypes.Batch{ - ID: fftypes.NewUUID(), + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, }, }, - }, nil) + } + bp, _ := batch.Confirmed() + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(bp, nil) mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ @@ -484,9 +560,10 @@ func TestProcessPinsBadMsgHeader(t *testing.T) { defer cancel() bs := newBatchState(ag) - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(&fftypes.Batch{ - ID: fftypes.NewUUID(), + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ @@ -495,7 +572,11 @@ func TestProcessPinsBadMsgHeader(t *testing.T) { }}, }, }, - }, nil) + } + bp, _ := batch.Confirmed() + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(bp, nil) mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ @@ -512,9 +593,10 @@ func TestProcessSkipDupMsg(t *testing.T) { bs := newBatchState(ag) batchID := fftypes.NewUUID() - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(&fftypes.Batch{ - ID: batchID, + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ 
{Header: fftypes.MessageHeader{ @@ -523,18 +605,26 @@ func TestProcessSkipDupMsg(t *testing.T) { }}, }, }, - }, nil).Once() + } + bp, _ := batch.Confirmed() + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(bp, nil).Once() mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{ {Sequence: 1111}, // blocks the context }, nil, nil) mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(batch.Payload.Messages[0], nil, true, nil) + err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: batchID, Index: 0, Hash: fftypes.NewRandB32()}, {Sequence: 12345, Batch: batchID, Index: 1, Hash: fftypes.NewRandB32()}, }, bs) assert.NoError(t, err) mdi.AssertExpectations(t) + mdm.AssertExpectations(t) } @@ -544,9 +634,10 @@ func TestProcessMsgFailGetPins(t *testing.T) { bs := newBatchState(ag) batchID := fftypes.NewUUID() - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(&fftypes.Batch{ - ID: batchID, + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ @@ -555,39 +646,65 @@ func TestProcessMsgFailGetPins(t *testing.T) { }}, }, }, - }, nil).Once() + } + bp, _ := batch.Confirmed() + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(bp, nil).Once() mdi.On("GetPins", mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(batch.Payload.Messages[0], nil, true, nil) + err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: batchID, Index: 0, Hash: fftypes.NewRandB32()}, }, bs) assert.EqualError(t, err, "pop") 
mdi.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestProcessMsgFailMissingGroup(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.Message{}, nil) + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(&fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, nil, true, nil) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) assert.NoError(t, err) + mdm.AssertExpectations(t) } func TestProcessMsgFailBadPin(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.Message{ + msg := &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), Group: fftypes.NewRandB32(), Topics: fftypes.FFStringArray{"topic1"}, }, + Hash: fftypes.NewRandB32(), Pins: fftypes.FFStringArray{"!Wrong"}, + } + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg, nil, true, nil) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{ + MessageRef: fftypes.MessageRef{ + ID: msg.Header.ID, + Hash: msg.Hash, + }, + Topics: len(msg.Header.Topics), }, newBatchState(ag)) assert.NoError(t, err) + mdm.AssertExpectations(t) + } func TestProcessMsgFailGetNextPins(t *testing.T) { @@ -597,16 +714,30 @@ func TestProcessMsgFailGetNextPins(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetNextPins", ag.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.Message{ + msg := &fftypes.Message{ Header: 
fftypes.MessageHeader{ ID: fftypes.NewUUID(), Group: fftypes.NewRandB32(), Topics: fftypes.FFStringArray{"topic1"}, }, Pins: fftypes.FFStringArray{fftypes.NewRandB32().String()}, + } + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg, nil, true, nil) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{ + MessageRef: fftypes.MessageRef{ + ID: msg.Header.ID, + Hash: msg.Hash, + }, + Topics: len(msg.Header.Topics), }, newBatchState(ag)) assert.EqualError(t, err, "pop") + mdm.AssertExpectations(t) + mdi.AssertExpectations(t) + } func TestProcessMsgFailDispatch(t *testing.T) { @@ -615,18 +746,36 @@ func TestProcessMsgFailDispatch(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetPins", ag.ctx, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Sequence: 12345}, 10, &fftypes.Message{ + msg := &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), Topics: fftypes.FFStringArray{"topic1"}, + SignerRef: fftypes.SignerRef{ + Key: "0x12345", + }, }, Pins: fftypes.FFStringArray{fftypes.NewRandB32().String()}, + } + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg, nil, true, nil) + + mim := ag.identity.(*identitymanagermocks.Manager) + mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Sequence: 12345, Signer: "0x12345"}, 10, &fftypes.MessageManifestEntry{ + MessageRef: fftypes.MessageRef{ + ID: msg.Header.ID, + Hash: msg.Hash, + }, + Topics: len(msg.Header.Topics), }, newBatchState(ag)) 
assert.EqualError(t, err, "pop") + mdm.AssertExpectations(t) + mdi.AssertExpectations(t) + } func TestProcessMsgFailPinUpdate(t *testing.T) { @@ -640,6 +789,20 @@ func TestProcessMsgFailPinUpdate(t *testing.T) { mdm := ag.data.(*datamocks.Manager) mim := ag.identity.(*identitymanagermocks.Manager) + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Group: fftypes.NewRandB32(), + Topics: fftypes.FFStringArray{"topic1"}, + Namespace: "ns1", + SignerRef: fftypes.SignerRef{ + Author: org1.DID, + Key: "0x12345", + }, + }, + Pins: fftypes.FFStringArray{pin.String()}, + } + mim.On("FindIdentityForVerifier", ag.ctx, []fftypes.IdentityType{fftypes.IdentityTypeOrg, fftypes.IdentityTypeCustom}, "ns1", &fftypes.VerifierRef{ Type: fftypes.VerifierTypeEthAddress, Value: "0x12345", @@ -647,24 +810,18 @@ func TestProcessMsgFailPinUpdate(t *testing.T) { mdi.On("GetNextPins", ag.ctx, mock.Anything).Return([]*fftypes.NextPin{ {Context: fftypes.NewRandB32(), Hash: pin, Identity: org1.DID}, }, nil, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg, nil, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(false, nil) mdi.On("InsertEvent", ag.ctx, mock.Anything).Return(nil) mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateNextPin", ag.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Masked: true, Sequence: 12345, Signer: "0x12345"}, 10, &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - Group: fftypes.NewRandB32(), - Topics: fftypes.FFStringArray{"topic1"}, - Namespace: "ns1", - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - Key: "0x12345", - }, + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345, Signer: "0x12345"}, 10, 
&fftypes.MessageManifestEntry{ + MessageRef: fftypes.MessageRef{ + ID: msg.Header.ID, + Hash: msg.Hash, }, - Pins: fftypes.FFStringArray{pin.String()}, + Topics: len(msg.Header.Topics), }, bs) assert.NoError(t, err) @@ -908,20 +1065,6 @@ func TestAttemptContextInitInsertPinsFail(t *testing.T) { } -func TestAttemptMessageDispatchFailGetData(t *testing.T) { - ag, cancel := newTestAggregator() - defer cancel() - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) - - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, - }, nil, nil, nil) - assert.EqualError(t, err, "pop") - -} - func TestAttemptMessageDispatchFailValidateData(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() @@ -931,7 +1074,7 @@ func TestAttemptMessageDispatchFailValidateData(t *testing.T) { org1 := newTestOrg("org1") mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(false, fmt.Errorf("pop")) _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ @@ -939,7 +1082,7 @@ func TestAttemptMessageDispatchFailValidateData(t *testing.T) { Data: fftypes.DataRefs{ {ID: fftypes.NewUUID()}, }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + }, fftypes.DataArray{}, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.EqualError(t, err, "pop") } @@ -955,12 +1098,6 @@ func TestAttemptMessageDispatchMissingBlobs(t *testing.T) { org1 := newTestOrg("org1") mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: 
fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{ - Hash: blobHash, - Public: "public-ref", - }}, - }, true, nil) mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", ag.ctx, blobHash).Return(nil, nil) @@ -969,6 +1106,11 @@ func TestAttemptMessageDispatchMissingBlobs(t *testing.T) { dispatched, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}}, + }, fftypes.DataArray{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{ + Hash: blobHash, + Public: "public-ref", + }}, }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, dispatched) @@ -983,10 +1125,6 @@ func TestAttemptMessageDispatchMissingTransfers(t *testing.T) { org1 := newTestOrg("org1") mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetTokenTransfers", ag.ctx, mock.Anything).Return([]*fftypes.TokenTransfer{}, nil, nil) @@ -1001,11 +1139,10 @@ func TestAttemptMessageDispatchMissingTransfers(t *testing.T) { }, } msg.Hash = msg.Header.Hash() - dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, fftypes.DataArray{}, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, dispatched) - mdm.AssertExpectations(t) mdi.AssertExpectations(t) } @@ -1018,9 +1155,6 @@ func TestAttemptMessageDispatchGetTransfersFail(t *testing.T) { org1 := newTestOrg("org1") mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm := ag.data.(*datamocks.Manager) - 
mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetTokenTransfers", ag.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) @@ -1032,11 +1166,10 @@ func TestAttemptMessageDispatchGetTransfersFail(t *testing.T) { }, } msg.Hash = msg.Header.Hash() - dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, fftypes.DataArray{}, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.EqualError(t, err, "pop") assert.False(t, dispatched) - mdm.AssertExpectations(t) mdi.AssertExpectations(t) } @@ -1063,17 +1196,14 @@ func TestAttemptMessageDispatchTransferMismatch(t *testing.T) { mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetTokenTransfers", ag.ctx, mock.Anything).Return(transfers, nil, nil) - dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, fftypes.DataArray{}, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, dispatched) - mdm.AssertExpectations(t) + mim.AssertExpectations(t) mdi.AssertExpectations(t) } @@ -1093,7 +1223,7 @@ func TestDefinitionBroadcastActionRejectCustomCorrelator(t *testing.T) { Return(definitions.HandlerResult{Action: definitions.ActionReject, CustomCorrelator: customCorrelator}, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, 
true).Return(fftypes.DataArray{}, true, nil) mdi := ag.database.(*databasemocks.Plugin) mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.MatchedBy(func(u database.Update) bool { @@ -1127,7 +1257,7 @@ func TestDefinitionBroadcastActionRejectCustomCorrelator(t *testing.T) { Data: fftypes.DataRefs{ {ID: fftypes.NewUUID()}, }, - }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) + }, fftypes.DataArray{}, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) err = bs.RunFinalize(ag.ctx) assert.NoError(t, err) @@ -1144,7 +1274,7 @@ func TestDefinitionBroadcastInvalidSigner(t *testing.T) { mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(fftypes.DataArray{}, true, nil) mdi := ag.database.(*databasemocks.Plugin) mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.MatchedBy(func(u database.Update) bool { @@ -1176,7 +1306,7 @@ func TestDefinitionBroadcastInvalidSigner(t *testing.T) { Data: fftypes.DataRefs{ {ID: fftypes.NewUUID()}, }, - }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) + }, fftypes.DataArray{}, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) } @@ -1185,49 +1315,24 @@ func TestDispatchBroadcastQueuesLaterDispatch(t *testing.T) { defer cancel() bs := newBatchState(ag) - org1 := newTestOrg("org1") + msg1, msg2, org1, manifest := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, false, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID).Return(msg1, fftypes.DataArray{}, true, nil).Once() + 
mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID).Return(msg2, fftypes.DataArray{}, true, nil).Once() mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetPins", ag.ctx, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) - msg1 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - } - msg2 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - } - - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{msg1, msg2}, - }, - } - // First message should dispatch - err := ag.processMessage(ag.ctx, batch, &fftypes.Pin{Sequence: 12345}, 0, msg1, bs) + err := ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Sequence: 12345}, 0, manifest.Messages[0], bs) assert.NoError(t, err) // Second message should not (mocks have Once limit on GetMessageData to confirm) - err = ag.processMessage(ag.ctx, batch, &fftypes.Pin{Sequence: 12346}, 0, msg1, bs) + err = ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Sequence: 12346}, 0, manifest.Messages[1], bs) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -1239,15 +1344,16 @@ func TestDispatchPrivateQueuesLaterDispatch(t *testing.T) { defer cancel() bs := newBatchState(ag) - org1 := newTestOrg("org1") + groupID := fftypes.NewRandB32() + msg1, msg2, org1, manifest := newTestManifest(fftypes.MessageTypePrivate, groupID) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, 
true).Return([]*fftypes.Data{}, false, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID).Return(msg1, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID).Return(msg2, fftypes.DataArray{}, true, nil).Once() - groupID := fftypes.NewRandB32() initNPG := &nextPinGroupState{topic: "topic1", groupID: groupID} member1NonceOne := initNPG.calcPinHash("org1", 1) member1NonceTwo := initNPG.calcPinHash("org1", 2) @@ -1260,46 +1366,15 @@ func TestDispatchPrivateQueuesLaterDispatch(t *testing.T) { {Context: context, Nonce: 1 /* match member1NonceOne */, Identity: org1.DID, Hash: member1NonceOne}, }, nil, nil) - msg1 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypePrivate, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - }, - }, - Pins: fftypes.FFStringArray{member1NonceOne.String()}, - } - msg2 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypePrivate, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - }, - }, - Pins: fftypes.FFStringArray{member1NonceTwo.String()}, - } - - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{msg1, msg2}, - }, - } + msg1.Pins = fftypes.FFStringArray{member1NonceOne.String()} + msg2.Pins = fftypes.FFStringArray{member1NonceTwo.String()} // First message should dispatch - err := ag.processMessage(ag.ctx, batch, &fftypes.Pin{Masked: true, Sequence: 12345}, 0, msg1, bs) + err := ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Masked: true, Sequence: 12345}, 0, manifest.Messages[0], bs) assert.NoError(t, err) // Second message should not (mocks have Once limit on GetMessageData to confirm) - err = ag.processMessage(ag.ctx, batch, 
&fftypes.Pin{Masked: true, Sequence: 12346}, 0, msg2, bs) + err = ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Masked: true, Sequence: 12346}, 0, manifest.Messages[1], bs) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -1311,15 +1386,17 @@ func TestDispatchPrivateNextPinIncremented(t *testing.T) { defer cancel() bs := newBatchState(ag) - org1 := newTestOrg("org1") + groupID := fftypes.NewRandB32() + msg1, msg2, org1, manifest := newTestManifest(fftypes.MessageTypePrivate, groupID) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil).Twice() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID).Return(msg1, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID).Return(msg2, fftypes.DataArray{}, true, nil).Once() + mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - groupID := fftypes.NewRandB32() initNPG := &nextPinGroupState{topic: "topic1", groupID: groupID} member1NonceOne := initNPG.calcPinHash(org1.DID, 1) member1NonceTwo := initNPG.calcPinHash(org1.DID, 2) @@ -1332,48 +1409,15 @@ func TestDispatchPrivateNextPinIncremented(t *testing.T) { {Context: context, Nonce: 1 /* match member1NonceOne */, Identity: org1.DID, Hash: member1NonceOne}, }, nil, nil) - msg1 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypePrivate, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - Key: "0x12345", - }, - }, - Pins: fftypes.FFStringArray{member1NonceOne.String()}, - } - msg2 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypePrivate, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: 
fftypes.FFStringArray{"topic1"}, - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - Key: "0x12345", - }, - }, - Pins: fftypes.FFStringArray{member1NonceTwo.String()}, - } - - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{msg1, msg2}, - }, - } + msg1.Pins = fftypes.FFStringArray{member1NonceOne.String()} + msg2.Pins = fftypes.FFStringArray{member1NonceTwo.String()} // First message should dispatch - err := ag.processMessage(ag.ctx, batch, &fftypes.Pin{Masked: true, Sequence: 12345, Signer: "0x12345"}, 0, msg1, bs) + err := ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Masked: true, Sequence: 12345, Signer: "0x12345"}, 0, manifest.Messages[0], bs) assert.NoError(t, err) // Second message should dispatch too (Twice on GetMessageData) - err = ag.processMessage(ag.ctx, batch, &fftypes.Pin{Masked: true, Sequence: 12346, Signer: "0x12345"}, 0, msg2, bs) + err = ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Masked: true, Sequence: 12346, Signer: "0x12345"}, 0, manifest.Messages[1], bs) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -1384,7 +1428,7 @@ func TestDefinitionBroadcastActionRetry(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") + msg1, _, org1, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) @@ -1393,19 +1437,9 @@ func TestDefinitionBroadcastActionRetry(t *testing.T) { msh.On("HandleDefinitionBroadcast", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(definitions.HandlerResult{Action: definitions.ActionRetry}, fmt.Errorf("pop")) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, 
mock.Anything).Return(msg1, fftypes.DataArray{}, true, nil) - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.EqualError(t, err, "pop") } @@ -1414,29 +1448,15 @@ func TestDefinitionBroadcastRejectSignerLookupFail(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) - valid, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.Regexp(t, "pop", err) assert.False(t, valid) - mdm.AssertExpectations(t) mim.AssertExpectations(t) } @@ -1444,29 +1464,15 @@ func TestDefinitionBroadcastRejectSignerLookupWrongOrg(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, 
true).Return([]*fftypes.Data{}, true, nil) + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(newTestOrg("org2"), nil) - valid, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, valid) - mdm.AssertExpectations(t) mim.AssertExpectations(t) } @@ -1474,22 +1480,10 @@ func TestDefinitionBroadcastRejectBadSigner(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + msg1, _, org1, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) + msg1.Header.SignerRef = fftypes.SignerRef{Key: "0x23456", Author: org1.DID} - valid, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x23456", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, valid) @@ -1499,42 +1493,23 @@ func TestDefinitionBroadcastRejectUnregisteredSignerIdentityClaim(t *testing.T) ag, cancel := newTestAggregator() defer 
cancel() - org1 := newTestOrg("org1") + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) - msh := ag.definitions.(*definitionsmocks.DefinitionHandlers) - msh.On("HandleDefinitionBroadcast", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(definitions.HandlerResult{Action: definitions.ActionWait}, nil) - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - - valid, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - Tag: fftypes.SystemTagIdentityClaim, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, valid) mim.AssertExpectations(t) - msh.AssertExpectations(t) - mdm.AssertExpectations(t) } func TestDefinitionBroadcastActionWait(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") + msg1, _, org1, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) @@ -1542,48 +1517,41 @@ func TestDefinitionBroadcastActionWait(t *testing.T) { msh := ag.definitions.(*definitionsmocks.DefinitionHandlers) msh.On("HandleDefinitionBroadcast", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(definitions.HandlerResult{Action: definitions.ActionWait}, nil) - mdm := 
ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) + mim.AssertExpectations(t) + msh.AssertExpectations(t) + } func TestAttemptMessageDispatchEventFail(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() bs := newBatchState(ag) - org1 := newTestOrg("org1") + msg1, _, org1, _ := newTestManifest(fftypes.MessageTypeBroadcast, nil) mdi := ag.database.(*databasemocks.Plugin) mdm := ag.data.(*datamocks.Manager) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", ag.ctx, mock.Anything).Return(fmt.Errorf("pop")) - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}}, + _, err := ag.attemptMessageDispatch(ag.ctx, msg1, fftypes.DataArray{ + &fftypes.Data{ID: msg1.Data[0].ID}, }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) err = bs.RunFinalize(ag.ctx) assert.EqualError(t, err, "pop") + mim.AssertExpectations(t) + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + } func 
TestAttemptMessageDispatchGroupInit(t *testing.T) { @@ -1597,7 +1565,7 @@ func TestAttemptMessageDispatchGroupInit(t *testing.T) { mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", ag.ctx, mock.Anything).Return(nil) @@ -1608,7 +1576,7 @@ func TestAttemptMessageDispatchGroupInit(t *testing.T) { Type: fftypes.MessageTypeGroupInit, SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, }, - }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) + }, nil, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) } @@ -1624,13 +1592,13 @@ func TestAttemptMessageUpdateMessageFail(t *testing.T) { mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}}, - }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) + }, nil, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) err = bs.RunFinalize(ag.ctx) @@ -1711,7 +1679,7 @@ func TestResolveBlobsNoop(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - 
resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{}}, }) @@ -1726,7 +1694,7 @@ func TestResolveBlobsErrorGettingHash(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), }}, @@ -1743,7 +1711,7 @@ func TestResolveBlobsNotFoundPrivate(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(nil, nil) - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), }}, @@ -1760,7 +1728,7 @@ func TestResolveBlobsFoundPrivate(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(&fftypes.Blob{}, nil) - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), }}, @@ -1780,7 +1748,7 @@ func TestResolveBlobsCopyNotFound(t *testing.T) { mdm := ag.data.(*datamocks.Manager) mdm.On("CopyBlobPStoDX", ag.ctx, mock.Anything).Return(nil, nil) - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), Public: "public-ref", @@ -1801,7 +1769,7 @@ func TestResolveBlobsCopyFail(t *testing.T) { mdm := ag.data.(*datamocks.Manager) mdm.On("CopyBlobPStoDX", ag.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - resolved, err := ag.resolveBlobs(ag.ctx, 
[]*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), Public: "public-ref", @@ -1822,7 +1790,7 @@ func TestResolveBlobsCopyOk(t *testing.T) { mdm := ag.data.(*datamocks.Manager) mdm.On("CopyBlobPStoDX", ag.ctx, mock.Anything).Return(&fftypes.Blob{}, nil) - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), Public: "public-ref", diff --git a/internal/events/batch_pin_complete_test.go b/internal/events/batch_pin_complete_test.go index 242ce007e7..acc3d0e281 100644 --- a/internal/events/batch_pin_complete_test.go +++ b/internal/events/batch_pin_complete_test.go @@ -50,9 +50,11 @@ func sampleBatch(t *testing.T, txType fftypes.TransactionType, data ...*fftypes. assert.NoError(t, err) } batch := &fftypes.Batch{ - SignerRef: identity, - ID: fftypes.NewUUID(), - Node: fftypes.NewUUID(), + BatchHeader: fftypes.BatchHeader{ + SignerRef: identity, + ID: fftypes.NewUUID(), + Node: fftypes.NewUUID(), + }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ ID: fftypes.NewUUID(), @@ -84,11 +86,13 @@ func TestBatchPinCompleteOkBroadcast(t *testing.T) { }, } batchData := &fftypes.Batch{ - ID: batch.BatchID, - Namespace: "ns1", - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x22222", + BatchHeader: fftypes.BatchHeader{ + ID: batch.BatchID, + Namespace: "ns1", + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x22222", + }, }, PayloadRef: batch.BatchPayloadRef, Payload: fftypes.BatchPayload{ @@ -166,8 +170,10 @@ func TestBatchPinCompleteOkPrivate(t *testing.T) { }, } batchData := &fftypes.Batch{ - ID: batch.BatchID, - Namespace: "ns1", + BatchHeader: fftypes.BatchHeader{ + ID: batch.BatchID, + Namespace: "ns1", + }, PayloadRef: batch.BatchPayloadRef, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ 
@@ -298,8 +304,9 @@ func TestBatchPinCompleteBadNamespace(t *testing.T) { func TestPersistBatchMissingID(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - valid, err := em.persistBatch(context.Background(), &fftypes.Batch{}) + batch, valid, err := em.persistBatch(context.Background(), &fftypes.Batch{}) assert.False(t, valid) + assert.Nil(t, batch) assert.NoError(t, err) } @@ -308,10 +315,13 @@ func TestPersistBatchAuthorResolveFail(t *testing.T) { defer cancel() batchHash := fftypes.NewRandB32() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, + Hash: batchHash, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -319,7 +329,6 @@ func TestPersistBatchAuthorResolveFail(t *testing.T) { ID: fftypes.NewUUID(), }, }, - Hash: batchHash, } mim := em.identity.(*identitymanagermocks.Manager) mim.On("NormalizeSigningKeyIdentity", mock.Anything, mock.Anything).Return("", fmt.Errorf("pop")) @@ -334,10 +343,13 @@ func TestPersistBatchBadAuthor(t *testing.T) { defer cancel() batchHash := fftypes.NewRandB32() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, + Hash: batchHash, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -345,7 +357,6 @@ func TestPersistBatchBadAuthor(t *testing.T) { ID: fftypes.NewUUID(), }, }, - Hash: batchHash, } mim := em.identity.(*identitymanagermocks.Manager) mim.On("NormalizeSigningKeyIdentity", mock.Anything, mock.Anything).Return("author2", nil) @@ -359,10 +370,13 @@ func TestPersistBatchMismatchChainHash(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := 
&fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, + Hash: fftypes.NewRandB32(), }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -370,7 +384,6 @@ func TestPersistBatchMismatchChainHash(t *testing.T) { ID: fftypes.NewUUID(), }, }, - Hash: fftypes.NewRandB32(), } mim := em.identity.(*identitymanagermocks.Manager) mim.On("NormalizeSigningKeyIdentity", mock.Anything, mock.Anything).Return("author1", nil) @@ -384,10 +397,12 @@ func TestPersistBatchUpsertBatchMismatchHash(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -401,8 +416,9 @@ func TestPersistBatchUpsertBatchMismatchHash(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(database.HashMismatch) - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) assert.False(t, valid) + assert.Nil(t, bp) assert.NoError(t, err) mdi.AssertExpectations(t) } @@ -411,10 +427,12 @@ func TestPersistBatchBadHash(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -425,8 +443,9 @@ func TestPersistBatchBadHash(t 
*testing.T) { } batch.Hash = fftypes.NewRandB32() - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) assert.False(t, valid) + assert.Nil(t, bp) assert.NoError(t, err) } @@ -434,10 +453,12 @@ func TestPersistBatchUpsertBatchFail(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -451,7 +472,8 @@ func TestPersistBatchUpsertBatchFail(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) + assert.Nil(t, bp) assert.False(t, valid) assert.EqualError(t, err, "pop") } @@ -460,12 +482,14 @@ func TestPersistBatchSwallowBadData(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, + Namespace: "ns1", }, - Namespace: "ns1", Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ Type: fftypes.TransactionTypeBatchPin, @@ -480,9 +504,10 @@ func TestPersistBatchSwallowBadData(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil) - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) assert.False(t, valid) assert.NoError(t, err) + assert.Nil(t, bp) 
mdi.AssertExpectations(t) } @@ -490,13 +515,15 @@ func TestPersistBatchGoodDataUpsertOptimizeExistingFail(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - Node: testNodeID, - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Node: testNodeID, + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, + Namespace: "ns1", }, - Namespace: "ns1", Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ Type: fftypes.TransactionTypeBatchPin, @@ -514,7 +541,8 @@ func TestPersistBatchGoodDataUpsertOptimizeExistingFail(t *testing.T) { mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil) mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) + assert.Nil(t, bp) assert.False(t, valid) assert.EqualError(t, err, "pop") } @@ -523,13 +551,15 @@ func TestPersistBatchGoodDataUpsertOptimizeNewFail(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - Node: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Node: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, + Namespace: "ns1", }, - Namespace: "ns1", Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ Type: fftypes.TransactionTypeBatchPin, @@ -547,7 +577,8 @@ func TestPersistBatchGoodDataUpsertOptimizeNewFail(t *testing.T) { mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil) mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) - valid, err := em.persistBatch(context.Background(), 
batch) + bp, valid, err := em.persistBatch(context.Background(), batch) + assert.Nil(t, bp) assert.False(t, valid) assert.EqualError(t, err, "pop") } @@ -565,8 +596,9 @@ func TestPersistBatchGoodDataMessageFail(t *testing.T) { mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil) mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(fmt.Errorf("pop")) - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) assert.False(t, valid) + assert.Nil(t, bp) assert.EqualError(t, err, "pop") } @@ -582,7 +614,8 @@ func TestPersistBatchGoodMessageAuthorMismatch(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil) - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) + assert.Nil(t, bp) assert.False(t, valid) assert.NoError(t, err) } @@ -591,13 +624,16 @@ func TestPersistBatchDataNilData(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, } data := &fftypes.Data{ ID: fftypes.NewUUID(), } - err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) + valid, err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) assert.NoError(t, err) + assert.False(t, valid) } func TestPersistBatchDataBadHash(t *testing.T) { @@ -609,8 +645,9 @@ func TestPersistBatchDataBadHash(t *testing.T) { } batch := sampleBatch(t, fftypes.TransactionTypeBatchPin, data) batch.Payload.Data[0].Hash = fftypes.NewRandB32() - err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) + valid, err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) assert.NoError(t, 
err) + assert.False(t, valid) } func TestPersistBatchDataUpsertHashMismatch(t *testing.T) { @@ -625,7 +662,8 @@ func TestPersistBatchDataUpsertHashMismatch(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(database.HashMismatch) - err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) + valid, err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) + assert.False(t, valid) assert.NoError(t, err) mdi.AssertExpectations(t) } @@ -640,7 +678,8 @@ func TestPersistBatchDataUpsertDataError(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(fmt.Errorf("pop")) - err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) + valid, err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) + assert.False(t, valid) assert.EqualError(t, err, "pop") } @@ -654,7 +693,8 @@ func TestPersistBatchDataOk(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(nil) - err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) + valid, err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip) + assert.True(t, valid) assert.NoError(t, err) mdi.AssertExpectations(t) } @@ -663,7 +703,9 @@ func TestPersistBatchMessageNilData(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, } msg := &fftypes.Message{ Header: fftypes.MessageHeader{ diff --git a/internal/events/dx_callbacks_test.go b/internal/events/dx_callbacks_test.go index 
6349f9749d..ae4a1f92be 100644 --- a/internal/events/dx_callbacks_test.go +++ b/internal/events/dx_callbacks_test.go @@ -25,6 +25,7 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/dataexchangemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/definitionsmocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/pkg/database" @@ -97,12 +98,16 @@ func TestPinnedReceiveOK(t *testing.T) { mim.On("CachedIdentityLookup", em.ctx, "signingOrg").Return(org1, false, nil) mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil, nil) mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil, nil) + mdm := em.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) assert.NotNil(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestMessageReceiveOkBadBatchIgnored(t *testing.T) { @@ -757,6 +762,8 @@ func TestMessageReceiveUnpinnedBatchOk(t *testing.T) { mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) mdi.On("UpdateMessages", em.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", em.ctx, mock.Anything).Return(nil) + mdm := em.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) @@ -764,6 +771,7 @@ func TestMessageReceiveUnpinnedBatchOk(t *testing.T) { mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestMessageReceiveUnpinnedBatchConfirmMessagesFail(t *testing.T) { em, cancel := newTestEventManager(t) @@ -793,6 +801,8 @@ func TestMessageReceiveUnpinnedBatchConfirmMessagesFail(t *testing.T) { mdi.On("UpsertData", em.ctx, mock.Anything, 
database.UpsertOptimizationNew).Return(nil) mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) mdi.On("UpdateMessages", em.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + mdm := em.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) @@ -800,6 +810,7 @@ func TestMessageReceiveUnpinnedBatchConfirmMessagesFail(t *testing.T) { mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestMessageReceiveUnpinnedBatchPersistEventFail(t *testing.T) { @@ -831,6 +842,8 @@ func TestMessageReceiveUnpinnedBatchPersistEventFail(t *testing.T) { mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) mdi.On("UpdateMessages", em.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", em.ctx, mock.Anything).Return(fmt.Errorf("pop")) + mdm := em.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) @@ -838,6 +851,7 @@ func TestMessageReceiveUnpinnedBatchPersistEventFail(t *testing.T) { mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestMessageReceiveMessageEnsureLocalGroupFail(t *testing.T) { diff --git a/internal/events/event_dispatcher.go b/internal/events/event_dispatcher.go index 27d91326b2..d5b48e3929 100644 --- a/internal/events/event_dispatcher.go +++ b/internal/events/event_dispatcher.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -159,6 +159,10 @@ func (ed *eventDispatcher) getEvents(ctx context.Context, filter database.Filter } func (ed *eventDispatcher) enrichEvents(events []fftypes.LocallySequenced) ([]*fftypes.EventDelivery, error) { + + // TODO: Update this to use the message cache, only for event types that have message references. + // Do this after Alex's changes to enrichment are merged + // We need all the messages that match event references refIDs := make([]driver.Value, len(events)) for i, ls := range events { @@ -382,7 +386,7 @@ func (ed *eventDispatcher) deliverEvents() { var data []*fftypes.Data var err error if withData && event.Message != nil { - data, _, err = ed.data.GetMessageData(ed.ctx, event.Message, true) + data, _, err = ed.data.GetMessageDataCached(ed.ctx, event.Message) } if err == nil { err = ed.transport.DeliveryRequest(ed.connID, ed.subscription.definition, event, data) diff --git a/internal/events/event_dispatcher_test.go b/internal/events/event_dispatcher_test.go index 2b3653ba9a..00f0b90362 100644 --- a/internal/events/event_dispatcher_test.go +++ b/internal/events/event_dispatcher_test.go @@ -815,7 +815,7 @@ func TestDeliverEventsWithDataFail(t *testing.T) { defer cancel() mdm := ed.data.(*datamocks.Manager) - mdm.On("GetMessageData", ed.ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageDataCached", ed.ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) id1 := fftypes.NewUUID() ed.eventDelivery <- &fftypes.EventDelivery{ diff --git a/internal/events/persist_batch.go b/internal/events/persist_batch.go index 58fd93f20a..6673a92de6 100644 --- a/internal/events/persist_batch.go +++ b/internal/events/persist_batch.go @@ -39,7 +39,6 @@ func (em *eventManager) persistBatchFromBroadcast(ctx context.Context /* db TX c // or discards them. Errors are returned only in the case of database failures, which should be retried. 
func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, batch *fftypes.Batch) (persistedBatch *fftypes.BatchPersisted, valid bool, err error) { l := log.L(ctx) - now := fftypes.Now() if batch.ID == nil || batch.Payload.TX.ID == nil { l.Errorf("Invalid batch '%s'. Missing ID or transaction ID (%v)", batch.ID, batch.Payload.TX.ID) @@ -54,17 +53,15 @@ func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, bat return nil, false, nil // This is not retryable. skip this batch } - // Re-generate the manifest - manifest := batch.Manifest() - manifestString := manifest.String() - manifestHash := fftypes.HashString(manifestString) + // Set confirmed on the batch (the messages should not be confirmed at this point - that's the aggregator's job) + persistedBatch, _ = batch.Confirmed() + manifestHash := fftypes.HashString(persistedBatch.Manifest) // Verify the hash calculation. if !manifestHash.Equals(batch.Hash) { // To cope with existing batches written by v0.13 and older environments, we have to do a more expensive // hashing of the whole payload before we reject. - payloadHash := batch.Payload.Hash() - if payloadHash.Equals(batch.Hash) { + if batch.Payload.Hash().Equals(batch.Hash) { l.Infof("Persisting migrated batch '%s'. Hash is a payload hash: %s", batch.ID, batch.Hash) } else { l.Errorf("Invalid batch '%s'. Hash does not match payload. 
Found=%s Expected=%s", batch.ID, manifestHash, batch.Hash) @@ -72,13 +69,6 @@ func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, bat } } - // Set confirmed on the batch (the messages should not be confirmed at this point - that's the aggregator's job) - persistedBatch = &fftypes.BatchPersisted{ - BatchHeader: batch.BatchHeader, - Manifest: manifestString, - Confirmed: now, - } - // Upsert the batch err = em.database.UpsertBatch(ctx, persistedBatch) if err != nil { @@ -90,23 +80,48 @@ func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, bat return nil, false, err // a persistence failure here is considered retryable (so returned) } + valid, err = em.persistBatchContent(ctx, batch) + if err != nil || !valid { + return nil, valid, err + } + return persistedBatch, valid, err +} + +func (em *eventManager) persistBatchContent(ctx context.Context, batch *fftypes.Batch) (valid bool, err error) { + optimization := em.getOptimization(ctx, batch) // Insert the data entries + dataByID := make(map[fftypes.UUID]*fftypes.Data) for i, data := range batch.Payload.Data { - if err = em.persistBatchData(ctx, batch, i, data, optimization); err != nil { - return nil, false, err + if valid, err = em.persistBatchData(ctx, batch, i, data, optimization); !valid || err != nil { + return valid, err } + dataByID[*data.ID] = data } // Insert the message entries for i, msg := range batch.Payload.Messages { if valid, err = em.persistBatchMessage(ctx, batch, i, msg, optimization); !valid || err != nil { - return nil, valid, err + return valid, err + } + dataInBatch := true + msgData := make(fftypes.DataArray, len(msg.Data)) + for di, dataRef := range msg.Data { + msgData[di] = dataByID[*dataRef.ID] + if msgData[di] == nil || !msgData[di].Hash.Equals(dataRef.Hash) { + log.L(ctx).Errorf("Message '%s' in batch '%s' - data not in-line in batch id='%s' hash='%s'", msg.Header.ID, batch.ID, dataRef.ID, dataRef.Hash) + dataInBatch = false + break + } + } + 
if dataInBatch { + // We can push the complete message into the cache straight away + em.data.UpdateMessageCache(msg, msgData) } } - return persistedBatch, true, nil + return true, nil } func (em *eventManager) getOptimization(ctx context.Context, batch *fftypes.Batch) database.UpsertOptimization { @@ -122,9 +137,8 @@ func (em *eventManager) getOptimization(ctx context.Context, batch *fftypes.Batc return database.UpsertOptimizationNew } -func (em *eventManager) persistBatchData(ctx context.Context /* db TX context*/, batch *fftypes.Batch, i int, data *fftypes.Data, optimization database.UpsertOptimization) error { - _, err := em.persistReceivedData(ctx, i, data, "batch", batch.ID, optimization) - return err +func (em *eventManager) persistBatchData(ctx context.Context /* db TX context*/, batch *fftypes.Batch, i int, data *fftypes.Data, optimization database.UpsertOptimization) (bool, error) { + return em.persistReceivedData(ctx, i, data, "batch", batch.ID, optimization) } func (em *eventManager) persistReceivedData(ctx context.Context /* db TX context*/, i int, data *fftypes.Data, mType string, mID *fftypes.UUID, optimization database.UpsertOptimization) (bool, error) { diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index 5ef755b06f..aefc6ce243 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -88,7 +88,7 @@ func (_m *Manager) DownloadBLOB(ctx context.Context, ns string, dataID string) ( } // GetMessageDataCached provides a mock function with given fields: ctx, msg, options -func (_m *Manager) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...data.CacheReadOption) ([]*fftypes.Data, bool, error) { +func (_m *Manager) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...data.CacheReadOption) (fftypes.DataArray, bool, error) { _va := make([]interface{}, len(options)) for _i := range options { _va[_i] = options[_i] @@ -98,12 +98,12 @@ func (_m *Manager) 
GetMessageDataCached(ctx context.Context, msg *fftypes.Messag _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Message, ...data.CacheReadOption) []*fftypes.Data); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Message, ...data.CacheReadOption) fftypes.DataArray); ok { r0 = rf(ctx, msg, options...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Data) + r0 = ret.Get(0).(fftypes.DataArray) } } @@ -125,7 +125,7 @@ func (_m *Manager) GetMessageDataCached(ctx context.Context, msg *fftypes.Messag } // GetMessageWithDataCached provides a mock function with given fields: ctx, msgID, options -func (_m *Manager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...data.CacheReadOption) (*fftypes.Message, []*fftypes.Data, bool, error) { +func (_m *Manager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...data.CacheReadOption) (*fftypes.Message, fftypes.DataArray, bool, error) { _va := make([]interface{}, len(options)) for _i := range options { _va[_i] = options[_i] @@ -144,12 +144,12 @@ func (_m *Manager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes. } } - var r1 []*fftypes.Data - if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) []*fftypes.Data); ok { + var r1 fftypes.DataArray + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) fftypes.DataArray); ok { r1 = rf(ctx, msgID, options...) 
} else { if ret.Get(1) != nil { - r1 = ret.Get(1).([]*fftypes.Data) + r1 = ret.Get(1).(fftypes.DataArray) } } @@ -249,7 +249,7 @@ func (_m *Manager) ResolveInlineDataPrivate(ctx context.Context, ns string, inDa } // UpdateMessageCache provides a mock function with given fields: msg, _a1 -func (_m *Manager) UpdateMessageCache(msg *fftypes.Message, _a1 []*fftypes.Data) { +func (_m *Manager) UpdateMessageCache(msg *fftypes.Message, _a1 fftypes.DataArray) { _m.Called(msg, _a1) } @@ -300,18 +300,18 @@ func (_m *Manager) UploadJSON(ctx context.Context, ns string, inData *fftypes.Da } // ValidateAll provides a mock function with given fields: ctx, _a1 -func (_m *Manager) ValidateAll(ctx context.Context, _a1 []*fftypes.Data) (bool, error) { +func (_m *Manager) ValidateAll(ctx context.Context, _a1 fftypes.DataArray) (bool, error) { ret := _m.Called(ctx, _a1) var r0 bool - if rf, ok := ret.Get(0).(func(context.Context, []*fftypes.Data) bool); ok { + if rf, ok := ret.Get(0).(func(context.Context, fftypes.DataArray) bool); ok { r0 = rf(ctx, _a1) } else { r0 = ret.Get(0).(bool) } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, []*fftypes.Data) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, fftypes.DataArray) error); ok { r1 = rf(ctx, _a1) } else { r1 = ret.Error(1) diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index e46a6154b6..b30d447069 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -31,6 +31,11 @@ var ( BatchTypePrivate BatchType = ffEnum("batchtype", "private") ) +const ( + ManifestVersionUnset uint = 0 + ManifestVersion1 uint = 1 +) + // BatchHeader is the common fields between the serialized batch, and the batch manifest type BatchHeader struct { ID *UUID `json:"id"` @@ -38,17 +43,25 @@ type BatchHeader struct { Namespace string `json:"namespace"` Node *UUID `json:"node,omitempty"` SignerRef - Group *Bytes32 `jdon:"group,omitempty"` - Hash *Bytes32 `json:"hash"` + Group *Bytes32 `jdon:"group,omitempty"` + Created 
*FFTime `json:"created"` + Hash *Bytes32 `json:"hash"` +} + +type MessageManifestEntry struct { + MessageRef + Topics int `json:"topics"` // We only need the count, to be able to match up the pins } // BatchManifest is all we need to persist to be able to reconstitute // an identical batch. It can be generated from a received batch to // confirm you have received an identical batch to that sent. type BatchManifest struct { - ID *UUID `json:"id"` - Messages []MessageRef `json:"messages"` - Data []DataRef `json:"data"` + Version uint `json:"version"` + ID *UUID `json:"id"` + TX TransactionRef `json:"tx"` + Messages []*MessageManifestEntry `json:"messages"` + Data DataRefs `json:"data"` } // Batch is the full payload object used in-flight. @@ -64,7 +77,6 @@ type BatchPersisted struct { Manifest string `json:"manifest"` // not automatically parsed TX TransactionRef `json:"tx"` PayloadRef string `json:"payloadRef,omitempty"` - Created *FFTime `json:"created"` Confirmed *FFTime `json:"confirmed"` } @@ -95,23 +107,49 @@ func (ma *BatchPayload) Hash() *Bytes32 { func (ma *BatchPayload) Manifest(id *UUID) *BatchManifest { tm := &BatchManifest{ + Version: ManifestVersion1, ID: id, - Messages: make([]MessageRef, len(ma.Messages)), - Data: make([]DataRef, len(ma.Data)), + TX: ma.TX, + Messages: make([]*MessageManifestEntry, 0, len(ma.Messages)), + Data: make(DataRefs, 0, len(ma.Data)), } - for i, m := range ma.Messages { - tm.Messages[i].ID = m.Header.ID - tm.Messages[i].Hash = m.Hash + for _, m := range ma.Messages { + if m != nil && m.Header.ID != nil { + tm.Messages = append(tm.Messages, &MessageManifestEntry{ + MessageRef: MessageRef{ + ID: m.Header.ID, + Hash: m.Hash, + }, + Topics: len(m.Header.Topics), + }) + } } - for i, d := range ma.Data { - tm.Data[i].ID = d.ID - tm.Data[i].Hash = d.Hash + for _, d := range ma.Data { + if d != nil && d.ID != nil { + tm.Data = append(tm.Data, &DataRef{ + ID: d.ID, + Hash: d.Hash, + }) + } } return tm } + func (b *Batch) Manifest() 
*BatchManifest { if b == nil { return nil } return b.Payload.Manifest(b.ID) } + +// Confirmed generates a newly confirmed persisted batch, including (re-)generating the manifest +func (b *Batch) Confirmed() (*BatchPersisted, *BatchManifest) { + manifest := b.Manifest() + manifestString := manifest.String() + return &BatchPersisted{ + BatchHeader: b.BatchHeader, + TX: b.Payload.TX, + Manifest: manifestString, + Confirmed: Now(), + }, manifest +} diff --git a/pkg/fftypes/batch_test.go b/pkg/fftypes/batch_test.go index 43ec0e6a7b..461c8ca44c 100644 --- a/pkg/fftypes/batch_test.go +++ b/pkg/fftypes/batch_test.go @@ -44,7 +44,13 @@ func TestSQLSerializedManifest(t *testing.T) { }, } - mfString := batch.Manifest().String() + bp, manifest := batch.Confirmed() + mfString := manifest.String() + assert.Equal(t, batch.BatchHeader, bp.BatchHeader) + assert.Equal(t, batch.Payload.TX, bp.TX) + assert.Equal(t, mfString, bp.Manifest) + assert.NotNil(t, bp.Confirmed) + var mf *BatchManifest err := json.Unmarshal([]byte(mfString), &mf) assert.NoError(t, err) From d9844c5d3cb8ef42c17a506c4f6f94334a1c21cf Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 8 Mar 2022 10:26:08 -0500 Subject: [PATCH 10/11] Changes worked through all modules Signed-off-by: Peter Broadhurst --- docs/swagger/swagger.yaml | 228 ++----------- internal/apiserver/route_get_batch_by_id.go | 4 +- .../apiserver/route_get_batch_by_id_test.go | 2 +- internal/apiserver/route_get_batches.go | 4 +- internal/apiserver/route_get_batches_test.go | 2 +- internal/apiserver/route_get_data.go | 2 +- internal/apiserver/route_get_data_test.go | 2 +- internal/apiserver/route_get_msg_data.go | 2 +- internal/apiserver/route_get_msg_data_test.go | 2 +- internal/batch/batch_manager.go | 2 +- internal/batch/batch_manager_test.go | 14 +- internal/batch/batch_processor.go | 2 +- internal/broadcast/operations_test.go | 2 +- internal/data/data_manager_test.go | 10 +- internal/database/sqlcommon/data_sql.go | 4 +- 
internal/definitions/definition_handler.go | 6 +- .../definition_handler_contracts.go | 4 +- .../definition_handler_contracts_test.go | 14 +- .../definition_handler_datatype.go | 2 +- .../definition_handler_datatype_test.go | 16 +- .../definition_handler_identity_claim.go | 2 +- .../definition_handler_identity_claim_test.go | 48 +-- .../definition_handler_identity_update.go | 2 +- ...definition_handler_identity_update_test.go | 14 +- ...efinition_handler_identity_verification.go | 2 +- ...tion_handler_identity_verification_test.go | 26 +- .../definition_handler_namespace.go | 2 +- .../definition_handler_namespace_test.go | 20 +- .../definition_handler_network_node.go | 2 +- .../definition_handler_network_node_test.go | 8 +- .../definition_handler_network_org.go | 2 +- .../definition_handler_network_org_test.go | 4 +- .../definitions/definition_handler_test.go | 6 +- .../definition_handler_tokenpool.go | 2 +- .../definition_handler_tokenpool_test.go | 4 +- internal/events/aggregator.go | 17 +- internal/events/aggregator_test.go | 304 ++++++++++++++++++ internal/events/batch_pin_complete_test.go | 10 +- internal/events/persist_batch_test.go | 91 +++++- internal/events/system/events.go | 4 +- internal/events/webhooks/webhooks.go | 6 +- internal/events/webhooks/webhooks_test.go | 16 +- internal/events/websockets/websockets.go | 4 +- internal/orchestrator/data_query.go | 4 +- internal/orchestrator/data_query_test.go | 36 +-- internal/orchestrator/orchestrator.go | 4 +- internal/orchestrator/txn_status_test.go | 4 +- .../privatemessaging/groupmanager_test.go | 12 +- internal/privatemessaging/message.go | 2 +- internal/privatemessaging/message_test.go | 2 +- internal/privatemessaging/operations_test.go | 2 +- internal/privatemessaging/privatemessaging.go | 2 +- .../privatemessaging/privatemessaging_test.go | 10 +- internal/syncasync/sync_async_bridge_test.go | 6 +- mocks/databasemocks/plugin.go | 8 +- mocks/definitionsmocks/definition_handlers.go | 6 +- 
mocks/eventsmocks/plugin.go | 4 +- mocks/eventsmocks/plugin_all.go | 4 +- mocks/orchestratormocks/orchestrator.go | 32 +- pkg/database/plugin.go | 2 +- pkg/events/plugin.go | 4 +- test/e2e/e2e_test.go | 2 +- test/e2e/restclient_test.go | 4 +- 63 files changed, 636 insertions(+), 434 deletions(-) diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index f82bd14ccf..935ca0f232 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -903,121 +903,29 @@ paths: properties: author: type: string - blobs: - items: {} - type: array confirmed: {} created: {} hash: {} id: {} key: type: string + manifest: + type: string namespace: type: string node: {} - payload: - properties: - data: - items: - properties: - blob: - properties: - hash: {} - name: - type: string - public: - type: string - size: - format: int64 - type: integer - type: object - created: {} - datatype: - properties: - name: - type: string - version: - type: string - type: object - hash: {} - id: {} - namespace: - type: string - validator: - type: string - value: - type: string - type: object - type: array - messages: - items: - properties: - batch: {} - confirmed: {} - data: - items: - properties: - hash: {} - id: {} - type: object - type: array - hash: {} - header: - properties: - author: - type: string - cid: {} - created: {} - datahash: {} - group: {} - id: {} - key: - type: string - namespace: - type: string - tag: - type: string - topics: - items: - type: string - type: array - txtype: - type: string - type: - enum: - - definition - - broadcast - - private - - groupinit - - transfer_broadcast - - transfer_private - type: string - type: object - pins: - items: - type: string - type: array - state: - enum: - - staged - - ready - - sent - - pending - - confirmed - - rejected - type: string - type: object - type: array - tx: - properties: - id: {} - type: - type: string - type: object - type: object payloadRef: type: string + tx: + properties: + id: {} + type: + type: string + 
type: object type: + enum: + - broadcast + - private type: string type: object description: Success @@ -1056,121 +964,29 @@ paths: properties: author: type: string - blobs: - items: {} - type: array confirmed: {} created: {} hash: {} id: {} key: type: string + manifest: + type: string namespace: type: string node: {} - payload: - properties: - data: - items: - properties: - blob: - properties: - hash: {} - name: - type: string - public: - type: string - size: - format: int64 - type: integer - type: object - created: {} - datatype: - properties: - name: - type: string - version: - type: string - type: object - hash: {} - id: {} - namespace: - type: string - validator: - type: string - value: - type: string - type: object - type: array - messages: - items: - properties: - batch: {} - confirmed: {} - data: - items: - properties: - hash: {} - id: {} - type: object - type: array - hash: {} - header: - properties: - author: - type: string - cid: {} - created: {} - datahash: {} - group: {} - id: {} - key: - type: string - namespace: - type: string - tag: - type: string - topics: - items: - type: string - type: array - txtype: - type: string - type: - enum: - - definition - - broadcast - - private - - groupinit - - transfer_broadcast - - transfer_private - type: string - type: object - pins: - items: - type: string - type: array - state: - enum: - - staged - - ready - - sent - - pending - - confirmed - - rejected - type: string - type: object - type: array - tx: - properties: - id: {} - type: - type: string - type: object - type: object payloadRef: type: string + tx: + properties: + id: {} + type: + type: string + type: object type: + enum: + - broadcast + - private type: string type: object description: Success diff --git a/internal/apiserver/route_get_batch_by_id.go b/internal/apiserver/route_get_batch_by_id.go index d07da849f8..65ebb91f38 100644 --- a/internal/apiserver/route_get_batch_by_id.go +++ b/internal/apiserver/route_get_batch_by_id.go @@ -1,4 +1,4 @@ -// 
Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -37,7 +37,7 @@ var getBatchByID = &oapispec.Route{ FilterFactory: nil, Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return &fftypes.Batch{} }, + JSONOutputValue: func() interface{} { return &fftypes.BatchPersisted{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { output, err = getOr(r.Ctx).GetBatchByID(r.Ctx, r.PP["ns"], r.PP["batchid"]) diff --git a/internal/apiserver/route_get_batch_by_id_test.go b/internal/apiserver/route_get_batch_by_id_test.go index 7ab63c56a0..9c8a15c159 100644 --- a/internal/apiserver/route_get_batch_by_id_test.go +++ b/internal/apiserver/route_get_batch_by_id_test.go @@ -32,7 +32,7 @@ func TestGetBatchByID(t *testing.T) { res := httptest.NewRecorder() o.On("GetBatchByID", mock.Anything, "mynamespace", "abcd12345"). - Return(&fftypes.Batch{}, nil) + Return(&fftypes.BatchPersisted{}, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/apiserver/route_get_batches.go b/internal/apiserver/route_get_batches.go index 9357f285e1..a9f199ff65 100644 --- a/internal/apiserver/route_get_batches.go +++ b/internal/apiserver/route_get_batches.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -37,7 +37,7 @@ var getBatches = &oapispec.Route{ FilterFactory: database.BatchQueryFactory, Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return []*fftypes.Batch{} }, + JSONOutputValue: func() interface{} { return []*fftypes.BatchPersisted{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { return filterResult(getOr(r.Ctx).GetBatches(r.Ctx, r.PP["ns"], r.Filter)) diff --git a/internal/apiserver/route_get_batches_test.go b/internal/apiserver/route_get_batches_test.go index c74a687a9f..e25b464fa2 100644 --- a/internal/apiserver/route_get_batches_test.go +++ b/internal/apiserver/route_get_batches_test.go @@ -32,7 +32,7 @@ func TestGetBatches(t *testing.T) { res := httptest.NewRecorder() o.On("GetBatches", mock.Anything, "mynamespace", mock.Anything). - Return([]*fftypes.Batch{}, nil, nil) + Return([]*fftypes.BatchPersisted{}, nil, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/apiserver/route_get_data.go b/internal/apiserver/route_get_data.go index 71810ab65f..d7e2cbbe7c 100644 --- a/internal/apiserver/route_get_data.go +++ b/internal/apiserver/route_get_data.go @@ -37,7 +37,7 @@ var getData = &oapispec.Route{ FilterFactory: database.DataQueryFactory, Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return []*fftypes.Data{} }, + JSONOutputValue: func() interface{} { return fftypes.DataArray{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { return filterResult(getOr(r.Ctx).GetData(r.Ctx, r.PP["ns"], r.Filter)) diff --git a/internal/apiserver/route_get_data_test.go b/internal/apiserver/route_get_data_test.go index 6a69d09370..fedc291245 100644 --- a/internal/apiserver/route_get_data_test.go +++ b/internal/apiserver/route_get_data_test.go @@ -32,7 +32,7 @@ func 
TestGetData(t *testing.T) { res := httptest.NewRecorder() o.On("GetData", mock.Anything, "mynamespace", mock.Anything). - Return([]*fftypes.Data{}, nil, nil) + Return(fftypes.DataArray{}, nil, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/apiserver/route_get_msg_data.go b/internal/apiserver/route_get_msg_data.go index 16be29e808..b9390b6a7f 100644 --- a/internal/apiserver/route_get_msg_data.go +++ b/internal/apiserver/route_get_msg_data.go @@ -37,7 +37,7 @@ var getMsgData = &oapispec.Route{ FilterFactory: nil, // No filtering on this route - use namespaces/{ns}/data Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return []*fftypes.Data{} }, + JSONOutputValue: func() interface{} { return fftypes.DataArray{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { output, err = getOr(r.Ctx).GetMessageData(r.Ctx, r.PP["ns"], r.PP["msgid"]) diff --git a/internal/apiserver/route_get_msg_data_test.go b/internal/apiserver/route_get_msg_data_test.go index 6dc4879657..9cab08a055 100644 --- a/internal/apiserver/route_get_msg_data_test.go +++ b/internal/apiserver/route_get_msg_data_test.go @@ -32,7 +32,7 @@ func TestGetMessageData(t *testing.T) { res := httptest.NewRecorder() o.On("GetMessageData", mock.Anything, "mynamespace", "uuid1"). 
- Return([]*fftypes.Data{}, nil) + Return(fftypes.DataArray{}, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 0d1e616b90..16f1dded10 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -176,7 +176,7 @@ func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fft return processor, nil } -func (bm *batchManager) assembleMessageData(processor *batchProcessor, msg *fftypes.Message) (retData []*fftypes.Data, err error) { +func (bm *batchManager) assembleMessageData(processor *batchProcessor, msg *fftypes.Message) (retData fftypes.DataArray, err error) { var cro []data.CacheReadOption if processor.conf.DispatcherOptions.BatchType == fftypes.BatchTypeBroadcast { cro = append(cro, data.CRORequirePublicBlobRefs) diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index 742e9c8b96..7914514983 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -98,7 +98,7 @@ func TestE2EDispatchBroadcast(t *testing.T) { ID: dataID1, Hash: dataHash, } - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{data}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{data}, true, nil) mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -215,7 +215,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { ID: dataID1, Hash: dataHash, } - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{data}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{data}, true, nil) mdi.On("GetMessages", 
mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) // pins @@ -272,7 +272,7 @@ func TestDispatchUnknownType(t *testing.T) { msg := &fftypes.Message{} mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{}, true, nil) err := bm.Start() assert.NoError(t, err) @@ -343,7 +343,7 @@ func TestMessageSequencerMissingMessageData(t *testing.T) { }). Once() mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything, data.CRORequirePublicBlobRefs).Return([]*fftypes.Data{}, false, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything, data.CRORequirePublicBlobRefs).Return(fftypes.DataArray{}, false, nil) bm.(*batchManager).messageSequencer() @@ -380,7 +380,7 @@ func TestMessageSequencerUpdateMessagesFail(t *testing.T) { {ID: dataID}, }}, }, nil, nil) - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{{ID: dataID}}, true, nil) mdi.On("InsertTransaction", mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) // transaction submit mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -433,7 +433,7 @@ func TestMessageSequencerDispatchFail(t *testing.T) { {ID: dataID}, }}, }, nil, nil) - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + 
mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{{ID: dataID}}, true, nil) mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything).Return(nil) bm.(*batchManager).messageSequencer() @@ -471,7 +471,7 @@ func TestMessageSequencerUpdateBatchFail(t *testing.T) { {ID: dataID}, }}, }, nil, nil) - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{{ID: dataID}}, true, nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("fizzle")) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything) rag.RunFn = func(a mock.Arguments) { diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 42144e8637..04afc8045c 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -36,7 +36,7 @@ import ( type batchWork struct { msg *fftypes.Message - data []*fftypes.Data + data fftypes.DataArray } type batchProcessorConf struct { diff --git a/internal/broadcast/operations_test.go b/internal/broadcast/operations_test.go index 06d6a7feaa..bcca3d4440 100644 --- a/internal/broadcast/operations_test.go +++ b/internal/broadcast/operations_test.go @@ -175,7 +175,7 @@ func TestRunOperationBatchBroadcastInvalidData(t *testing.T) { op := &fftypes.Operation{} batch := &fftypes.Batch{ Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ {Value: fftypes.JSONAnyPtr(`!json`)}, }, }, diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index 3ac8a45d49..c4b738377e 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -73,7 +73,7 @@ func TestValidateE2E(t *testing.T) { Version: "0.0.1", } mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(dt, nil) - isValid, err := 
dm.ValidateAll(ctx, []*fftypes.Data{data}) + isValid, err := dm.ValidateAll(ctx, fftypes.DataArray{data}) assert.Regexp(t, "FF10198", err) assert.False(t, isValid) @@ -86,7 +86,7 @@ func TestValidateE2E(t *testing.T) { err = v.Validate(ctx, data) assert.NoError(t, err) - isValid, err = dm.ValidateAll(ctx, []*fftypes.Data{data}) + isValid, err = dm.ValidateAll(ctx, fftypes.DataArray{data}) assert.NoError(t, err) assert.True(t, isValid) @@ -149,7 +149,7 @@ func TestValidateBadHash(t *testing.T) { Namespace: "0.0.1", } mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(dt, nil).Once() - _, err := dm.ValidateAll(ctx, []*fftypes.Data{data}) + _, err := dm.ValidateAll(ctx, fftypes.DataArray{data}) assert.Regexp(t, "FF10201", err) } @@ -616,7 +616,7 @@ func TestValidateAllLookupError(t *testing.T) { Value: fftypes.JSONAnyPtr(`anything`), } data.Seal(ctx, nil) - _, err := dm.ValidateAll(ctx, []*fftypes.Data{data}) + _, err := dm.ValidateAll(ctx, fftypes.DataArray{data}) assert.Regexp(t, "pop", err) } @@ -646,7 +646,7 @@ func TestValidateAllStoredValidatorInvalid(t *testing.T) { Version: "0.0.1", }, } - isValid, err := dm.ValidateAll(ctx, []*fftypes.Data{data}) + isValid, err := dm.ValidateAll(ctx, fftypes.DataArray{data}) assert.False(t, isValid) assert.NoError(t, err) mdi.AssertExpectations(t) diff --git a/internal/database/sqlcommon/data_sql.go b/internal/database/sqlcommon/data_sql.go index 79c05f7dce..cb18d2c483 100644 --- a/internal/database/sqlcommon/data_sql.go +++ b/internal/database/sqlcommon/data_sql.go @@ -237,7 +237,7 @@ func (s *SQLCommon) GetDataByID(ctx context.Context, id *fftypes.UUID, withValue return data, nil } -func (s *SQLCommon) GetData(ctx context.Context, filter database.Filter) (message []*fftypes.Data, res *database.FilterResult, err error) { +func (s *SQLCommon) GetData(ctx context.Context, filter database.Filter) (message fftypes.DataArray, res *database.FilterResult, err error) { query, fop, fi, err := 
s.filterSelect(ctx, "", sq.Select(dataColumnsWithValue...).From("data"), filter, dataFilterFieldMap, []interface{}{"sequence"}) if err != nil { @@ -250,7 +250,7 @@ func (s *SQLCommon) GetData(ctx context.Context, filter database.Filter) (messag } defer rows.Close() - data := []*fftypes.Data{} + data := fftypes.DataArray{} for rows.Next() { d, err := s.dataResult(ctx, rows, true) if err != nil { diff --git a/internal/definitions/definition_handler.go b/internal/definitions/definition_handler.go index bd457d0a0f..a3d9472e08 100644 --- a/internal/definitions/definition_handler.go +++ b/internal/definitions/definition_handler.go @@ -37,7 +37,7 @@ import ( type DefinitionHandlers interface { privatemessaging.GroupManager - HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) + HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) SendReply(ctx context.Context, event *fftypes.Event, reply *fftypes.MessageInOut) } @@ -120,7 +120,7 @@ func (dh *definitionHandlers) EnsureLocalGroup(ctx context.Context, group *fftyp return dh.messaging.EnsureLocalGroup(ctx, group) } -func (dh *definitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (msgAction HandlerResult, err error) { +func (dh *definitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (msgAction HandlerResult, err error) { l := log.L(ctx) l.Infof("Confirming system definition broadcast '%s' [%s]", msg.Header.Tag, msg.Header.ID) switch msg.Header.Tag { @@ -150,7 +150,7 @@ func (dh *definitionHandlers) HandleDefinitionBroadcast(ctx context.Context, sta } } -func (dh *definitionHandlers) 
getSystemBroadcastPayload(ctx context.Context, msg *fftypes.Message, data []*fftypes.Data, res fftypes.Definition) (valid bool) { +func (dh *definitionHandlers) getSystemBroadcastPayload(ctx context.Context, msg *fftypes.Message, data fftypes.DataArray, res fftypes.Definition) (valid bool) { l := log.L(ctx) if len(data) != 1 { l.Warnf("Unable to process system broadcast %s - expecting 1 attachment, found %d", msg.Header.ID, len(data)) diff --git a/internal/definitions/definition_handler_contracts.go b/internal/definitions/definition_handler_contracts.go index b718542d25..0031b17628 100644 --- a/internal/definitions/definition_handler_contracts.go +++ b/internal/definitions/definition_handler_contracts.go @@ -65,7 +65,7 @@ func (dh *definitionHandlers) persistContractAPI(ctx context.Context, api *fftyp return err == nil, err } -func (dh *definitionHandlers) handleFFIBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleFFIBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) { l := log.L(ctx) var broadcast fftypes.FFI valid := dh.getSystemBroadcastPayload(ctx, msg, data, &broadcast) @@ -96,7 +96,7 @@ func (dh *definitionHandlers) handleFFIBroadcast(ctx context.Context, state Defi return HandlerResult{Action: ActionConfirm}, nil } -func (dh *definitionHandlers) handleContractAPIBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleContractAPIBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) { l := log.L(ctx) var broadcast fftypes.ContractAPI valid := dh.getSystemBroadcastPayload(ctx, msg, data, &broadcast) diff --git 
a/internal/definitions/definition_handler_contracts_test.go b/internal/definitions/definition_handler_contracts_test.go index 2adddf091e..6fe263685c 100644 --- a/internal/definitions/definition_handler_contracts_test.go +++ b/internal/definitions/definition_handler_contracts_test.go @@ -107,7 +107,7 @@ func TestHandleFFIBroadcastOk(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineFFI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -135,7 +135,7 @@ func TestHandleFFIBroadcastReject(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineFFI, }, - }, []*fftypes.Data{}, fftypes.NewUUID()) + }, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -195,7 +195,7 @@ func TestHandleFFIBroadcastValidateFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineFFI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -218,7 +218,7 @@ func TestHandleFFIBroadcastPersistFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineFFI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) bs.assertNoFinalizers() @@ -241,7 +241,7 @@ func TestHandleContractAPIBroadcastOk(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineContractAPI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, 
err) err = bs.finalizers[0](context.Background()) @@ -295,7 +295,7 @@ func TestHandleContractAPIBroadcastValidateFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineContractAPI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -317,7 +317,7 @@ func TestHandleContractAPIBroadcastPersistFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineContractAPI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) bs.assertNoFinalizers() diff --git a/internal/definitions/definition_handler_datatype.go b/internal/definitions/definition_handler_datatype.go index ab1eb58630..5dde2a0e67 100644 --- a/internal/definitions/definition_handler_datatype.go +++ b/internal/definitions/definition_handler_datatype.go @@ -23,7 +23,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleDatatypeBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleDatatypeBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) { l := log.L(ctx) var dt fftypes.Datatype diff --git a/internal/definitions/definition_handler_datatype_test.go b/internal/definitions/definition_handler_datatype_test.go index 87c4fde596..adc267a047 100644 --- a/internal/definitions/definition_handler_datatype_test.go +++ b/internal/definitions/definition_handler_datatype_test.go @@ -57,7 +57,7 @@ func TestHandleDefinitionBroadcastDatatypeOk(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, 
[]*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -95,7 +95,7 @@ func TestHandleDefinitionBroadcastDatatypeEventFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -126,7 +126,7 @@ func TestHandleDefinitionBroadcastDatatypeMissingID(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -156,7 +156,7 @@ func TestHandleDefinitionBroadcastBadSchema(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -181,7 +181,7 @@ func TestHandleDefinitionBroadcastMissingData(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{}, fftypes.NewUUID()) + }, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -214,7 +214,7 @@ func TestHandleDefinitionBroadcastDatatypeLookupFail(t *testing.T) { Namespace: fftypes.SystemNamespace, Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.EqualError(t, err, 
"pop") @@ -250,7 +250,7 @@ func TestHandleDefinitionBroadcastUpsertFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.EqualError(t, err, "pop") @@ -285,7 +285,7 @@ func TestHandleDefinitionBroadcastDatatypeDuplicate(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) diff --git a/internal/definitions/definition_handler_identity_claim.go b/internal/definitions/definition_handler_identity_claim.go index c59b5f7e58..c0403e2493 100644 --- a/internal/definitions/definition_handler_identity_claim.go +++ b/internal/definitions/definition_handler_identity_claim.go @@ -25,7 +25,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleIdentityClaimBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, verificationID *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleIdentityClaimBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, verificationID *fftypes.UUID) (HandlerResult, error) { var claim fftypes.IdentityClaim valid := dh.getSystemBroadcastPayload(ctx, msg, data, &claim) if !valid { diff --git a/internal/definitions/definition_handler_identity_claim_test.go b/internal/definitions/definition_handler_identity_claim_test.go index 5725b3d9d7..4637e14147 100644 --- a/internal/definitions/definition_handler_identity_claim_test.go +++ b/internal/definitions/definition_handler_identity_claim_test.go @@ -169,12 +169,12 @@ func TestHandleDefinitionIdentityClaimCustomWithExistingParentVerificationOk(t * 
})).Return(nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, false, nil).Once() - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, false, nil).Once() + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -215,12 +215,12 @@ func TestHandleDefinitionIdentityClaimIdempotentReplay(t *testing.T) { })).Return(nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, false, nil).Once() - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, false, nil).Once() + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -250,11 +250,11 @@ func TestHandleDefinitionIdentityClaimFailInsertIdentity(t *testing.T) { mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) mdm := dh.data.(*datamocks.Manager) 
- mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -284,7 +284,7 @@ func TestHandleDefinitionIdentityClaimVerificationDataFail(t *testing.T) { bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -310,11 +310,11 @@ func TestHandleDefinitionIdentityClaimVerificationMissingData(t *testing.T) { mdi.On("GetMessages", ctx, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -341,11 +341,11 @@ func TestHandleDefinitionIdentityClaimFailInsertVerifier(t *testing.T) { mdi.On("UpsertVerifier", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) mdm 
:= dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -370,7 +370,7 @@ func TestHandleDefinitionIdentityClaimCustomMissingParentVerificationOk(t *testi mdi.On("GetVerifierByValue", ctx, fftypes.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) mdi.On("GetMessages", ctx, mock.Anything).Return([]*fftypes.Message{}, nil, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) // Just wait for the verification to come in later assert.NoError(t, err) @@ -394,7 +394,7 @@ func TestHandleDefinitionIdentityClaimCustomParentVerificationFail(t *testing.T) mdi.On("GetVerifierByValue", ctx, fftypes.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) mdi.On("GetMessages", ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -419,7 +419,7 @@ func TestHandleDefinitionIdentityClaimVerifierClash(t *testing.T) { Hash: fftypes.NewRandB32(), }, nil) - action, err := 
dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -442,7 +442,7 @@ func TestHandleDefinitionIdentityClaimVerifierError(t *testing.T) { mdi.On("GetIdentityByID", ctx, custom1.ID).Return(nil, nil) mdi.On("GetVerifierByValue", ctx, fftypes.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -467,7 +467,7 @@ func TestHandleDefinitionIdentityClaimIdentityClash(t *testing.T) { }, }, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -489,7 +489,7 @@ func TestHandleDefinitionIdentityClaimIdentityError(t *testing.T) { mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) mdi.On("GetIdentityByID", ctx, custom1.ID).Return(nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -508,7 +508,7 @@ func TestHandleDefinitionIdentityMissingAuthor(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) 
mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -526,7 +526,7 @@ func TestHandleDefinitionIdentityClaimBadSignature(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -544,7 +544,7 @@ func TestHandleDefinitionIdentityVerifyChainFail(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("VerifyIdentityChain", ctx, custom1).Return(nil, true, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -562,7 +562,7 @@ func TestHandleDefinitionIdentityVerifyChainInvalid(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("VerifyIdentityChain", ctx, custom1).Return(nil, false, fmt.Errorf("wrong")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -577,7 +577,7 @@ func 
TestHandleDefinitionIdentityClaimBadData(t *testing.T) { _, org1, claimMsg, _, _, _ := testCustomClaimAndVerification(t) claimMsg.Header.Author = org1.DID // should be the child for the claim - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) diff --git a/internal/definitions/definition_handler_identity_update.go b/internal/definitions/definition_handler_identity_update.go index 5f1688e9d3..1bc97e87dd 100644 --- a/internal/definitions/definition_handler_identity_update.go +++ b/internal/definitions/definition_handler_identity_update.go @@ -24,7 +24,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleIdentityUpdateBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) { +func (dh *definitionHandlers) handleIdentityUpdateBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) { var update fftypes.IdentityUpdate valid := dh.getSystemBroadcastPayload(ctx, msg, data, &update) if !valid { diff --git a/internal/definitions/definition_handler_identity_update_test.go b/internal/definitions/definition_handler_identity_update_test.go index 91cb7d0549..51e2884212 100644 --- a/internal/definitions/definition_handler_identity_update_test.go +++ b/internal/definitions/definition_handler_identity_update_test.go @@ -86,7 +86,7 @@ func TestHandleDefinitionIdentityUpdateOk(t *testing.T) { return event.Type == fftypes.EventTypeIdentityUpdated })).Return(nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, 
fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -109,7 +109,7 @@ func TestHandleDefinitionIdentityUpdateUpsertFail(t *testing.T) { mdi := dh.database.(*databasemocks.Plugin) mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -128,7 +128,7 @@ func TestHandleDefinitionIdentityInvalidIdentity(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -145,7 +145,7 @@ func TestHandleDefinitionIdentityNotFound(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -162,7 +162,7 @@ func TestHandleDefinitionIdentityLookupFail(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, 
[]*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -199,7 +199,7 @@ func TestHandleDefinitionIdentityValidateFail(t *testing.T) { }, } - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -224,7 +224,7 @@ func TestHandleDefinitionIdentityMissingData(t *testing.T) { }, } - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) diff --git a/internal/definitions/definition_handler_identity_verification.go b/internal/definitions/definition_handler_identity_verification.go index 9324cdcc79..f599433aa9 100644 --- a/internal/definitions/definition_handler_identity_verification.go +++ b/internal/definitions/definition_handler_identity_verification.go @@ -23,7 +23,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleIdentityVerificationBroadcast(ctx context.Context, state DefinitionBatchState, verifyMsg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) { +func (dh *definitionHandlers) handleIdentityVerificationBroadcast(ctx context.Context, state DefinitionBatchState, verifyMsg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) { var verification fftypes.IdentityVerification valid := dh.getSystemBroadcastPayload(ctx, verifyMsg, data, &verification) if !valid { diff --git 
a/internal/definitions/definition_handler_identity_verification_test.go b/internal/definitions/definition_handler_identity_verification_test.go index 6c6e1b5929..76b4c7824e 100644 --- a/internal/definitions/definition_handler_identity_verification_test.go +++ b/internal/definitions/definition_handler_identity_verification_test.go @@ -62,11 +62,11 @@ func TestHandleDefinitionIdentityVerificationWithExistingClaimOk(t *testing.T) { })).Return(nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{claimData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{claimData}, true, nil) bs.pendingConfirms[*claimMsg.Header.ID] = claimMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -92,9 +92,9 @@ func TestHandleDefinitionIdentityVerificationIncompleteClaimData(t *testing.T) { mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(claimMsg, nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", ctx, mock.Anything).Return([]*fftypes.Data{}, false, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{}, false, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -120,7 +120,7 @@ func TestHandleDefinitionIdentityVerificationClaimDataFail(t *testing.T) { mdm := dh.data.(*datamocks.Manager) mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) - action, err := 
dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -144,7 +144,7 @@ func TestHandleDefinitionIdentityVerificationClaimHashMismatchl(t *testing.T) { mdi := dh.database.(*databasemocks.Plugin) mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(claimMsg, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -165,7 +165,7 @@ func TestHandleDefinitionIdentityVerificationBeforeClaim(t *testing.T) { mdi := dh.database.(*databasemocks.Plugin) mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(nil, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -186,7 +186,7 @@ func TestHandleDefinitionIdentityVerificationClaimLookupFail(t *testing.T) { mdi := dh.database.(*databasemocks.Plugin) mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -205,7 +205,7 @@ func TestHandleDefinitionIdentityVerificationWrongSigner(t *testing.T) { mim := 
dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -222,7 +222,7 @@ func TestHandleDefinitionIdentityVerificationCheckParentNotFound(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -239,7 +239,7 @@ func TestHandleDefinitionIdentityVerificationCheckParentFail(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -267,7 +267,7 @@ func TestHandleDefinitionIdentityVerificationInvalidPayload(t *testing.T) { Type: fftypes.MessageTypeBroadcast, Tag: fftypes.SystemTagIdentityVerification, }, - }, []*fftypes.Data{emptyObjectData}, fftypes.NewUUID()) + }, fftypes.DataArray{emptyObjectData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -284,7 +284,7 @@ func TestHandleDefinitionIdentityVerificationInvalidData(t *testing.T) { Type: 
fftypes.MessageTypeBroadcast, Tag: fftypes.SystemTagIdentityVerification, }, - }, []*fftypes.Data{}, fftypes.NewUUID()) + }, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) diff --git a/internal/definitions/definition_handler_namespace.go b/internal/definitions/definition_handler_namespace.go index 55db98b405..738ee6776a 100644 --- a/internal/definitions/definition_handler_namespace.go +++ b/internal/definitions/definition_handler_namespace.go @@ -23,7 +23,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleNamespaceBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleNamespaceBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) { l := log.L(ctx) var ns fftypes.Namespace diff --git a/internal/definitions/definition_handler_namespace_test.go b/internal/definitions/definition_handler_namespace_test.go index 794c858887..daac65f425 100644 --- a/internal/definitions/definition_handler_namespace_test.go +++ b/internal/definitions/definition_handler_namespace_test.go @@ -49,7 +49,7 @@ func TestHandleDefinitionBroadcastNSOk(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -79,7 +79,7 @@ func TestHandleDefinitionBroadcastNSEventFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, 
action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -108,7 +108,7 @@ func TestHandleDefinitionBroadcastNSUpsertFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.EqualError(t, err, "pop") @@ -123,7 +123,7 @@ func TestHandleDefinitionBroadcastNSMissingData(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{}, fftypes.NewUUID()) + }, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -143,7 +143,7 @@ func TestHandleDefinitionBroadcastNSBadID(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -160,7 +160,7 @@ func TestHandleDefinitionBroadcastNSBadData(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -185,7 +185,7 @@ func TestHandleDefinitionBroadcastDuplicate(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -216,7 +216,7 @@ func TestHandleDefinitionBroadcastDuplicateOverrideLocal(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, 
[]*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -246,7 +246,7 @@ func TestHandleDefinitionBroadcastDuplicateOverrideLocalFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.EqualError(t, err, "pop") @@ -273,7 +273,7 @@ func TestHandleDefinitionBroadcastDupCheckFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineNamespace, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.EqualError(t, err, "pop") diff --git a/internal/definitions/definition_handler_network_node.go b/internal/definitions/definition_handler_network_node.go index 6f8d8e4a04..42f896610b 100644 --- a/internal/definitions/definition_handler_network_node.go +++ b/internal/definitions/definition_handler_network_node.go @@ -23,7 +23,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleDeprecatedNodeBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) { +func (dh *definitionHandlers) handleDeprecatedNodeBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) { l := log.L(ctx) var nodeOld fftypes.DeprecatedNode diff --git a/internal/definitions/definition_handler_network_node_test.go b/internal/definitions/definition_handler_network_node_test.go index 2c15e39b54..a6baa8a717 100644 --- a/internal/definitions/definition_handler_network_node_test.go +++ 
b/internal/definitions/definition_handler_network_node_test.go @@ -129,7 +129,7 @@ func TestHandleDeprecatedNodeDefinitionOK(t *testing.T) { mdx := dh.exchange.(*dataexchangemocks.Plugin) mdx.On("AddPeer", ctx, node.DX.Endpoint).Return(nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, msg, []*fftypes.Data{data}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, msg, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -148,7 +148,7 @@ func TestHandleDeprecatedNodeDefinitionBadData(t *testing.T) { dh, bs := newTestDefinitionHandlers(t) ctx := context.Background() - action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, &fftypes.Message{}, []*fftypes.Data{}) + action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, &fftypes.Message{}, fftypes.DataArray{}) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -167,7 +167,7 @@ func TestHandleDeprecatedNodeDefinitionFailOrgLookup(t *testing.T) { Value: node.Owner, }).Return(nil, fmt.Errorf("pop")) - action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, msg, []*fftypes.Data{data}) + action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, msg, fftypes.DataArray{data}) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -188,7 +188,7 @@ func TestHandleDeprecatedNodeDefinitionOrgNotFound(t *testing.T) { Value: node.Owner, }).Return(nil, nil) - action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, msg, []*fftypes.Data{data}) + action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, msg, fftypes.DataArray{data}) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) diff --git a/internal/definitions/definition_handler_network_org.go b/internal/definitions/definition_handler_network_org.go index 93e3c08c55..540e4bdb5c 100644 --- a/internal/definitions/definition_handler_network_org.go +++ 
b/internal/definitions/definition_handler_network_org.go @@ -22,7 +22,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleDeprecatedOrganizationBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) { +func (dh *definitionHandlers) handleDeprecatedOrganizationBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) { var orgOld fftypes.DeprecatedOrganization valid := dh.getSystemBroadcastPayload(ctx, msg, data, &orgOld) diff --git a/internal/definitions/definition_handler_network_org_test.go b/internal/definitions/definition_handler_network_org_test.go index d64c70b565..b7dec269d6 100644 --- a/internal/definitions/definition_handler_network_org_test.go +++ b/internal/definitions/definition_handler_network_org_test.go @@ -109,7 +109,7 @@ func TestHandleDeprecatedOrgDefinitionOK(t *testing.T) { return event.Type == fftypes.EventTypeIdentityConfirmed })).Return(nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, msg, []*fftypes.Data{data}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, msg, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -124,7 +124,7 @@ func TestHandleDeprecatedOrgDefinitionBadData(t *testing.T) { dh, bs := newTestDefinitionHandlers(t) ctx := context.Background() - action, err := dh.handleDeprecatedOrganizationBroadcast(ctx, bs, &fftypes.Message{}, []*fftypes.Data{}) + action, err := dh.handleDeprecatedOrganizationBroadcast(ctx, bs, &fftypes.Message{}, fftypes.DataArray{}) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) diff --git a/internal/definitions/definition_handler_test.go b/internal/definitions/definition_handler_test.go index 7d5d31d67b..96ec4a0e50 100644 --- 
a/internal/definitions/definition_handler_test.go +++ b/internal/definitions/definition_handler_test.go @@ -85,7 +85,7 @@ func TestHandleDefinitionBroadcastUnknown(t *testing.T) { Header: fftypes.MessageHeader{ Tag: "unknown", }, - }, []*fftypes.Data{}, fftypes.NewUUID()) + }, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -97,7 +97,7 @@ func TestGetSystemBroadcastPayloadMissingData(t *testing.T) { Header: fftypes.MessageHeader{ Tag: "unknown", }, - }, []*fftypes.Data{}, nil) + }, fftypes.DataArray{}, nil) assert.False(t, valid) } @@ -107,7 +107,7 @@ func TestGetSystemBroadcastPayloadBadJSON(t *testing.T) { Header: fftypes.MessageHeader{ Tag: "unknown", }, - }, []*fftypes.Data{}, nil) + }, fftypes.DataArray{}, nil) assert.False(t, valid) } diff --git a/internal/definitions/definition_handler_tokenpool.go b/internal/definitions/definition_handler_tokenpool.go index eb40243b91..16494696a0 100644 --- a/internal/definitions/definition_handler_tokenpool.go +++ b/internal/definitions/definition_handler_tokenpool.go @@ -39,7 +39,7 @@ func (dh *definitionHandlers) persistTokenPool(ctx context.Context, announce *ff return true, nil } -func (dh *definitionHandlers) handleTokenPoolBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) { +func (dh *definitionHandlers) handleTokenPoolBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) { var announce fftypes.TokenPoolAnnouncement if valid := dh.getSystemBroadcastPayload(ctx, msg, data, &announce); !valid { return HandlerResult{Action: ActionReject}, nil diff --git a/internal/definitions/definition_handler_tokenpool_test.go b/internal/definitions/definition_handler_tokenpool_test.go index 55cbb64ff1..2f2f10e0fe 100644 --- a/internal/definitions/definition_handler_tokenpool_test.go 
+++ b/internal/definitions/definition_handler_tokenpool_test.go @@ -51,7 +51,7 @@ func newPoolAnnouncement() *fftypes.TokenPoolAnnouncement { } } -func buildPoolDefinitionMessage(announce *fftypes.TokenPoolAnnouncement) (*fftypes.Message, []*fftypes.Data, error) { +func buildPoolDefinitionMessage(announce *fftypes.TokenPoolAnnouncement) (*fftypes.Message, fftypes.DataArray, error) { msg := &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), @@ -62,7 +62,7 @@ func buildPoolDefinitionMessage(announce *fftypes.TokenPoolAnnouncement) (*fftyp if err != nil { return nil, nil, err } - data := []*fftypes.Data{{ + data := fftypes.DataArray{{ Value: fftypes.JSONAnyPtrBytes(b), }} return msg, data, nil diff --git a/internal/events/aggregator.go b/internal/events/aggregator.go index 4ae42efb82..f52a8e8014 100644 --- a/internal/events/aggregator.go +++ b/internal/events/aggregator.go @@ -209,17 +209,20 @@ func (ag *aggregator) extractBatchMessagePin(manifest *fftypes.BatchManifest, re func (ag *aggregator) migrateManifest(ctx context.Context, persistedBatch *fftypes.BatchPersisted) *fftypes.BatchManifest { // In version v0.13.x and earlier, we stored the full batch - var fullBatch fftypes.Batch - err := json.Unmarshal([]byte(persistedBatch.Manifest), &fullBatch) + var fullPayload fftypes.BatchPayload + err := json.Unmarshal([]byte(persistedBatch.Manifest), &fullPayload) if err != nil { log.L(ctx).Errorf("Invalid migration persisted batch: %s", err) return nil } - if len(fullBatch.Payload.Messages) == 0 { + if len(fullPayload.Messages) == 0 { log.L(ctx).Errorf("Invalid migration persisted batch: no payload") return nil } - return fullBatch.Manifest() + return (&fftypes.Batch{ + BatchHeader: persistedBatch.BatchHeader, + Payload: fullPayload, + }).Manifest() } func (ag *aggregator) extractManifest(ctx context.Context, batch *fftypes.BatchPersisted) *fftypes.BatchManifest { @@ -276,10 +279,6 @@ func (ag *aggregator) processPins(ctx context.Context, pins 
[]*fftypes.Pin, stat } l.Debugf("Aggregating pin %.10d batch=%s msg=%s pinIndex=%d msgBaseIndex=%d hash=%s masked=%t", pin.Sequence, pin.Batch, msgEntry.ID, pin.Index, msgBaseIndex, pin.Hash, pin.Masked) - if msgEntry.ID == nil { - l.Errorf("null message entry %d in batch '%s'", pin.Index, batch.ID) - continue - } if dupMsgCheck[*msgEntry.ID] { continue } @@ -505,7 +504,7 @@ func (ag *aggregator) attemptMessageDispatch(ctx context.Context, msg *fftypes.M // resolveBlobs ensures that the blobs for all the attachments in the data array, have been received into the // local data exchange blob store. Either because of a private transfer, or by downloading them from the shared storage -func (ag *aggregator) resolveBlobs(ctx context.Context, data []*fftypes.Data) (resolved bool, err error) { +func (ag *aggregator) resolveBlobs(ctx context.Context, data fftypes.DataArray) (resolved bool, err error) { l := log.L(ctx) for _, d := range data { diff --git a/internal/events/aggregator_test.go b/internal/events/aggregator_test.go index 3c8b893521..249080dff2 100644 --- a/internal/events/aggregator_test.go +++ b/internal/events/aggregator_test.go @@ -19,6 +19,7 @@ package events import ( "context" "crypto/sha256" + "encoding/json" "fmt" "testing" @@ -458,6 +459,229 @@ func TestAggregationBroadcast(t *testing.T) { mdm.AssertExpectations(t) } +func TestAggregationMigratedBroadcast(t *testing.T) { + + ag, cancel := newTestAggregator() + defer cancel() + bs := newBatchState(ag) + + // Generate some pin data + member1org := newTestOrg("org1") + member1key := "0x12345" + topic := "some-topic" + batchID := fftypes.NewUUID() + msgID := fftypes.NewUUID() + h := sha256.New() + h.Write([]byte(topic)) + contextUnmasked := fftypes.HashResult(h) + + mdi := ag.database.(*databasemocks.Plugin) + mdm := ag.data.(*datamocks.Manager) + mim := ag.identity.(*identitymanagermocks.Manager) + + mim.On("FindIdentityForVerifier", ag.ctx, []fftypes.IdentityType{fftypes.IdentityTypeOrg, 
fftypes.IdentityTypeCustom}, "ns1", &fftypes.VerifierRef{ + Type: fftypes.VerifierTypeEthAddress, + Value: member1key, + }).Return(member1org, nil) + + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, + Payload: fftypes.BatchPayload{ + Messages: []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + ID: msgID, + Topics: []string{topic}, + Namespace: "ns1", + SignerRef: fftypes.SignerRef{ + Author: member1org.DID, + Key: member1key, + }, + }, + Data: fftypes.DataRefs{ + {ID: fftypes.NewUUID()}, + }, + }, + }, + }, + } + payloadBinary, err := json.Marshal(&batch.Payload) + assert.NoError(t, err) + bp := &fftypes.BatchPersisted{ + TX: batch.Payload.TX, + BatchHeader: batch.BatchHeader, + Manifest: string(payloadBinary), + } + + // Get the batch + mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) + // Do not resolve any pins earlier + mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) + // Validate the message is ok + mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) + mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) + // Insert the confirmed event + mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { + return *e.Reference == *msgID && e.Type == fftypes.EventTypeMessageConfirmed + })).Return(nil) + // Set the pin to dispatched + mdi.On("UpdatePins", ag.ctx, mock.Anything, mock.Anything).Return(nil) + // Update the message + mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil) + // Confirm the offset + mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + err = ag.processPins(ag.ctx, []*fftypes.Pin{ + { + Sequence: 10001, + Hash: contextUnmasked, + Batch: batchID, + Index: 0, + Signer: member1key, + Dispatched: false, + }, + }, bs) + assert.NoError(t, err) + + err = bs.RunFinalize(ag.ctx) + assert.NoError(t, 
err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestAggregationMigratedBroadcastNilMessageID(t *testing.T) { + + ag, cancel := newTestAggregator() + defer cancel() + bs := newBatchState(ag) + + // Generate some pin data + member1org := newTestOrg("org1") + member1key := "0x12345" + topic := "some-topic" + batchID := fftypes.NewUUID() + h := sha256.New() + h.Write([]byte(topic)) + contextUnmasked := fftypes.HashResult(h) + + mdi := ag.database.(*databasemocks.Plugin) + mdm := ag.data.(*datamocks.Manager) + mim := ag.identity.(*identitymanagermocks.Manager) + + mim.On("FindIdentityForVerifier", ag.ctx, []fftypes.IdentityType{fftypes.IdentityTypeOrg, fftypes.IdentityTypeCustom}, "ns1", &fftypes.VerifierRef{ + Type: fftypes.VerifierTypeEthAddress, + Value: member1key, + }).Return(member1org, nil) + + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, + Payload: fftypes.BatchPayload{ + Messages: []*fftypes.Message{{ + Header: fftypes.MessageHeader{ + Topics: fftypes.FFStringArray{"topic1"}, + }, + }}, + }, + } + payloadBinary, err := json.Marshal(&batch.Payload) + assert.NoError(t, err) + bp := &fftypes.BatchPersisted{ + TX: batch.Payload.TX, + BatchHeader: batch.BatchHeader, + Manifest: string(payloadBinary), + } + + mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) + mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + err = ag.processPins(ag.ctx, []*fftypes.Pin{ + { + Sequence: 10001, + Hash: contextUnmasked, + Batch: batchID, + Index: 0, + Signer: member1key, + Dispatched: false, + }, + }, bs) + assert.NoError(t, err) + + err = bs.RunFinalize(ag.ctx) + assert.NoError(t, err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestAggregationMigratedBroadcastInvalid(t *testing.T) { + + ag, cancel := newTestAggregator() + defer cancel() + bs := newBatchState(ag) + + // Generate some pin data + member1org := newTestOrg("org1") + member1key := "0x12345" 
+ topic := "some-topic" + batchID := fftypes.NewUUID() + h := sha256.New() + h.Write([]byte(topic)) + contextUnmasked := fftypes.HashResult(h) + + mdi := ag.database.(*databasemocks.Plugin) + mdm := ag.data.(*datamocks.Manager) + mim := ag.identity.(*identitymanagermocks.Manager) + + mim.On("FindIdentityForVerifier", ag.ctx, []fftypes.IdentityType{fftypes.IdentityTypeOrg, fftypes.IdentityTypeCustom}, "ns1", &fftypes.VerifierRef{ + Type: fftypes.VerifierTypeEthAddress, + Value: member1key, + }).Return(member1org, nil) + + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, + Payload: fftypes.BatchPayload{ + Messages: []*fftypes.Message{{ + Header: fftypes.MessageHeader{ + Topics: fftypes.FFStringArray{"topic1"}, + }, + }}, + }, + } + bp := &fftypes.BatchPersisted{ + TX: batch.Payload.TX, + BatchHeader: batch.BatchHeader, + Manifest: "{}", + } + + mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) + mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + err := ag.processPins(ag.ctx, []*fftypes.Pin{ + { + Sequence: 10001, + Hash: contextUnmasked, + Batch: batchID, + Index: 0, + Signer: member1key, + Dispatched: false, + }, + }, bs) + assert.NoError(t, err) + + err = bs.RunFinalize(ag.ctx) + assert.NoError(t, err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + func TestShutdownOnCancel(t *testing.T) { ag, cancel := newTestAggregator() mdi := ag.database.(*databasemocks.Plugin) @@ -664,6 +888,32 @@ func TestProcessMsgFailGetPins(t *testing.T) { mdm.AssertExpectations(t) } +func TestProcessMsgFailData(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(nil, nil, false, fmt.Errorf("pop")) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) + assert.Regexp(t, "pop", 
err) + + mdm.AssertExpectations(t) +} + +func TestProcessMsgFailMissingData(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(&fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, nil, false, nil) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) + assert.NoError(t, err) + + mdm.AssertExpectations(t) +} + func TestProcessMsgFailMissingGroup(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() @@ -1493,6 +1743,27 @@ func TestDefinitionBroadcastRejectUnregisteredSignerIdentityClaim(t *testing.T) ag, cancel := newTestAggregator() defer cancel() + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) + msg1.Header.Tag = fftypes.SystemTagIdentityClaim + + mim := ag.identity.(*identitymanagermocks.Manager) + mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + + msh := ag.definitions.(*definitionsmocks.DefinitionHandlers) + msh.On("HandleDefinitionBroadcast", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(definitions.HandlerResult{Action: definitions.ActionWait}, nil) + + valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + assert.NoError(t, err) + assert.False(t, valid) + + mim.AssertExpectations(t) + msh.AssertExpectations(t) +} + +func TestDefinitionBroadcastRootUnregisteredOk(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) @@ -1880,3 +2151,36 @@ func TestProcessWithBatchActionsSuccess(t *testing.T) { }) assert.NoError(t, err) } + +func TestExtractManifestFail(t *testing.T) { + ag, cancel := newTestAggregator() + 
defer cancel() + + manifest := ag.extractManifest(ag.ctx, &fftypes.BatchPersisted{ + Manifest: "!wrong", + }) + + assert.Nil(t, manifest) +} + +func TestExtractManifestBadVersion(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + manifest := ag.extractManifest(ag.ctx, &fftypes.BatchPersisted{ + Manifest: `{"version":999}`, + }) + + assert.Nil(t, manifest) +} + +func TestMigrateManifestFail(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + manifest := ag.migrateManifest(ag.ctx, &fftypes.BatchPersisted{ + Manifest: "!wrong", + }) + + assert.Nil(t, manifest) +} diff --git a/internal/events/batch_pin_complete_test.go b/internal/events/batch_pin_complete_test.go index acc3d0e281..ec3a8a687e 100644 --- a/internal/events/batch_pin_complete_test.go +++ b/internal/events/batch_pin_complete_test.go @@ -101,7 +101,7 @@ func TestBatchPinCompleteOkBroadcast(t *testing.T) { ID: batch.TransactionID, }, Messages: []*fftypes.Message{}, - Data: []*fftypes.Data{}, + Data: fftypes.DataArray{}, }, } batchData.Hash = batchData.Payload.Hash() @@ -181,7 +181,7 @@ func TestBatchPinCompleteOkPrivate(t *testing.T) { ID: batch.TransactionID, }, Messages: []*fftypes.Message{}, - Data: []*fftypes.Data{}, + Data: fftypes.DataArray{}, }, } batchDataBytes, err := json.Marshal(&batchData) @@ -496,7 +496,7 @@ func TestPersistBatchSwallowBadData(t *testing.T) { ID: fftypes.NewUUID(), }, Messages: []*fftypes.Message{nil}, - Data: []*fftypes.Data{nil}, + Data: fftypes.DataArray{nil}, }, } batch.Hash = batch.Payload.Hash() @@ -529,7 +529,7 @@ func TestPersistBatchGoodDataUpsertOptimizeExistingFail(t *testing.T) { Type: fftypes.TransactionTypeBatchPin, ID: fftypes.NewUUID(), }, - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}, }, }, @@ -565,7 +565,7 @@ func TestPersistBatchGoodDataUpsertOptimizeNewFail(t *testing.T) { Type: fftypes.TransactionTypeBatchPin, ID: fftypes.NewUUID(), }, - Data: 
[]*fftypes.Data{ + Data: fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}, }, }, diff --git a/internal/events/persist_batch_test.go b/internal/events/persist_batch_test.go index b75c1390a7..4071348151 100644 --- a/internal/events/persist_batch_test.go +++ b/internal/events/persist_batch_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -75,18 +76,104 @@ func TestPersistBatchFromBroadcast(t *testing.T) { }, }, }, - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ data, }, }, } - batch.Hash = batch.Payload.Hash() + batch.Hash = fftypes.HashString(batch.Manifest().String()) _, err = em.persistBatchFromBroadcast(em.ctx, batch, batch.Hash) assert.EqualError(t, err, "pop") // Confirms we got to upserting the batch } +func TestPersistBatchFromBroadcastNoCacheDataNotInBatch(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil) + mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationSkip).Return(nil) + + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "did:firefly:org/12345", + Key: "0x12345", + }, + }, + Payload: fftypes.BatchPayload{ + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + Type: fftypes.TransactionTypeBatchPin, + }, + Messages: []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypeDefinition, + SignerRef: fftypes.SignerRef{ + Author: "did:firefly:org/12345", + Key: "0x12345", + }, + TxType: fftypes.TransactionTypeBatchPin, + }, + Data: fftypes.DataRefs{ + { + ID: fftypes.NewUUID(), + Hash: fftypes.NewRandB32(), + }, + }, + }, + }, + 
Data: nil, + }, + } + batch.Payload.Messages[0].Seal(em.ctx) + batch.Hash = fftypes.HashString(batch.Manifest().String()) + + valid, err := em.persistBatchFromBroadcast(em.ctx, batch, batch.Hash) + assert.True(t, valid) + assert.NoError(t, err) + +} + +func TestPersistBatchNilMessageEntryop(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil) + + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "did:firefly:org/12345", + Key: "0x12345", + }, + }, + Payload: fftypes.BatchPayload{ + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + Type: fftypes.TransactionTypeBatchPin, + }, + Messages: []*fftypes.Message{nil}, + Data: nil, + }, + } + batch.Hash = fftypes.HashString(batch.Manifest().String()) + + valid, err := em.persistBatchFromBroadcast(em.ctx, batch, batch.Hash) + assert.False(t, valid) + assert.NoError(t, err) + +} + func TestPersistBatchFromBroadcastBadHash(t *testing.T) { em, cancel := newTestEventManager(t) diff --git a/internal/events/system/events.go b/internal/events/system/events.go index e8fc53477b..9ca64c8ac4 100644 --- a/internal/events/system/events.go +++ b/internal/events/system/events.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -89,7 +89,7 @@ func (se *Events) AddListener(ns string, el EventListener) error { return nil } -func (se *Events) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (se *Events) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { se.mux.Lock() defer se.mux.Unlock() for ns, listeners := range se.listeners { diff --git a/internal/events/webhooks/webhooks.go b/internal/events/webhooks/webhooks.go index 3375266aa0..65e8875e9a 100644 --- a/internal/events/webhooks/webhooks.go +++ b/internal/events/webhooks/webhooks.go @@ -261,7 +261,7 @@ func (wh *WebHooks) ValidateOptions(options *fftypes.SubscriptionOptions) error return err } -func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) (req *whRequest, res *whResponse, err error) { +func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) (req *whRequest, res *whResponse, err error) { withData := sub.Options.WithData != nil && *sub.Options.WithData allData := make([]*fftypes.JSONAny, 0, len(data)) var firstData fftypes.JSONObject @@ -350,7 +350,7 @@ func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.Eve return req, res, nil } -func (wh *WebHooks) doDelivery(connID string, reply bool, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (wh *WebHooks) doDelivery(connID string, reply bool, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { req, res, gwErr := wh.attemptRequest(sub, event, data) if gwErr != nil { // Generate a bad-gateway error response - we always want to send something back, @@ -400,7 +400,7 @@ func (wh *WebHooks) doDelivery(connID string, reply bool, sub *fftypes.Subscript return nil } -func (wh 
*WebHooks) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (wh *WebHooks) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { if event.Message == nil && sub.Options.WithData != nil && *sub.Options.WithData { log.L(wh.ctx).Debugf("Webhook withData=true subscription called with non-message event '%s'", event.ID) return nil diff --git a/internal/events/webhooks/webhooks_test.go b/internal/events/webhooks/webhooks_test.go index 6c65bc3f56..f3c3f77efc 100644 --- a/internal/events/webhooks/webhooks_test.go +++ b/internal/events/webhooks/webhooks_test.go @@ -216,7 +216,7 @@ func TestRequestWithBodyReplyEndToEnd(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{data}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{data}) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -321,7 +321,7 @@ func TestRequestWithEmptyStringBodyReplyEndToEnd(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{data}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{data}) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -376,7 +376,7 @@ func TestRequestNoBodyNoReply(t *testing.T) { }`), } - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{data}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{data}) assert.NoError(t, err) assert.True(t, called) } @@ -433,7 +433,7 @@ func TestRequestReplyEmptyData(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{}) assert.NoError(t, err) assert.True(t, called) } @@ -479,7 +479,7 @@ func TestRequestReplyBadJSON(t *testing.T) { return true })).Return(nil) - err 
:= wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{}) assert.NoError(t, err) } func TestRequestReplyDataArrayBadStatusB64(t *testing.T) { @@ -540,7 +540,7 @@ func TestRequestReplyDataArrayBadStatusB64(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) @@ -588,7 +588,7 @@ func TestRequestReplyDataArrayError(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) @@ -638,7 +638,7 @@ func TestRequestReplyBuildRequestFailFastAsk(t *testing.T) { close(waiter) } - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) diff --git a/internal/events/websockets/websockets.go b/internal/events/websockets/websockets.go index cf2e63cf8b..9889db324c 100644 --- a/internal/events/websockets/websockets.go +++ b/internal/events/websockets/websockets.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -78,7 +78,7 @@ func (ws *WebSockets) ValidateOptions(options *fftypes.SubscriptionOptions) erro return nil } -func (ws *WebSockets) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (ws *WebSockets) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { ws.connMux.Lock() conn, ok := ws.connections[connID] ws.connMux.Unlock() diff --git a/internal/orchestrator/data_query.go b/internal/orchestrator/data_query.go index 3a30e8ae2f..5b97328d23 100644 --- a/internal/orchestrator/data_query.go +++ b/internal/orchestrator/data_query.go @@ -183,7 +183,7 @@ func (or *orchestrator) GetMessagesWithData(ctx context.Context, ns string, filt return msgsData, fr, err } -func (or *orchestrator) GetMessageData(ctx context.Context, ns, id string) ([]*fftypes.Data, error) { +func (or *orchestrator) GetMessageData(ctx context.Context, ns, id string) (fftypes.DataArray, error) { msg, err := or.getMessageByID(ctx, ns, id) if err != nil || msg == nil { return nil, err @@ -258,7 +258,7 @@ func (or *orchestrator) GetBatches(ctx context.Context, ns string, filter databa return or.database.GetBatches(ctx, filter) } -func (or *orchestrator) GetData(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Data, *database.FilterResult, error) { +func (or *orchestrator) GetData(ctx context.Context, ns string, filter database.AndFilter) (fftypes.DataArray, *database.FilterResult, error) { filter = or.scopeNS(ns, filter) return or.database.GetData(ctx, filter) } diff --git a/internal/orchestrator/data_query_test.go b/internal/orchestrator/data_query_test.go index 16218dd6c8..cfe56de831 100644 --- a/internal/orchestrator/data_query_test.go +++ b/internal/orchestrator/data_query_test.go @@ -121,7 +121,7 @@ func TestGetMessageByIDWithDataOk(t *testing.T) { }, } or.mdi.On("GetMessageByID", mock.Anything, 
mock.MatchedBy(func(u *fftypes.UUID) bool { return u.Equals(msgID) })).Return(msg, nil) - or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{ + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Value: fftypes.JSONAnyPtr("{}")}, {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Value: fftypes.JSONAnyPtr("{}")}, }, true, nil) @@ -158,7 +158,7 @@ func TestGetMessageByIDWithDataFail(t *testing.T) { }, } or.mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil) - or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, fmt.Errorf("pop")) _, err := or.GetMessageByIDWithData(context.Background(), "ns1", msgID.String()) assert.EqualError(t, err, "pop") @@ -195,7 +195,7 @@ func TestGetMessagesWithDataOk(t *testing.T) { } or.mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil) fb := database.MessageQueryFactory.NewFilter(context.Background()) - or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{}, true, nil) f := fb.And(fb.Eq("id", u)) _, _, err := or.GetMessagesWithData(context.Background(), "ns1", f) assert.NoError(t, err) @@ -213,7 +213,7 @@ func TestGetMessagesWithDataFail(t *testing.T) { } or.mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil) fb := database.MessageQueryFactory.NewFilter(context.Background()) - or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, true, fmt.Errorf("pop")) + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, true, fmt.Errorf("pop")) f := fb.And(fb.Eq("id", u)) _, _, err := 
or.GetMessagesWithData(context.Background(), "ns1", f) assert.EqualError(t, err, "pop") @@ -247,12 +247,10 @@ func TestGetMessageTransactionOk(t *testing.T) { TxType: fftypes.TransactionTypeBatchPin, }, }, nil) - or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.Batch{ - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: txID, - }, + or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.BatchPersisted{ + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeBatchPin, + ID: txID, }, }, nil) or.mdi.On("GetTransactionByID", mock.Anything, txID).Return(&fftypes.Transaction{ @@ -275,12 +273,10 @@ func TestGetMessageTransactionOperations(t *testing.T) { TxType: fftypes.TransactionTypeBatchPin, }, }, nil) - or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.Batch{ - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: txID, - }, + or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.BatchPersisted{ + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeBatchPin, + ID: txID, }, }, nil) or.mdi.On("GetOperations", mock.Anything, mock.Anything).Return([]*fftypes.Operation{}, nil, nil) @@ -309,7 +305,7 @@ func TestGetMessageTransactionNoBatchTX(t *testing.T) { TxType: fftypes.TransactionTypeBatchPin, }, }, nil) - or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.Batch{}, nil) + or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.BatchPersisted{}, nil) _, err := or.GetMessageTransaction(context.Background(), "ns1", msgID.String()) assert.Regexp(t, "FF10210", err) } @@ -384,7 +380,7 @@ func TestGetMessageData(t *testing.T) { }, } or.mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil) - or.mdm.On("GetMessageData", mock.Anything, msg, true).Return([]*fftypes.Data{}, true, nil) + or.mdm.On("GetMessageDataCached", mock.Anything, 
mock.Anything).Return(fftypes.DataArray{}, true, nil) _, err := or.GetMessageData(context.Background(), "ns1", fftypes.NewUUID().String()) assert.NoError(t, err) } @@ -449,7 +445,7 @@ func TestGetBatchByIDBadID(t *testing.T) { func TestGetBatches(t *testing.T) { or := newTestOrchestrator() u := fftypes.NewUUID() - or.mdi.On("GetBatches", mock.Anything, mock.Anything).Return([]*fftypes.Batch{}, nil, nil) + or.mdi.On("GetBatches", mock.Anything, mock.Anything).Return([]*fftypes.BatchPersisted{}, nil, nil) fb := database.BatchQueryFactory.NewFilter(context.Background()) f := fb.And(fb.Eq("id", u)) _, _, err := or.GetBatches(context.Background(), "ns1", f) @@ -473,7 +469,7 @@ func TestGetDataByIDBadID(t *testing.T) { func TestGetData(t *testing.T) { or := newTestOrchestrator() u := fftypes.NewUUID() - or.mdi.On("GetData", mock.Anything, mock.Anything).Return([]*fftypes.Data{}, nil, nil) + or.mdi.On("GetData", mock.Anything, mock.Anything).Return(fftypes.DataArray{}, nil, nil) fb := database.DataQueryFactory.NewFilter(context.Background()) f := fb.And(fb.Eq("id", u)) _, _, err := or.GetData(context.Background(), "ns1", f) diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index 6e70a70f5f..36083494ad 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -105,12 +105,12 @@ type Orchestrator interface { GetMessageTransaction(ctx context.Context, ns, id string) (*fftypes.Transaction, error) GetMessageOperations(ctx context.Context, ns, id string) ([]*fftypes.Operation, *database.FilterResult, error) GetMessageEvents(ctx context.Context, ns, id string, filter database.AndFilter) ([]*fftypes.Event, *database.FilterResult, error) - GetMessageData(ctx context.Context, ns, id string) ([]*fftypes.Data, error) + GetMessageData(ctx context.Context, ns, id string) (fftypes.DataArray, error) GetMessagesForData(ctx context.Context, ns, dataID string, filter database.AndFilter) ([]*fftypes.Message, 
*database.FilterResult, error) GetBatchByID(ctx context.Context, ns, id string) (*fftypes.BatchPersisted, error) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) GetDataByID(ctx context.Context, ns, id string) (*fftypes.Data, error) - GetData(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Data, *database.FilterResult, error) + GetData(ctx context.Context, ns string, filter database.AndFilter) (fftypes.DataArray, *database.FilterResult, error) GetDatatypeByID(ctx context.Context, ns, id string) (*fftypes.Datatype, error) GetDatatypeByName(ctx context.Context, ns, name, version string) (*fftypes.Datatype, error) GetDatatypes(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Datatype, *database.FilterResult, error) diff --git a/internal/orchestrator/txn_status_test.go b/internal/orchestrator/txn_status_test.go index b6972ed2b3..926038f538 100644 --- a/internal/orchestrator/txn_status_test.go +++ b/internal/orchestrator/txn_status_test.go @@ -128,7 +128,7 @@ func TestGetTransactionStatusBatchPinFail(t *testing.T) { }, } events := []*fftypes.BlockchainEvent{} - batches := []*fftypes.Batch{} + batches := []*fftypes.BatchPersisted{} or.mdi.On("GetTransactionByID", mock.Anything, txID).Return(tx, nil) or.mdi.On("GetOperations", mock.Anything, mock.Anything).Return(ops, nil, nil) @@ -180,7 +180,7 @@ func TestGetTransactionStatusBatchPinPending(t *testing.T) { }, } events := []*fftypes.BlockchainEvent{} - batches := []*fftypes.Batch{} + batches := []*fftypes.BatchPersisted{} or.mdi.On("GetTransactionByID", mock.Anything, txID).Return(tx, nil) or.mdi.On("GetOperations", mock.Anything, mock.Anything).Return(ops, nil, nil) diff --git a/internal/privatemessaging/groupmanager_test.go b/internal/privatemessaging/groupmanager_test.go index 31fcd76fd9..559f762dd8 100644 --- a/internal/privatemessaging/groupmanager_test.go +++ 
b/internal/privatemessaging/groupmanager_test.go @@ -86,7 +86,7 @@ func TestResolveInitGroupMissingData(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{}, false, nil) + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{}, false, nil) _, err := pm.ResolveInitGroup(pm.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ @@ -109,7 +109,7 @@ func TestResolveInitGroupBadData(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`!json`)}, }, true, nil) @@ -134,7 +134,7 @@ func TestResolveInitGroupBadValidation(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`{}`)}, }, true, nil) @@ -172,7 +172,7 @@ func TestResolveInitGroupBadGroupID(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) @@ -210,7 +210,7 @@ func TestResolveInitGroupUpsertFail(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) mdi := pm.database.(*databasemocks.Plugin) @@ -250,7 +250,7 @@ func 
TestResolveInitGroupNewOk(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) mdi := pm.database.(*databasemocks.Plugin) diff --git a/internal/privatemessaging/message.go b/internal/privatemessaging/message.go index ca91c52383..826d4092ee 100644 --- a/internal/privatemessaging/message.go +++ b/internal/privatemessaging/message.go @@ -141,7 +141,7 @@ func (s *messageSender) resolveAndSend(ctx context.Context, method sendMethod) e return s.sendInternal(ctx, method) } -func (s *messageSender) resolve(ctx context.Context) ([]*fftypes.Data, error) { +func (s *messageSender) resolve(ctx context.Context) (fftypes.DataArray, error) { // Resolve the sending identity if err := s.mgr.identity.ResolveInputSigningIdentity(ctx, s.msg.Header.Namespace, &s.msg.Header.SignerRef); err != nil { return nil, i18n.WrapError(ctx, err, i18n.MsgAuthorInvalid) diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index 9e3ef7e563..d58f1f642d 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -804,7 +804,7 @@ func TestSendDataTransferBlobsFail(t *testing.T) { }, }, }, - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr("{}"), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), }}, diff --git a/internal/privatemessaging/operations_test.go b/internal/privatemessaging/operations_test.go index 78b3f7fc1f..44d114b83c 100644 --- a/internal/privatemessaging/operations_test.go +++ b/internal/privatemessaging/operations_test.go @@ -520,7 +520,7 @@ func TestRunOperationBatchSendInvalidData(t *testing.T) { Group: &fftypes.Group{}, Batch: &fftypes.Batch{ Payload: fftypes.BatchPayload{ - Data: 
[]*fftypes.Data{ + Data: fftypes.DataArray{ {Value: fftypes.JSONAnyPtr(`!json`)}, }, }, diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index 0cb7e54d0b..a4ec4eb3fb 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -191,7 +191,7 @@ func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, state *batc return pm.sendData(ctx, tw, nodes, state.Persisted.Manifest) } -func (pm *privateMessaging) transferBlobs(ctx context.Context, data []*fftypes.Data, txid *fftypes.UUID, node *fftypes.Identity) error { +func (pm *privateMessaging) transferBlobs(ctx context.Context, data fftypes.DataArray, txid *fftypes.UUID, node *fftypes.Identity) error { // Send all the blobs associated with this batch for _, d := range data { // We only need to send a blob if there is one, and it's not been uploaded to the shared storage diff --git a/internal/privatemessaging/privatemessaging_test.go b/internal/privatemessaging/privatemessaging_test.go index b609f1aa3c..04c2b2809e 100644 --- a/internal/privatemessaging/privatemessaging_test.go +++ b/internal/privatemessaging/privatemessaging_test.go @@ -193,7 +193,7 @@ func TestDispatchBatchWithBlobs(t *testing.T) { TX: fftypes.TransactionRef{ ID: txID, }, - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ {ID: dataID1, Blob: &fftypes.BlobRef{Hash: blob1}}, }, }, @@ -408,7 +408,7 @@ func TestSendSubmitBlobTransferFail(t *testing.T) { }, }, Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{Hash: blob1}}, }, }, @@ -482,7 +482,7 @@ func TestWriteTransactionSubmitBatchPinFail(t *testing.T) { }, }, Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{Hash: blob1}}, }, }, @@ -502,7 +502,7 @@ func TestTransferBlobsNotFound(t *testing.T) { mdi := 
pm.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", pm.ctx, mock.Anything).Return(nil, nil) - err := pm.transferBlobs(pm.ctx, []*fftypes.Data{ + err := pm.transferBlobs(pm.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{Hash: fftypes.NewRandB32()}}, }, fftypes.NewUUID(), newTestNode("node1", newTestOrg("org1"))) assert.Regexp(t, "FF10239", err) @@ -522,7 +522,7 @@ func TestTransferBlobsOpInsertFail(t *testing.T) { mdx.On("TransferBLOB", pm.ctx, mock.Anything, "peer1", "blob/1").Return(nil) mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) - err := pm.transferBlobs(pm.ctx, []*fftypes.Data{ + err := pm.transferBlobs(pm.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{Hash: fftypes.NewRandB32()}}, }, fftypes.NewUUID(), newTestNode("node1", newTestOrg("org1"))) assert.Regexp(t, "pop", err) diff --git a/internal/syncasync/sync_async_bridge_test.go b/internal/syncasync/sync_async_bridge_test.go index 93d212c5a0..416d5f1d96 100644 --- a/internal/syncasync/sync_async_bridge_test.go +++ b/internal/syncasync/sync_async_bridge_test.go @@ -69,7 +69,7 @@ func TestRequestReplyOk(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) @@ -117,7 +117,7 @@ func TestAwaitConfirmationOk(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) @@ -163,7 +163,7 @@ func TestAwaitConfirmationRejected(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageDataCached", 
sa.ctx, mock.Anything).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) diff --git a/mocks/databasemocks/plugin.go b/mocks/databasemocks/plugin.go index b7b75d9454..7c25ec42a9 100644 --- a/mocks/databasemocks/plugin.go +++ b/mocks/databasemocks/plugin.go @@ -584,15 +584,15 @@ func (_m *Plugin) GetContractListeners(ctx context.Context, filter database.Filt } // GetData provides a mock function with given fields: ctx, filter -func (_m *Plugin) GetData(ctx context.Context, filter database.Filter) ([]*fftypes.Data, *database.FilterResult, error) { +func (_m *Plugin) GetData(ctx context.Context, filter database.Filter) (fftypes.DataArray, *database.FilterResult, error) { ret := _m.Called(ctx, filter) - var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, database.Filter) []*fftypes.Data); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, database.Filter) fftypes.DataArray); ok { r0 = rf(ctx, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Data) + r0 = ret.Get(0).(fftypes.DataArray) } } diff --git a/mocks/definitionsmocks/definition_handlers.go b/mocks/definitionsmocks/definition_handlers.go index 91b38ec4c4..e4d8b2cdf7 100644 --- a/mocks/definitionsmocks/definition_handlers.go +++ b/mocks/definitionsmocks/definition_handlers.go @@ -95,18 +95,18 @@ func (_m *DefinitionHandlers) GetGroupsNS(ctx context.Context, ns string, filter } // HandleDefinitionBroadcast provides a mock function with given fields: ctx, state, msg, data, tx -func (_m *DefinitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state definitions.DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (definitions.HandlerResult, error) { +func (_m *DefinitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state definitions.DefinitionBatchState, msg 
*fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (definitions.HandlerResult, error) { ret := _m.Called(ctx, state, msg, data, tx) var r0 definitions.HandlerResult - if rf, ok := ret.Get(0).(func(context.Context, definitions.DefinitionBatchState, *fftypes.Message, []*fftypes.Data, *fftypes.UUID) definitions.HandlerResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, definitions.DefinitionBatchState, *fftypes.Message, fftypes.DataArray, *fftypes.UUID) definitions.HandlerResult); ok { r0 = rf(ctx, state, msg, data, tx) } else { r0 = ret.Get(0).(definitions.HandlerResult) } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, definitions.DefinitionBatchState, *fftypes.Message, []*fftypes.Data, *fftypes.UUID) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, definitions.DefinitionBatchState, *fftypes.Message, fftypes.DataArray, *fftypes.UUID) error); ok { r1 = rf(ctx, state, msg, data, tx) } else { r1 = ret.Error(1) diff --git a/mocks/eventsmocks/plugin.go b/mocks/eventsmocks/plugin.go index 1288aaa2e7..b8a8f9f1a3 100644 --- a/mocks/eventsmocks/plugin.go +++ b/mocks/eventsmocks/plugin.go @@ -36,11 +36,11 @@ func (_m *Plugin) Capabilities() *events.Capabilities { } // DeliveryRequest provides a mock function with given fields: connID, sub, event, data -func (_m *Plugin) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (_m *Plugin) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { ret := _m.Called(connID, sub, event, data) var r0 error - if rf, ok := ret.Get(0).(func(string, *fftypes.Subscription, *fftypes.EventDelivery, []*fftypes.Data) error); ok { + if rf, ok := ret.Get(0).(func(string, *fftypes.Subscription, *fftypes.EventDelivery, fftypes.DataArray) error); ok { r0 = rf(connID, sub, event, data) } else { r0 = ret.Error(0) diff --git a/mocks/eventsmocks/plugin_all.go 
b/mocks/eventsmocks/plugin_all.go index 550385038e..d85005ea66 100644 --- a/mocks/eventsmocks/plugin_all.go +++ b/mocks/eventsmocks/plugin_all.go @@ -41,11 +41,11 @@ func (_m *PluginAll) ChangeEvent(connID string, ce *fftypes.ChangeEvent) { } // DeliveryRequest provides a mock function with given fields: connID, sub, event, data -func (_m *PluginAll) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (_m *PluginAll) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { ret := _m.Called(connID, sub, event, data) var r0 error - if rf, ok := ret.Get(0).(func(string, *fftypes.Subscription, *fftypes.EventDelivery, []*fftypes.Data) error); ok { + if rf, ok := ret.Get(0).(func(string, *fftypes.Subscription, *fftypes.EventDelivery, fftypes.DataArray) error); ok { r0 = rf(connID, sub, event, data) } else { r0 = ret.Error(0) diff --git a/mocks/orchestratormocks/orchestrator.go b/mocks/orchestratormocks/orchestrator.go index 4a744a50f1..9533a4934b 100644 --- a/mocks/orchestratormocks/orchestrator.go +++ b/mocks/orchestratormocks/orchestrator.go @@ -207,15 +207,15 @@ func (_m *Orchestrator) Events() events.EventManager { } // GetBatchByID provides a mock function with given fields: ctx, ns, id -func (_m *Orchestrator) GetBatchByID(ctx context.Context, ns string, id string) (*fftypes.Batch, error) { +func (_m *Orchestrator) GetBatchByID(ctx context.Context, ns string, id string) (*fftypes.BatchPersisted, error) { ret := _m.Called(ctx, ns, id) - var r0 *fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, string, string) *fftypes.Batch); ok { + var r0 *fftypes.BatchPersisted + if rf, ok := ret.Get(0).(func(context.Context, string, string) *fftypes.BatchPersisted); ok { r0 = rf(ctx, ns, id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*fftypes.Batch) + r0 = ret.Get(0).(*fftypes.BatchPersisted) } } @@ -230,15 +230,15 @@ func (_m 
*Orchestrator) GetBatchByID(ctx context.Context, ns string, id string) } // GetBatches provides a mock function with given fields: ctx, ns, filter -func (_m *Orchestrator) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Batch, *database.FilterResult, error) { +func (_m *Orchestrator) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) { ret := _m.Called(ctx, ns, filter) - var r0 []*fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, string, database.AndFilter) []*fftypes.Batch); ok { + var r0 []*fftypes.BatchPersisted + if rf, ok := ret.Get(0).(func(context.Context, string, database.AndFilter) []*fftypes.BatchPersisted); ok { r0 = rf(ctx, ns, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Batch) + r0 = ret.Get(0).([]*fftypes.BatchPersisted) } } @@ -411,15 +411,15 @@ func (_m *Orchestrator) GetConfigRecords(ctx context.Context, filter database.An } // GetData provides a mock function with given fields: ctx, ns, filter -func (_m *Orchestrator) GetData(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Data, *database.FilterResult, error) { +func (_m *Orchestrator) GetData(ctx context.Context, ns string, filter database.AndFilter) (fftypes.DataArray, *database.FilterResult, error) { ret := _m.Called(ctx, ns, filter) - var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, string, database.AndFilter) []*fftypes.Data); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, string, database.AndFilter) fftypes.DataArray); ok { r0 = rf(ctx, ns, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Data) + r0 = ret.Get(0).(fftypes.DataArray) } } @@ -645,15 +645,15 @@ func (_m *Orchestrator) GetMessageByIDWithData(ctx context.Context, ns string, i } // GetMessageData provides a mock function with given fields: ctx, ns, id -func (_m *Orchestrator) 
GetMessageData(ctx context.Context, ns string, id string) ([]*fftypes.Data, error) { +func (_m *Orchestrator) GetMessageData(ctx context.Context, ns string, id string) (fftypes.DataArray, error) { ret := _m.Called(ctx, ns, id) - var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, string, string) []*fftypes.Data); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, string, string) fftypes.DataArray); ok { r0 = rf(ctx, ns, id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Data) + r0 = ret.Get(0).(fftypes.DataArray) } } diff --git a/pkg/database/plugin.go b/pkg/database/plugin.go index 7ffd8f968f..2622853a44 100644 --- a/pkg/database/plugin.go +++ b/pkg/database/plugin.go @@ -110,7 +110,7 @@ type iDataCollection interface { GetDataByID(ctx context.Context, id *fftypes.UUID, withValue bool) (message *fftypes.Data, err error) // GetData - Get data - GetData(ctx context.Context, filter Filter) (message []*fftypes.Data, res *FilterResult, err error) + GetData(ctx context.Context, filter Filter) (message fftypes.DataArray, res *FilterResult, err error) // GetDataRefs - Get data references only (no data) GetDataRefs(ctx context.Context, filter Filter) (message fftypes.DataRefs, res *FilterResult, err error) diff --git a/pkg/events/plugin.go b/pkg/events/plugin.go index 15177c24c6..1aceceda2f 100644 --- a/pkg/events/plugin.go +++ b/pkg/events/plugin.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -48,7 +48,7 @@ type Plugin interface { // DeliveryRequest requests delivery of work on a connection, which must later be responded to // Data will only be supplied as non-nil if the subscription is set to include data - DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error + DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error } // ChangeEventListener is an optional interface for delivering database change events, only supported for ephemeral connections diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index c388b05077..730beed3da 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -83,7 +83,7 @@ func pollForUp(t *testing.T, client *resty.Client) { assert.Equal(t, 200, resp.StatusCode()) } -func validateReceivedMessages(ts *testState, client *resty.Client, topic string, msgType fftypes.MessageType, txtype fftypes.TransactionType, count int) (data []*fftypes.Data) { +func validateReceivedMessages(ts *testState, client *resty.Client, topic string, msgType fftypes.MessageType, txtype fftypes.TransactionType, count int) (data fftypes.DataArray) { var group *fftypes.Bytes32 messages := GetMessages(ts.t, client, ts.startTime, msgType, topic, 200) for i, message := range messages { diff --git a/test/e2e/restclient_test.go b/test/e2e/restclient_test.go index 9e818e8a28..1a7e03420b 100644 --- a/test/e2e/restclient_test.go +++ b/test/e2e/restclient_test.go @@ -104,7 +104,7 @@ func GetMessages(t *testing.T, client *resty.Client, startTime time.Time, msgTyp return msgs } -func GetData(t *testing.T, client *resty.Client, startTime time.Time, expectedStatus int) (data []*fftypes.Data) { +func GetData(t *testing.T, client *resty.Client, startTime time.Time, expectedStatus int) (data fftypes.DataArray) { path := urlGetData resp, err := client.R(). 
SetQueryParam("created", fmt.Sprintf(">%d", startTime.UnixNano())). @@ -115,7 +115,7 @@ func GetData(t *testing.T, client *resty.Client, startTime time.Time, expectedSt return data } -func GetDataForMessage(t *testing.T, client *resty.Client, startTime time.Time, msgID *fftypes.UUID) (data []*fftypes.Data) { +func GetDataForMessage(t *testing.T, client *resty.Client, startTime time.Time, msgID *fftypes.UUID) (data fftypes.DataArray) { path := urlGetMessages path += "/" + msgID.String() + "/data" resp, err := client.R(). From e46b921ea4df8241ced02aa24b6a2e5b177083b3 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Tue, 8 Mar 2022 14:23:49 -0500 Subject: [PATCH 11/11] Message caching worked through e2e Signed-off-by: Peter Broadhurst --- docs/swagger/swagger.yaml | 106 +++++++++++++++ internal/apiserver/route_get_status_pins.go | 42 ++++++ .../apiserver/route_get_status_pins_test.go | 39 ++++++ internal/apiserver/routes.go | 123 ++++++++---------- internal/batch/batch_manager.go | 5 +- internal/batch/batch_manager_test.go | 4 + internal/batch/batch_processor.go | 17 ++- internal/batch/batch_processor_test.go | 25 +++- internal/broadcast/definition.go | 1 + internal/broadcast/manager.go | 3 +- internal/broadcast/message.go | 3 +- internal/data/data_manager.go | 33 ++++- internal/data/data_manager_test.go | 82 ++++++++++-- internal/database/sqlcommon/batch_sql_test.go | 8 +- internal/definitions/definition_handler.go | 17 ++- .../definitions/definition_handler_test.go | 9 ++ internal/events/aggregator.go | 12 +- internal/events/aggregator_test.go | 37 +++--- internal/events/dx_callbacks.go | 2 +- internal/events/persist_batch.go | 13 +- internal/events/token_pool_created.go | 3 +- internal/events/token_pool_created_test.go | 12 +- internal/orchestrator/data_query.go | 4 + internal/orchestrator/data_query_test.go | 10 ++ internal/orchestrator/orchestrator.go | 1 + internal/privatemessaging/message.go | 1 - internal/privatemessaging/privatemessaging.go | 2 +- 
mocks/datamocks/manager.go | 5 + mocks/orchestratormocks/orchestrator.go | 32 +++++ pkg/fftypes/batch.go | 4 +- pkg/fftypes/batch_test.go | 2 +- pkg/fftypes/data.go | 3 + pkg/fftypes/jsonany.go | 7 + pkg/fftypes/jsonany_test.go | 16 +++ 34 files changed, 551 insertions(+), 132 deletions(-) create mode 100644 internal/apiserver/route_get_status_pins.go create mode 100644 internal/apiserver/route_get_status_pins_test.go diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index 0cc9316e42..8bb1d8fa19 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -9735,5 +9735,111 @@ paths: description: Success default: description: "" + /status/pins: + get: + description: 'TODO: Description' + operationId: getStatusPins + parameters: + - description: Server-side request timeout (millseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 120s + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: batch + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: created + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: dispatched + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: hash + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: index + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: masked + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: sequence + schema: + type: string + - description: Sort field. 
For multi-field sort use comma separated values (or + multiple query values) with '-' prefix for descending + in: query + name: sort + schema: + type: string + - description: Ascending sort order (overrides all fields in a multi-field sort) + in: query + name: ascending + schema: + type: string + - description: Descending sort order (overrides all fields in a multi-field + sort) + in: query + name: descending + schema: + type: string + - description: 'The number of records to skip (max: 1,000). Unsuitable for bulk + operations' + in: query + name: skip + schema: + type: string + - description: 'The maximum number of records to return (max: 1,000)' + in: query + name: limit + schema: + example: "25" + type: string + - description: Return a total count as well as items (adds extra database processing) + in: query + name: count + schema: + type: string + responses: + "200": + content: + application/json: + schema: + properties: + batch: {} + created: {} + dispatched: + type: boolean + hash: {} + index: + format: int64 + type: integer + masked: + type: boolean + sequence: + format: int64 + type: integer + signer: + type: string + type: object + description: Success + default: + description: "" servers: - url: http://localhost:5000 diff --git a/internal/apiserver/route_get_status_pins.go b/internal/apiserver/route_get_status_pins.go new file mode 100644 index 0000000000..8a7fb88f58 --- /dev/null +++ b/internal/apiserver/route_get_status_pins.go @@ -0,0 +1,42 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "net/http" + + "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/internal/oapispec" + "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/fftypes" +) + +var getStatusPins = &oapispec.Route{ + Name: "getStatusPins", + Path: "status/pins", + Method: http.MethodGet, + PathParams: nil, + QueryParams: nil, + FilterFactory: database.PinQueryFactory, + Description: i18n.MsgTBD, + JSONInputValue: nil, + JSONOutputValue: func() interface{} { return []fftypes.Pin{} }, + JSONOutputCodes: []int{http.StatusOK}, + JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { + return filterResult(getOr(r.Ctx).GetPins(r.Ctx, r.Filter)) + }, +} diff --git a/internal/apiserver/route_get_status_pins_test.go b/internal/apiserver/route_get_status_pins_test.go new file mode 100644 index 0000000000..c87f722150 --- /dev/null +++ b/internal/apiserver/route_get_status_pins_test.go @@ -0,0 +1,39 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "net/http/httptest" + "testing" + + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestGetStatusPins(t *testing.T) { + o, r := newTestAPIServer() + req := httptest.NewRequest("GET", "/api/v1/status/pins", nil) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + o.On("GetPins", mock.Anything, mock.Anything). + Return([]*fftypes.Pin{}, nil, nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 200, res.Result().StatusCode) +} diff --git a/internal/apiserver/routes.go b/internal/apiserver/routes.go index 70026210c5..b90fac023e 100644 --- a/internal/apiserver/routes.go +++ b/internal/apiserver/routes.go @@ -21,36 +21,30 @@ import "github.com/hyperledger/firefly/internal/oapispec" const emptyObjectSchema = `{"type": "object"}` var routes = []*oapispec.Route{ - postNewDatatype, - postNewNamespace, - postNewMessageBroadcast, - postNewMessagePrivate, - postNewMessageRequestReply, - postNodesSelf, - postNewOrganization, - postNewOrganizationSelf, - postNewIdentity, - patchUpdateIdentity, - - postData, - postNewSubscription, - - putSubscription, - + deleteContractListener, deleteSubscription, - getBatchByID, getBatches, + getBlockchainEventByID, + getBlockchainEvents, + getChartHistogram, + getContractAPIByName, + getContractAPIs, + getContractInterface, + getContractInterfaceNameVersion, + getContractInterfaces, + getContractListenerByNameOrID, + getContractListeners, getData, getDataBlob, getDataByID, + getDataMsgs, getDatatypeByName, getDatatypes, - getDataMsgs, getEventByID, getEvents, - getGroups, getGroupByHash, + getGroups, getIdentities, getIdentityByID, getIdentityDID, @@ -59,68 +53,65 @@ var routes = []*oapispec.Route{ getMsgData, getMsgEvents, getMsgOps, - getMsgTxn, getMsgs, - getNetworkOrg, - 
getNetworkOrgs, - getNetworkNode, - getNetworkNodes, + getMsgTxn, getNamespace, getNamespaces, + getNetworkNode, + getNetworkNodes, + getNetworkOrg, + getNetworkOrgs, getOpByID, getOps, - postOpRetry, getStatus, getStatusBatchManager, + getStatusPins, getSubscriptionByID, getSubscriptions, + getTokenAccountPools, + getTokenAccounts, + getTokenApprovals, + getTokenBalances, + getTokenConnectors, + getTokenPoolByNameOrID, + getTokenPools, + getTokenTransferByID, + getTokenTransfers, + getTxnBlockchainEvents, getTxnByID, getTxnOps, - getTxnBlockchainEvents, - getTxnStatus, getTxns, - getVerifiers, + getTxnStatus, getVerifierByID, - - getChartHistogram, - - postTokenPool, - getTokenPools, - getTokenPoolByNameOrID, - getTokenBalances, - getTokenApprovals, - getTokenAccounts, - getTokenAccountPools, - getTokenTransfers, - getTokenTransferByID, - postTokenMint, - postTokenBurn, - postTokenTransfer, - postTokenApproval, - getTokenConnectors, - - postContractInvoke, - postContractQuery, - - postNewContractInterface, - getContractInterface, - getContractInterfaces, - getContractInterfaceNameVersion, + getVerifiers, + patchUpdateIdentity, + postContractAPIInvoke, + postContractAPIQuery, + postContractInterfaceGenerate, postContractInterfaceInvoke, postContractInterfaceQuery, - postContractInterfaceGenerate, - + postContractInvoke, + postContractQuery, + postData, postNewContractAPI, - getContractAPIByName, - getContractAPIs, - putContractAPI, - postContractAPIInvoke, - postContractAPIQuery, - + postNewContractInterface, postNewContractListener, - getContractListenerByNameOrID, - getContractListeners, - deleteContractListener, - getBlockchainEvents, - getBlockchainEventByID, + postNewDatatype, + postNewIdentity, + postNewMessageBroadcast, + postNewMessagePrivate, + postNewMessageRequestReply, + postNewNamespace, + postNewOrganization, + postNewOrganizationSelf, + postNewSubscription, + postNodesSelf, + postOpRetry, + postTokenApproval, + postTokenBurn, + postTokenMint, + 
postTokenPool, + postTokenTransfer, + putContractAPI, + putSubscription, } diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 16f1dded10..d81faa921a 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -158,6 +158,7 @@ func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fft bm.ctx, // Background context, not the call context bm.ni, bm.database, + bm.data, &batchProcessorConf{ DispatcherOptions: dispatcher.options, name: name, @@ -241,7 +242,7 @@ func (bm *batchManager) messageSequencer() { continue } - bm.dispatchMessage(processor, msg, data...) + bm.dispatchMessage(processor, msg, data) } // Next time round only read after the messages we just processed (unless we get a tap to rewind) @@ -302,7 +303,7 @@ func (bm *batchManager) waitForNewMessages() (done bool) { } } -func (bm *batchManager) dispatchMessage(processor *batchProcessor, msg *fftypes.Message, data ...*fftypes.Data) { +func (bm *batchManager) dispatchMessage(processor *batchProcessor, msg *fftypes.Message, data fftypes.DataArray) { l := log.L(bm.ctx) l.Debugf("Dispatching message %s to %s batch processor %s", msg.Header.ID, msg.Header.Type, processor.conf.name) work := &batchWork{ diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index 7914514983..b6caae31de 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -99,6 +99,7 @@ func TestE2EDispatchBroadcast(t *testing.T) { Hash: dataHash, } mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{data}, true, nil) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ 
-216,6 +217,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { Hash: dataHash, } mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{data}, true, nil) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) // pins @@ -381,6 +383,7 @@ func TestMessageSequencerUpdateMessagesFail(t *testing.T) { }}, }, nil, nil) mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{{ID: dataID}}, true, nil) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() mdi.On("InsertTransaction", mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) // transaction submit mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -444,6 +447,7 @@ func TestMessageSequencerDispatchFail(t *testing.T) { mdi.AssertExpectations(t) mdm.AssertExpectations(t) } + func TestMessageSequencerUpdateBatchFail(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 04afc8045c..34101fe177 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/retry" @@ -74,6 +75,7 @@ type FlushStatus struct { type batchProcessor struct { ctx context.Context ni sysmessaging.LocalNodeInfo + data data.Manager database database.Plugin txHelper txcommon.Helper cancelCtx func() @@ -99,7 +101,7 @@ type DispatchState struct 
{ const batchSizeEstimateBase = int64(512) -func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, conf *batchProcessorConf, baseRetryConf *retry.Retry) *batchProcessor { +func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, dm data.Manager, conf *batchProcessorConf, baseRetryConf *retry.Retry) *batchProcessor { pCtx := log.WithLogField(log.WithLogField(ctx, "d", conf.dispatcherName), "p", conf.name) pCtx, cancelCtx := context.WithCancel(pCtx) bp := &batchProcessor{ @@ -107,6 +109,7 @@ func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di da cancelCtx: cancelCtx, ni: ni, database: di, + data: dm, txHelper: txcommon.NewTransactionHelper(di), newWork: make(chan *batchWork, conf.BatchMaxSize), quescing: make(chan bool, 1), @@ -401,9 +404,11 @@ func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWor } for _, w := range flushWork { if w.msg != nil { + w.msg.BatchID = id state.Payload.Messages = append(state.Payload.Messages, w.msg.BatchMessage()) } for _, d := range w.data { + log.L(bp.ctx).Debugf("Adding data '%s' to batch for message '%s'", d.ID, w.msg.Header.ID) state.Payload.Data = append(state.Payload.Data, d.BatchData(state.Persisted.Type)) } } @@ -505,11 +510,14 @@ func (bp *batchProcessor) sealBatch(state *DispatchState) (err error) { if state.Persisted.TX.ID, err = bp.txHelper.SubmitNewTransaction(ctx, state.Persisted.Namespace, bp.conf.txType); err != nil { return err } + state.Payload.TX = state.Persisted.TX + state.Manifest.TX = state.Persisted.TX // The hash of the batch, is the hash of the manifest to minimize the compute cost. 
// Note in v0.13 and before, it was the hash of the payload - so the inbound route has a fallback to accepting the full payload hash - state.Persisted.Manifest = state.Manifest.String() - state.Persisted.Hash = fftypes.HashString(state.Persisted.Manifest) + manifestString := state.Manifest.String() + state.Persisted.Manifest = fftypes.JSONAnyPtr(manifestString) + state.Persisted.Hash = fftypes.HashString(manifestString) log.L(ctx).Debugf("Batch %s sealed. Hash=%s", state.Persisted.ID, state.Persisted.Hash) @@ -536,6 +544,9 @@ func (bp *batchProcessor) markPayloadDispatched(state *DispatchState) error { msgIDs := make([]driver.Value, len(state.Payload.Messages)) for i, msg := range state.Payload.Messages { msgIDs[i] = msg.Header.ID + // We don't want to have to read the DB again if we want to query for the batch ID, or pins, + // so ensure the copy in our cache gets updated. + bp.data.UpdateMessageIfCached(ctx, msg) } fb := database.MessageQueryFactory.NewFilter(ctx) filter := fb.And( diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index e22256a694..136b89e310 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -24,6 +24,7 @@ import ( "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/retry" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/sysmessagingmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/fftypes" @@ -34,8 +35,9 @@ import ( func newTestBatchProcessor(dispatch DispatchHandler) (*databasemocks.Plugin, *batchProcessor) { mdi := &databasemocks.Plugin{} mni := &sysmessagingmocks.LocalNodeInfo{} + mdm := &datamocks.Manager{} mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()).Maybe() - bp := newBatchProcessor(context.Background(), mni, mdi, &batchProcessorConf{ + bp := 
newBatchProcessor(context.Background(), mni, mdi, mdm, &batchProcessorConf{ namespace: "ns1", txType: fftypes.TransactionTypeBatchPin, signer: fftypes.SignerRef{Author: "did:firefly:org/abcd", Key: "0x12345"}, @@ -75,11 +77,13 @@ func TestUnfilledBatch(t *testing.T) { mockRunAsGroupPassthrough(mdi) mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - mdi.On("UpdateBatch", mock.Anything, mock.Anything).Return(nil) mth := bp.txHelper.(*txcommonmocks.Helper) mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeBatchPin).Return(fftypes.NewUUID(), nil) + mdm := bp.data.(*datamocks.Manager) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() + // Dispatch the work go func() { for i := 0; i < 5; i++ { @@ -98,6 +102,10 @@ func TestUnfilledBatch(t *testing.T) { bp.cancelCtx() <-bp.done + + mdm.AssertExpectations(t) + mdi.AssertExpectations(t) + mth.AssertExpectations(t) } func TestBatchSizeOverflow(t *testing.T) { @@ -113,11 +121,13 @@ func TestBatchSizeOverflow(t *testing.T) { mockRunAsGroupPassthrough(mdi) mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - mdi.On("UpdateBatch", mock.Anything, mock.Anything).Return(nil) mth := bp.txHelper.(*txcommonmocks.Helper) mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeBatchPin).Return(fftypes.NewUUID(), nil) + mdm := bp.data.(*datamocks.Manager) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() + // Dispatch the work msgIDs := []*fftypes.UUID{fftypes.NewUUID(), fftypes.NewUUID()} go func() { @@ -140,6 +150,10 @@ func TestBatchSizeOverflow(t *testing.T) { bp.cancelCtx() <-bp.done + + mdi.AssertExpectations(t) + mth.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestCloseToUnblockDispatch(t *testing.T) { @@ 
-314,6 +328,9 @@ func TestMarkMessageDispatchedUnpinnedOK(t *testing.T) { mth := bp.txHelper.(*txcommonmocks.Helper) mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeUnpinned).Return(fftypes.NewUUID(), nil) + mdm := bp.data.(*datamocks.Manager) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() + // Dispatch the work go func() { for i := 0; i < 5; i++ { @@ -334,6 +351,8 @@ func TestMarkMessageDispatchedUnpinnedOK(t *testing.T) { <-bp.done mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + mth.AssertExpectations(t) } func TestMaskContextsDuplicate(t *testing.T) { diff --git a/internal/broadcast/definition.go b/internal/broadcast/definition.go index 3a2e36a4a6..f45434bc99 100644 --- a/internal/broadcast/definition.go +++ b/internal/broadcast/definition.go @@ -98,6 +98,7 @@ func (bm *broadcastManager) broadcastDefinitionCommon(ctx context.Context, ns st namespace: ns, msg: in, resolved: true, + data: fftypes.DataArray{data}, } sender.setDefaults() if waitConfirm { diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go index b5519ff910..64a284c62f 100644 --- a/internal/broadcast/manager.go +++ b/internal/broadcast/manager.go @@ -139,7 +139,8 @@ func (bm *broadcastManager) dispatchBatch(ctx context.Context, state *batch.Disp if err := bm.operations.RunOperation(ctx, opBatchBroadcast(op, batch)); err != nil { return err } - log.L(ctx).Infof("Pinning broadcast batch %s with author=%s key=%s", batch.ID, batch.Author, batch.Key) + state.Persisted.PayloadRef = batch.PayloadRef + log.L(ctx).Infof("Pinning broadcast batch %s with author=%s key=%s payload=%s", batch.ID, batch.Author, batch.Key, state.Persisted.PayloadRef) return bm.batchpin.SubmitPinnedBatch(ctx, &state.Persisted, state.Pins) } diff --git a/internal/broadcast/message.go b/internal/broadcast/message.go index 3cf4d2db31..eb09557f0a 100644 --- a/internal/broadcast/message.go +++ b/internal/broadcast/message.go @@ -170,8 +170,7 @@ func (s 
*broadcastSender) sendInternal(ctx context.Context, method sendMethod) ( return err } s.mgr.data.UpdateMessageCache(&s.msg.Message, s.data) - s.data = nil // no need to keep hold of this - log.L(ctx).Infof("Sent broadcast message %s:%s sequence=%d", s.msg.Header.Namespace, s.msg.Header.ID, s.msg.Sequence) + log.L(ctx).Infof("Sent broadcast message %s:%s sequence=%d datacount=%d", s.msg.Header.Namespace, s.msg.Header.ID, s.msg.Sequence, len(s.data)) return err } diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index 70e7f32626..91dd15ac0d 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -18,7 +18,6 @@ package data import ( "context" - "encoding/json" "fmt" "io" "time" @@ -39,6 +38,7 @@ type Manager interface { GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data fftypes.DataArray, foundAllData bool, err error) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data fftypes.DataArray, foundAll bool, err error) UpdateMessageCache(msg *fftypes.Message, data fftypes.DataArray) + UpdateMessageIfCached(ctx context.Context, msg *fftypes.Message) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, error) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataArray, []*fftypes.DataAndBlob, error) VerifyNamespaceExists(ctx context.Context, ns string) error @@ -74,6 +74,8 @@ type CacheReadOption int const ( CRORequirePublicBlobRefs CacheReadOption = iota + CRORequirePins + CRORequireBatchID ) func NewDataManager(ctx context.Context, di database.Plugin, pi sharedstorage.Plugin, dx dataexchange.Plugin) (Manager, error) { @@ -203,13 +205,24 @@ func (dm *dataManager) queryMessageCache(ctx context.Context, id *fftypes.UUID, } mce := cached.Value().(*messageCacheEntry) for _, opt := range options { - if opt == 
CRORequirePublicBlobRefs { + switch opt { + case CRORequirePublicBlobRefs: for idx, d := range mce.data { if d.Blob != nil && d.Blob.Public == "" { log.L(ctx).Debugf("Cache entry for data %d (%s) in message %s is missing public blob ref", idx, d.ID, mce.msg.Header.ID) return nil } } + case CRORequirePins: + if len(mce.msg.Header.Topics) != len(mce.msg.Pins) { + log.L(ctx).Debugf("Cache entry for message %s is missing pins", mce.msg.Header.ID) + return nil + } + case CRORequireBatchID: + if mce.msg.BatchID == nil { + log.L(ctx).Debugf("Cache entry for message %s is missing batch ID", mce.msg.Header.ID) + return nil + } } } log.L(ctx).Debugf("Returning msg %s from cache", id) @@ -227,6 +240,16 @@ func (dm *dataManager) UpdateMessageCache(msg *fftypes.Message, data fftypes.Dat dm.messageCache.Set(msg.Header.ID.String(), cacheEntry, dm.messageCacheTTL) } +// UpdateMessageIfCached is used in order to notify the fields of a message that are not initially filled in, have been filled in. +// It does not guarantee the cache is up to date, and the CacheReadOptions should be used to check you have the updated data. 
+// But calling this should reduce the possiblity of the CROs missing +func (dm *dataManager) UpdateMessageIfCached(ctx context.Context, msg *fftypes.Message) { + mce := dm.queryMessageCache(ctx, msg.Header.ID) + if mce != nil { + dm.UpdateMessageCache(msg, mce.data) + } +} + func (dm *dataManager) getMessageData(ctx context.Context, msg *fftypes.Message) (data fftypes.DataArray, foundAll bool, err error) { // Load all the data - must all be present for us to send data = make(fftypes.DataArray, 0, len(msg.Data)) @@ -405,19 +428,19 @@ func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData if d == nil { return nil, nil, i18n.NewError(ctx, i18n.MsgDataReferenceUnresolvable, i) } - data[i] = d if blob, err = dm.resolveBlob(ctx, d.Blob); err != nil { return nil, nil, err } case dataOrValue.Value != nil || dataOrValue.Blob != nil: // We've got a Value, so we can validate + store it - if data[i], blob, err = dm.validateAndStoreInlined(ctx, ns, dataOrValue); err != nil { + if d, blob, err = dm.validateAndStoreInlined(ctx, ns, dataOrValue); err != nil { return nil, nil, err } default: // We have nothing - this must be a mistake return nil, nil, i18n.NewError(ctx, i18n.MsgDataMissing, i) } + data[i] = d // If the data is being resolved for public broadcast, and there is a blob attachment, that blob // needs to be published by our calller @@ -435,7 +458,7 @@ func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { var manifest fftypes.BatchManifest - err := json.Unmarshal([]byte(persistedBatch.Manifest), &manifest) + err := persistedBatch.Manifest.Unmarshal(ctx, &manifest) if err != nil { return nil, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, fmt.Sprintf("batch %s manifest", persistedBatch.ID)) } diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index 
c4b738377e..75de334014 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -701,9 +701,9 @@ func TestHydrateBatchOK(t *testing.T) { ID: batchID, Namespace: "ns1", }, - Manifest: fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + Manifest: fftypes.JSONAnyPtr(fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, batchID, msgID, msgHash, dataID, dataHash, - ), + )), TX: fftypes.TransactionRef{ ID: fftypes.NewUUID(), }, @@ -731,7 +731,7 @@ func TestHydrateBatchOK(t *testing.T) { assert.Equal(t, dataID, batch.Payload.Data[0].ID) assert.Equal(t, dataHash, batch.Payload.Data[0].Hash) assert.Equal(t, dataHash, batch.Payload.Data[0].Hash) - assert.Nil(t, batch.Payload.Data[0].Created) + assert.NotNil(t, batch.Payload.Data[0].Created) mdi.AssertExpectations(t) } @@ -751,9 +751,9 @@ func TestHydrateBatchDataFail(t *testing.T) { ID: batchID, Namespace: "ns1", }, - Manifest: fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + Manifest: fftypes.JSONAnyPtr(fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, batchID, msgID, msgHash, dataID, dataHash, - ), + )), TX: fftypes.TransactionRef{ ID: fftypes.NewUUID(), }, @@ -788,9 +788,9 @@ func TestHydrateBatchMsgNotFound(t *testing.T) { ID: batchID, Namespace: "ns1", }, - Manifest: fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + Manifest: fftypes.JSONAnyPtr(fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, batchID, msgID, msgHash, dataID, dataHash, - ), + )), TX: fftypes.TransactionRef{ ID: fftypes.NewUUID(), }, @@ -810,7 +810,7 @@ func TestHydrateBatchMsgBadManifest(t *testing.T) { defer cancel() bp := &fftypes.BatchPersisted{ - Manifest: `!json`, + Manifest: fftypes.JSONAnyPtr(`!json`), } _, err := dm.HydrateBatch(ctx, 
bp) @@ -928,3 +928,69 @@ func TestGetMessageWithDataReadMessageFail(t *testing.T) { mdi.AssertExpectations(t) } + +func TestUpdateMessageCacheCRORequirePins(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + data := fftypes.DataArray{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, + } + msgNoPins := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Topics: fftypes.FFStringArray{"topic1"}, + }, + Data: data.Refs(), + } + msgWithPins := &fftypes.Message{ + Header: msgNoPins.Header, + Data: data.Refs(), + Pins: fftypes.FFStringArray{"pin1"}, + } + + dm.UpdateMessageCache(msgNoPins, data) + + mce := dm.queryMessageCache(ctx, msgNoPins.Header.ID, CRORequirePins) + assert.Nil(t, mce) + + dm.UpdateMessageIfCached(ctx, msgWithPins) + for mce == nil { + mce = dm.queryMessageCache(ctx, msgNoPins.Header.ID, CRORequirePins) + } + +} + +func TestUpdateMessageCacheCRORequireBatchID(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + data := fftypes.DataArray{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, + } + msgNoPins := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Topics: fftypes.FFStringArray{"topic1"}, + }, + Data: data.Refs(), + } + msgWithBatch := &fftypes.Message{ + Header: msgNoPins.Header, + Data: data.Refs(), + BatchID: fftypes.NewUUID(), + } + + dm.UpdateMessageCache(msgNoPins, data) + + mce := dm.queryMessageCache(ctx, msgNoPins.Header.ID, CRORequireBatchID) + assert.Nil(t, mce) + + dm.UpdateMessageIfCached(ctx, msgWithBatch) + for mce == nil { + mce = dm.queryMessageCache(ctx, msgNoPins.Header.ID, CRORequireBatchID) + } + +} diff --git a/internal/database/sqlcommon/batch_sql_test.go b/internal/database/sqlcommon/batch_sql_test.go index e8ec1fb8e1..8841ea884d 100644 --- a/internal/database/sqlcommon/batch_sql_test.go +++ b/internal/database/sqlcommon/batch_sql_test.go @@ -54,11 +54,11 @@ func TestBatch2EWithDB(t *testing.T) { TX: 
fftypes.TransactionRef{ Type: fftypes.TransactionTypeUnpinned, }, - Manifest: (&fftypes.BatchManifest{ + Manifest: fftypes.JSONAnyPtr((&fftypes.BatchManifest{ Messages: []*fftypes.MessageManifestEntry{ {MessageRef: fftypes.MessageRef{ID: msgID1}}, }, - }).String(), + }).String()), } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionBatches, fftypes.ChangeEventTypeCreated, "ns1", batchID, mock.Anything).Return() @@ -97,12 +97,12 @@ func TestBatch2EWithDB(t *testing.T) { ID: txid, Type: fftypes.TransactionTypeBatchPin, }, - Manifest: (&fftypes.BatchManifest{ + Manifest: fftypes.JSONAnyPtr((&fftypes.BatchManifest{ Messages: []*fftypes.MessageManifestEntry{ {MessageRef: fftypes.MessageRef{ID: msgID1}}, {MessageRef: fftypes.MessageRef{ID: msgID2}}, }, - }).String(), + }).String()), PayloadRef: payloadRef, Confirmed: fftypes.Now(), } diff --git a/internal/definitions/definition_handler.go b/internal/definitions/definition_handler.go index a3d9472e08..99543ab4c5 100644 --- a/internal/definitions/definition_handler.go +++ b/internal/definitions/definition_handler.go @@ -63,6 +63,21 @@ const ( ActionWait ) +func (dma DefinitionMessageAction) String() string { + switch dma { + case ActionReject: + return "reject" + case ActionConfirm: + return "confirm" + case ActionRetry: + return "retry" + case ActionWait: + return "wait" + default: + return "unknown" + } +} + // DefinitionBatchState tracks the state between definition handlers that run in-line on the pin processing route in the // aggregator as part of a batch of pins. They might have complex API calls, and interdependencies, that need to be managed via this state. // The actions to be taken at the end of a definition batch. 
@@ -122,7 +137,7 @@ func (dh *definitionHandlers) EnsureLocalGroup(ctx context.Context, group *fftyp func (dh *definitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (msgAction HandlerResult, err error) { l := log.L(ctx) - l.Infof("Confirming system definition broadcast '%s' [%s]", msg.Header.Tag, msg.Header.ID) + l.Infof("Processing system definition broadcast '%s' [%s]", msg.Header.Tag, msg.Header.ID) switch msg.Header.Tag { case fftypes.SystemTagDefineDatatype: return dh.handleDatatypeBroadcast(ctx, state, msg, data, tx) diff --git a/internal/definitions/definition_handler_test.go b/internal/definitions/definition_handler_test.go index 96ec4a0e50..35c7a96272 100644 --- a/internal/definitions/definition_handler_test.go +++ b/internal/definitions/definition_handler_test.go @@ -18,6 +18,7 @@ package definitions import ( "context" + "fmt" "testing" "github.com/hyperledger/firefly/mocks/assetmocks" @@ -129,3 +130,11 @@ func TestPrivateMessagingPassthroughs(t *testing.T) { mpm.AssertExpectations(t) } + +func TestActionEnum(t *testing.T) { + assert.Equal(t, "confirm", fmt.Sprintf("%s", ActionConfirm)) + assert.Equal(t, "reject", fmt.Sprintf("%s", ActionReject)) + assert.Equal(t, "retry", fmt.Sprintf("%s", ActionRetry)) + assert.Equal(t, "wait", fmt.Sprintf("%s", ActionWait)) + assert.Equal(t, "unknown", fmt.Sprintf("%s", DefinitionMessageAction(999))) +} diff --git a/internal/events/aggregator.go b/internal/events/aggregator.go index f52a8e8014..539ca72265 100644 --- a/internal/events/aggregator.go +++ b/internal/events/aggregator.go @@ -20,7 +20,6 @@ import ( "context" "crypto/sha256" "database/sql/driver" - "encoding/json" "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/data" @@ -210,7 +209,7 @@ func (ag *aggregator) extractBatchMessagePin(manifest *fftypes.BatchManifest, re func (ag *aggregator) migrateManifest(ctx 
context.Context, persistedBatch *fftypes.BatchPersisted) *fftypes.BatchManifest { // In version v0.13.x and earlier, we stored the full batch var fullPayload fftypes.BatchPayload - err := json.Unmarshal([]byte(persistedBatch.Manifest), &fullPayload) + err := persistedBatch.Manifest.Unmarshal(ctx, &fullPayload) if err != nil { log.L(ctx).Errorf("Invalid migration persisted batch: %s", err) return nil @@ -228,7 +227,7 @@ func (ag *aggregator) migrateManifest(ctx context.Context, persistedBatch *fftyp func (ag *aggregator) extractManifest(ctx context.Context, batch *fftypes.BatchPersisted) *fftypes.BatchManifest { var manifest fftypes.BatchManifest - err := json.Unmarshal([]byte(batch.Manifest), &manifest) + err := batch.Manifest.Unmarshal(ctx, &manifest) if err != nil { log.L(ctx).Errorf("Invalid manifest: %s", err) return nil @@ -338,7 +337,11 @@ func (ag *aggregator) checkOnchainConsistency(ctx context.Context, msg *fftypes. func (ag *aggregator) processMessage(ctx context.Context, manifest *fftypes.BatchManifest, pin *fftypes.Pin, msgBaseIndex int64, msgEntry *fftypes.MessageManifestEntry, state *batchState) (err error) { l := log.L(ctx) - msg, data, dataAvailable, err := ag.data.GetMessageWithDataCached(ctx, msgEntry.ID) + var cros []data.CacheReadOption + if pin.Masked { + cros = []data.CacheReadOption{data.CRORequirePins} + } + msg, data, dataAvailable, err := ag.data.GetMessageWithDataCached(ctx, msgEntry.ID, cros...) if err != nil { return err } @@ -445,6 +448,7 @@ func (ag *aggregator) attemptMessageDispatch(ctx context.Context, msg *fftypes.M // We handle definition events in-line on the aggregator, as it would be confusing for apps to be // dispatched subsequent events before we have processed the definition events they depend on. 
handlerResult, err := ag.definitions.HandleDefinitionBroadcast(ctx, state, msg, data, tx) + log.L(ctx).Infof("Result of definition broadcast '%s' [%s]: %s", msg.Header.Tag, msg.Header.ID, handlerResult.Action) if handlerResult.Action == definitions.ActionRetry { return false, err } diff --git a/internal/events/aggregator_test.go b/internal/events/aggregator_test.go index 249080dff2..fe338608bb 100644 --- a/internal/events/aggregator_test.go +++ b/internal/events/aggregator_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/definitions" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/mocks/blockchainmocks" @@ -206,7 +207,7 @@ func TestAggregationMaskedZeroNonceMatch(t *testing.T) { return *np.Hash == *member2NonceOne && np.Nonce == 1 })).Return(nil).Once() // Validate the message is ok - mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePins).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) // Insert the confirmed event mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { @@ -329,7 +330,7 @@ func TestAggregationMaskedNextSequenceMatch(t *testing.T) { {Context: contextUnmasked, Identity: member2org.DID, Hash: member2Nonce500, Nonce: 500, Sequence: 424}, }, nil, nil).Once() // Validate the message is ok - mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePins).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) 
mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) // Insert the confirmed event mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { @@ -512,7 +513,7 @@ func TestAggregationMigratedBroadcast(t *testing.T) { bp := &fftypes.BatchPersisted{ TX: batch.Payload.TX, BatchHeader: batch.BatchHeader, - Manifest: string(payloadBinary), + Manifest: fftypes.JSONAnyPtr(string(payloadBinary)), } // Get the batch @@ -593,7 +594,7 @@ func TestAggregationMigratedBroadcastNilMessageID(t *testing.T) { bp := &fftypes.BatchPersisted{ TX: batch.Payload.TX, BatchHeader: batch.BatchHeader, - Manifest: string(payloadBinary), + Manifest: fftypes.JSONAnyPtr(string(payloadBinary)), } mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) @@ -657,7 +658,7 @@ func TestAggregationMigratedBroadcastInvalid(t *testing.T) { bp := &fftypes.BatchPersisted{ TX: batch.Payload.TX, BatchHeader: batch.BatchHeader, - Manifest: "{}", + Manifest: fftypes.JSONAnyPtr("{}"), } mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) @@ -893,7 +894,7 @@ func TestProcessMsgFailData(t *testing.T) { defer cancel() mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(nil, nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(nil, nil, false, fmt.Errorf("pop")) err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) assert.Regexp(t, "pop", err) @@ -906,7 +907,7 @@ func TestProcessMsgFailMissingData(t *testing.T) { defer cancel() mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(&fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, nil, false, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(&fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, nil, 
false, nil) err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) assert.NoError(t, err) @@ -919,7 +920,7 @@ func TestProcessMsgFailMissingGroup(t *testing.T) { defer cancel() mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(&fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, nil, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(&fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, nil, true, nil) err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) assert.NoError(t, err) @@ -942,7 +943,7 @@ func TestProcessMsgFailBadPin(t *testing.T) { } mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg, nil, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(msg, nil, true, nil) err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{ MessageRef: fftypes.MessageRef{ @@ -974,7 +975,7 @@ func TestProcessMsgFailGetNextPins(t *testing.T) { } mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg, nil, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(msg, nil, true, nil) err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{ MessageRef: fftypes.MessageRef{ @@ -1060,7 +1061,7 @@ func TestProcessMsgFailPinUpdate(t *testing.T) { mdi.On("GetNextPins", ag.ctx, mock.Anything).Return([]*fftypes.NextPin{ {Context: fftypes.NewRandB32(), Hash: pin, Identity: org1.DID}, }, nil, nil) - 
mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg, nil, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(msg, nil, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(false, nil) mdi.On("InsertEvent", ag.ctx, mock.Anything).Return(nil) mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil) @@ -1601,8 +1602,8 @@ func TestDispatchPrivateQueuesLaterDispatch(t *testing.T) { mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID).Return(msg1, fftypes.DataArray{}, true, nil).Once() - mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID).Return(msg2, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID, data.CRORequirePins).Return(msg1, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID, data.CRORequirePins).Return(msg2, fftypes.DataArray{}, true, nil).Once() initNPG := &nextPinGroupState{topic: "topic1", groupID: groupID} member1NonceOne := initNPG.calcPinHash("org1", 1) @@ -1643,8 +1644,8 @@ func TestDispatchPrivateNextPinIncremented(t *testing.T) { mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID).Return(msg1, fftypes.DataArray{}, true, nil).Once() - mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID).Return(msg2, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID, data.CRORequirePins).Return(msg1, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID, data.CRORequirePins).Return(msg2, fftypes.DataArray{}, true, nil).Once() mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, 
nil) initNPG := &nextPinGroupState{topic: "topic1", groupID: groupID} @@ -2157,7 +2158,7 @@ func TestExtractManifestFail(t *testing.T) { defer cancel() manifest := ag.extractManifest(ag.ctx, &fftypes.BatchPersisted{ - Manifest: "!wrong", + Manifest: fftypes.JSONAnyPtr("!wrong"), }) assert.Nil(t, manifest) @@ -2168,7 +2169,7 @@ func TestExtractManifestBadVersion(t *testing.T) { defer cancel() manifest := ag.extractManifest(ag.ctx, &fftypes.BatchPersisted{ - Manifest: `{"version":999}`, + Manifest: fftypes.JSONAnyPtr(`{"version":999}`), }) assert.Nil(t, manifest) @@ -2179,7 +2180,7 @@ func TestMigrateManifestFail(t *testing.T) { defer cancel() manifest := ag.migrateManifest(ag.ctx, &fftypes.BatchPersisted{ - Manifest: "!wrong", + Manifest: fftypes.JSONAnyPtr("!wrong"), }) assert.Nil(t, manifest) diff --git a/internal/events/dx_callbacks.go b/internal/events/dx_callbacks.go index 34d3104d48..8ddea4f4e7 100644 --- a/internal/events/dx_callbacks.go +++ b/internal/events/dx_callbacks.go @@ -140,7 +140,7 @@ func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch return err } } - manifest = persistedBatch.Manifest + manifest = persistedBatch.Manifest.String() return nil }) }) diff --git a/internal/events/persist_batch.go b/internal/events/persist_batch.go index 6673a92de6..c356bf4402 100644 --- a/internal/events/persist_batch.go +++ b/internal/events/persist_batch.go @@ -55,7 +55,7 @@ func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, bat // Set confirmed on the batch (the messages should not be confirmed at this point - that's the aggregator's job) persistedBatch, _ = batch.Confirmed() - manifestHash := fftypes.HashString(persistedBatch.Manifest) + manifestHash := fftypes.HashString(persistedBatch.Manifest.String()) // Verify the hash calculation. if !manifestHash.Equals(batch.Hash) { @@ -110,7 +110,7 @@ func (em *eventManager) persistBatchContent(ctx context.Context, batch *fftypes. 
for di, dataRef := range msg.Data { msgData[di] = dataByID[*dataRef.ID] if msgData[di] == nil || !msgData[di].Hash.Equals(dataRef.Hash) { - log.L(ctx).Errorf("Message '%s' in batch '%s' - data not in-line in batch id='%s' hash='%s'", msg.Header.ID, batch.ID, dataRef.ID, dataRef.Hash) + log.L(ctx).Debugf("Message '%s' in batch '%s' - data not in-line in batch id='%s' hash='%s'", msg.Header.ID, batch.ID, dataRef.ID, dataRef.Hash) dataInBatch = false break } @@ -175,9 +175,12 @@ func (em *eventManager) persistReceivedData(ctx context.Context /* db TX context } func (em *eventManager) persistBatchMessage(ctx context.Context /* db TX context*/, batch *fftypes.Batch, i int, msg *fftypes.Message, optimization database.UpsertOptimization) (bool, error) { - if msg != nil && (msg.Header.Author != batch.Author || msg.Header.Key != batch.Key) { - log.L(ctx).Errorf("Mismatched key/author '%s'/'%s' on message entry %d in batch '%s'", msg.Header.Key, msg.Header.Author, i, batch.ID) - return false, nil // skip entry + if msg != nil { + if msg.Header.Author != batch.Author || msg.Header.Key != batch.Key { + log.L(ctx).Errorf("Mismatched key/author '%s'/'%s' on message entry %d in batch '%s'", msg.Header.Key, msg.Header.Author, i, batch.ID) + return false, nil // skip entry + } + msg.BatchID = batch.ID } return em.persistReceivedMessage(ctx, i, msg, "batch", batch.ID, optimization) diff --git a/internal/events/token_pool_created.go b/internal/events/token_pool_created.go index b5b4cf9957..e3d021586b 100644 --- a/internal/events/token_pool_created.go +++ b/internal/events/token_pool_created.go @@ -19,6 +19,7 @@ package events import ( "context" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/blockchain" @@ -133,7 +134,7 @@ func (em *eventManager) TokenPoolCreated(ti tokens.Plugin, pool *tokens.TokenPoo if existingPool.State == 
fftypes.TokenPoolStateConfirmed { return nil // already confirmed } - if msg, err := em.database.GetMessageByID(ctx, existingPool.Message); err != nil { + if msg, _, _, err := em.data.GetMessageWithDataCached(ctx, existingPool.Message, data.CRORequireBatchID); err != nil { return err } else if msg != nil { batchID = msg.BatchID // trigger rewind after completion of database transaction diff --git a/internal/events/token_pool_created_test.go b/internal/events/token_pool_created_test.go index 70ed361d33..d57d9f3d44 100644 --- a/internal/events/token_pool_created_test.go +++ b/internal/events/token_pool_created_test.go @@ -20,9 +20,11 @@ import ( "fmt" "testing" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/mocks/assetmocks" "github.com/hyperledger/firefly/mocks/broadcastmocks" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/blockchain" @@ -93,6 +95,7 @@ func TestTokenPoolCreatedConfirm(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mti := &tokenmocks.Plugin{} mth := em.txHelper.(*txcommonmocks.Helper) + mdm := em.data.(*datamocks.Manager) opID := fftypes.NewUUID() txID := fftypes.NewUUID() @@ -140,13 +143,14 @@ func TestTokenPoolCreatedConfirm(t *testing.T) { mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { return e.Type == fftypes.EventTypePoolConfirmed && *e.Reference == *storedPool.ID })).Return(nil).Once() - mdi.On("GetMessageByID", em.ctx, storedPool.Message).Return(nil, fmt.Errorf("pop")).Once() - mdi.On("GetMessageByID", em.ctx, storedPool.Message).Return(storedMessage, nil).Once() + mdm.On("GetMessageWithDataCached", em.ctx, storedPool.Message, data.CRORequireBatchID).Return(nil, nil, false, fmt.Errorf("pop")).Once() + mdm.On("GetMessageWithDataCached", em.ctx, storedPool.Message, 
data.CRORequireBatchID).Return(storedMessage, nil, true, nil).Once() err := em.TokenPoolCreated(mti, chainPool) assert.NoError(t, err) mdi.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestTokenPoolCreatedAlreadyConfirmed(t *testing.T) { @@ -193,6 +197,7 @@ func TestTokenPoolCreatedMigrate(t *testing.T) { mam := em.assets.(*assetmocks.Manager) mti := &tokenmocks.Plugin{} mth := em.txHelper.(*txcommonmocks.Helper) + mdm := em.data.(*datamocks.Manager) txID := fftypes.NewUUID() info := fftypes.JSONObject{"some": "info"} @@ -235,13 +240,14 @@ func TestTokenPoolCreatedMigrate(t *testing.T) { })).Return(nil).Once() mam.On("ActivateTokenPool", em.ctx, storedPool, info).Return(fmt.Errorf("pop")).Once() mam.On("ActivateTokenPool", em.ctx, storedPool, info).Return(nil).Once() - mdi.On("GetMessageByID", em.ctx, storedPool.Message).Return(storedMessage, nil) + mdm.On("GetMessageWithDataCached", em.ctx, storedPool.Message, data.CRORequireBatchID).Return(storedMessage, nil, true, nil).Once() err := em.TokenPoolCreated(mti, chainPool) assert.NoError(t, err) mdi.AssertExpectations(t) mam.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestConfirmPoolBlockchainEventFail(t *testing.T) { diff --git a/internal/orchestrator/data_query.go b/internal/orchestrator/data_query.go index 5b97328d23..5984b4d649 100644 --- a/internal/orchestrator/data_query.go +++ b/internal/orchestrator/data_query.go @@ -307,3 +307,7 @@ func (or *orchestrator) GetTransactionBlockchainEvents(ctx context.Context, ns, ) return or.database.GetBlockchainEvents(ctx, filter) } + +func (or *orchestrator) GetPins(ctx context.Context, filter database.AndFilter) ([]*fftypes.Pin, *database.FilterResult, error) { + return or.database.GetPins(ctx, filter) +} diff --git a/internal/orchestrator/data_query_test.go b/internal/orchestrator/data_query_test.go index cfe56de831..565968cf41 100644 --- a/internal/orchestrator/data_query_test.go +++ b/internal/orchestrator/data_query_test.go @@ -599,3 +599,13 @@ 
func TestGetTransactionBlockchainEventsBadID(t *testing.T) { _, _, err := or.GetTransactionBlockchainEvents(context.Background(), "ns1", "") assert.Regexp(t, "FF10142", err) } + +func TestGetPins(t *testing.T) { + or := newTestOrchestrator() + u := fftypes.NewUUID() + or.mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) + fb := database.PinQueryFactory.NewFilter(context.Background()) + f := fb.And(fb.Eq("hash", u)) + _, _, err := or.GetPins(context.Background(), f) + assert.NoError(t, err) +} diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index 36083494ad..eda7310890 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -120,6 +120,7 @@ type Orchestrator interface { GetEvents(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Event, *database.FilterResult, error) GetBlockchainEventByID(ctx context.Context, id *fftypes.UUID) (*fftypes.BlockchainEvent, error) GetBlockchainEvents(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BlockchainEvent, *database.FilterResult, error) + GetPins(ctx context.Context, filter database.AndFilter) ([]*fftypes.Pin, *database.FilterResult, error) // Charts GetChartHistogram(ctx context.Context, ns string, startTime int64, endTime int64, buckets int64, tableName database.CollectionName) ([]*fftypes.ChartHistogram, error) diff --git a/internal/privatemessaging/message.go b/internal/privatemessaging/message.go index 826d4092ee..e81458530d 100644 --- a/internal/privatemessaging/message.go +++ b/internal/privatemessaging/message.go @@ -183,7 +183,6 @@ func (s *messageSender) sendInternal(ctx context.Context, method sendMethod) err return err } s.mgr.data.UpdateMessageCache(&s.msg.Message, s.data) - s.data = nil // no need to keep hold of this log.L(ctx).Infof("Sent private message %s:%s sequence=%d", s.msg.Header.Namespace, s.msg.Header.ID, s.msg.Sequence) return nil diff --git 
a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index a4ec4eb3fb..a736ced748 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -188,7 +188,7 @@ func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, state *batc tw.Group = group } - return pm.sendData(ctx, tw, nodes, state.Persisted.Manifest) + return pm.sendData(ctx, tw, nodes, state.Persisted.Manifest.String()) } func (pm *privateMessaging) transferBlobs(ctx context.Context, data fftypes.DataArray, txid *fftypes.UUID, node *fftypes.Identity) error { diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index aefc6ce243..dca27bbbef 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -253,6 +253,11 @@ func (_m *Manager) UpdateMessageCache(msg *fftypes.Message, _a1 fftypes.DataArra _m.Called(msg, _a1) } +// UpdateMessageIfCached provides a mock function with given fields: ctx, msg +func (_m *Manager) UpdateMessageIfCached(ctx context.Context, msg *fftypes.Message) { + _m.Called(ctx, msg) +} + // UploadBLOB provides a mock function with given fields: ctx, ns, inData, blob, autoMeta func (_m *Manager) UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) { ret := _m.Called(ctx, ns, inData, blob, autoMeta) diff --git a/mocks/orchestratormocks/orchestrator.go b/mocks/orchestratormocks/orchestrator.go index 9533a4934b..127612dc1b 100644 --- a/mocks/orchestratormocks/orchestrator.go +++ b/mocks/orchestratormocks/orchestrator.go @@ -960,6 +960,38 @@ func (_m *Orchestrator) GetOperations(ctx context.Context, ns string, filter dat return r0, r1, r2 } +// GetPins provides a mock function with given fields: ctx, filter +func (_m *Orchestrator) GetPins(ctx context.Context, filter database.AndFilter) ([]*fftypes.Pin, *database.FilterResult, error) { + ret := _m.Called(ctx, filter) + + 
var r0 []*fftypes.Pin + if rf, ok := ret.Get(0).(func(context.Context, database.AndFilter) []*fftypes.Pin); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*fftypes.Pin) + } + } + + var r1 *database.FilterResult + if rf, ok := ret.Get(1).(func(context.Context, database.AndFilter) *database.FilterResult); ok { + r1 = rf(ctx, filter) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*database.FilterResult) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, database.AndFilter) error); ok { + r2 = rf(ctx, filter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // GetStatus provides a mock function with given fields: ctx func (_m *Orchestrator) GetStatus(ctx context.Context) (*fftypes.NodeStatus, error) { ret := _m.Called(ctx) diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index b30d447069..3402c91fe1 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -74,7 +74,7 @@ type Batch struct { // BatchPersisted is the structure written to the database type BatchPersisted struct { BatchHeader - Manifest string `json:"manifest"` // not automatically parsed + Manifest *JSONAny `json:"manifest"` TX TransactionRef `json:"tx"` PayloadRef string `json:"payloadRef,omitempty"` Confirmed *FFTime `json:"confirmed"` @@ -149,7 +149,7 @@ func (b *Batch) Confirmed() (*BatchPersisted, *BatchManifest) { return &BatchPersisted{ BatchHeader: b.BatchHeader, TX: b.Payload.TX, - Manifest: manifestString, + Manifest: JSONAnyPtr(manifestString), Confirmed: Now(), }, manifest } diff --git a/pkg/fftypes/batch_test.go b/pkg/fftypes/batch_test.go index 461c8ca44c..43268411c7 100644 --- a/pkg/fftypes/batch_test.go +++ b/pkg/fftypes/batch_test.go @@ -48,7 +48,7 @@ func TestSQLSerializedManifest(t *testing.T) { mfString := manifest.String() assert.Equal(t, batch.BatchHeader, bp.BatchHeader) assert.Equal(t, batch.Payload.TX, bp.TX) - assert.Equal(t, mfString, bp.Manifest) + assert.Equal(t, mfString, 
bp.Manifest.String()) assert.NotNil(t, bp.Confirmed) var mf *BatchManifest diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index 6c7539d907..8589721bf4 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -80,9 +80,12 @@ func (d *Data) BatchData(batchType BatchType) *Data { Validator: d.Validator, Namespace: d.Namespace, Hash: d.Hash, + Created: d.Created, Datatype: d.Datatype, Value: d.Value, Blob: d.Blob.BatchBlobRef(batchType), + + ValueSize: d.ValueSize, } } diff --git a/pkg/fftypes/jsonany.go b/pkg/fftypes/jsonany.go index f8137f0848..cd895e4520 100644 --- a/pkg/fftypes/jsonany.go +++ b/pkg/fftypes/jsonany.go @@ -71,6 +71,13 @@ func (h JSONAny) MarshalJSON() ([]byte, error) { return []byte(h), nil } +func (h *JSONAny) Unmarshal(ctx context.Context, v interface{}) error { + if h == nil { + return i18n.NewError(ctx, i18n.MsgNilOrNullObject) + } + return json.Unmarshal([]byte(*h), v) +} + func (h *JSONAny) Hash() *Bytes32 { if h == nil { return nil diff --git a/pkg/fftypes/jsonany_test.go b/pkg/fftypes/jsonany_test.go index 8faf3e482f..a55b69a9d8 100644 --- a/pkg/fftypes/jsonany_test.go +++ b/pkg/fftypes/jsonany_test.go @@ -17,6 +17,7 @@ package fftypes import ( + "context" "encoding/json" "testing" @@ -152,3 +153,18 @@ func TestValue(t *testing.T) { assert.Equal(t, "{}", v) } + +func TestUnmarshal(t *testing.T) { + + var h *JSONAny + var myObj struct { + Key1 string `json:"key1"` + } + err := h.Unmarshal(context.Background(), &myObj) + assert.Regexp(t, "FF10368", err) + + h = JSONAnyPtr(`{"key1":"value1"}`) + err = h.Unmarshal(context.Background(), &myObj) + assert.NoError(t, err) + assert.Equal(t, "value1", myObj.Key1) +}