From a316a3186babe5ed7f4d68b44c05288c5cc6e63a Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 00:04:08 -0500 Subject: [PATCH 01/13] Add batch writing of messages and data Signed-off-by: Peter Broadhurst --- mocks/datamocks/manager.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index a42b7f5992..f2e35335d6 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -32,6 +32,11 @@ func (_m *Manager) CheckDatatype(ctx context.Context, ns string, datatype *fftyp return r0 } +// Close provides a mock function with given fields: +func (_m *Manager) Close() { + _m.Called() +} + // CopyBlobPStoDX provides a mock function with given fields: ctx, _a1 func (_m *Manager) CopyBlobPStoDX(ctx context.Context, _a1 *fftypes.Data) (*fftypes.Blob, error) { ret := _m.Called(ctx, _a1) From 2b63f71c2d0f3496c4932c6153c3180b33ffc416 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 10 Mar 2022 11:52:07 -0500 Subject: [PATCH 02/13] Add transaction cache Signed-off-by: Peter Broadhurst --- internal/assets/manager.go | 4 +- internal/assets/manager_test.go | 9 ++- internal/batch/batch_manager.go | 6 +- internal/batch/batch_manager_test.go | 45 ++++++++---- internal/batch/batch_processor.go | 4 +- internal/batch/batch_processor_test.go | 4 +- internal/config/config.go | 6 ++ internal/contracts/manager.go | 5 +- internal/contracts/manager_test.go | 24 ++++-- internal/events/event_dispatcher.go | 4 +- internal/events/event_dispatcher_test.go | 4 +- internal/events/event_manager.go | 6 +- internal/events/event_manager_test.go | 9 ++- internal/events/subscription_manager.go | 9 ++- internal/events/subscription_manager_test.go | 7 +- internal/orchestrator/orchestrator.go | 13 ++-- internal/orchestrator/orchestrator_test.go | 1 + internal/txcommon/event_enrich_test.go | 22 ++++-- internal/txcommon/txcommon.go | 59 ++++++++++++--- internal/txcommon/txcommon_test.go | 77 
++++++++++++++++---- mocks/txcommonmocks/helper.go | 23 ++++++ 21 files changed, 255 insertions(+), 86 deletions(-) diff --git a/internal/assets/manager.go b/internal/assets/manager.go index e1f3633087..c37feda55f 100644 --- a/internal/assets/manager.go +++ b/internal/assets/manager.go @@ -82,14 +82,14 @@ type assetManager struct { keyNormalization int } -func NewAssetManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, sa syncasync.Bridge, bm broadcast.Manager, pm privatemessaging.Manager, ti map[string]tokens.Plugin, mm metrics.Manager, om operations.Manager) (Manager, error) { +func NewAssetManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, sa syncasync.Bridge, bm broadcast.Manager, pm privatemessaging.Manager, ti map[string]tokens.Plugin, mm metrics.Manager, om operations.Manager, txHelper txcommon.Helper) (Manager, error) { if di == nil || im == nil || sa == nil || bm == nil || pm == nil || ti == nil || mm == nil || om == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } am := &assetManager{ ctx: ctx, database: di, - txHelper: txcommon.NewTransactionHelper(di), + txHelper: txHelper, identity: im, data: dm, syncasync: sa, diff --git a/internal/assets/manager_test.go b/internal/assets/manager_test.go index b32da3d7e4..5eed3e17fa 100644 --- a/internal/assets/manager_test.go +++ b/internal/assets/manager_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/broadcastmocks" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" @@ -47,11 +48,12 @@ func newTestAssets(t *testing.T) (*assetManager, func()) { mti := &tokenmocks.Plugin{} mm := &metricsmocks.Manager{} mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mti.On("Name").Return("ut_tokens").Maybe() 
mm.On("IsMetricsEnabled").Return(false) mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) ctx, cancel := context.WithCancel(context.Background()) - a, err := NewAssetManager(ctx, mdi, mim, mdm, msa, mbm, mpm, map[string]tokens.Plugin{"magic-tokens": mti}, mm, mom) + a, err := NewAssetManager(ctx, mdi, mim, mdm, msa, mbm, mpm, map[string]tokens.Plugin{"magic-tokens": mti}, mm, mom, txHelper) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() rag.RunFn = func(a mock.Arguments) { rag.ReturnArguments = mock.Arguments{a[1].(func(context.Context) error)(a[0].(context.Context))} @@ -73,12 +75,13 @@ func newTestAssetsWithMetrics(t *testing.T) (*assetManager, func()) { mti := &tokenmocks.Plugin{} mm := &metricsmocks.Manager{} mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mti.On("Name").Return("ut_tokens").Maybe() mm.On("IsMetricsEnabled").Return(true) mm.On("TransferSubmitted", mock.Anything) mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) ctx, cancel := context.WithCancel(context.Background()) - a, err := NewAssetManager(ctx, mdi, mim, mdm, msa, mbm, mpm, map[string]tokens.Plugin{"magic-tokens": mti}, mm, mom) + a, err := NewAssetManager(ctx, mdi, mim, mdm, msa, mbm, mpm, map[string]tokens.Plugin{"magic-tokens": mti}, mm, mom, txHelper) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() rag.RunFn = func(a mock.Arguments) { rag.ReturnArguments = mock.Arguments{a[1].(func(context.Context) error)(a[0].(context.Context))} @@ -90,7 +93,7 @@ func newTestAssetsWithMetrics(t *testing.T) (*assetManager, func()) { } func TestInitFail(t *testing.T) { - _, err := NewAssetManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil) + _, err := NewAssetManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 
1197b2c5a3..b40c3595fc 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -28,11 +28,12 @@ import ( "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/retry" "github.com/hyperledger/firefly/internal/sysmessaging" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" ) -func NewBatchManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, dm data.Manager) (Manager, error) { +func NewBatchManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, dm data.Manager, txHelper txcommon.Helper) (Manager, error) { if di == nil || dm == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } @@ -44,6 +45,7 @@ func NewBatchManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, di data ni: ni, database: di, data: dm, + txHelper: txHelper, readOffset: -1, // On restart we trawl for all ready messages readPageSize: uint64(readPageSize), messagePollTimeout: config.GetDuration(config.BatchManagerReadPollTimeout), @@ -85,6 +87,7 @@ type batchManager struct { ni sysmessaging.LocalNodeInfo database database.Plugin data data.Manager + txHelper txcommon.Helper dispatcherMux sync.Mutex dispatchers map[string]*dispatcher newMessages chan int64 @@ -170,6 +173,7 @@ func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fft dispatch: dispatcher.handler, }, bm.retry, + bm.txHelper, ) dispatcher.processors[name] = processor } diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index 26c7155999..90eed813d1 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -25,6 +25,7 @@ import ( "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" + 
"github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/sysmessagingmocks" @@ -41,6 +42,7 @@ func TestE2EDispatchBroadcast(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) readyForDispatch := make(chan bool) waitForDispatch := make(chan *DispatchState) @@ -70,7 +72,7 @@ func TestE2EDispatchBroadcast(t *testing.T) { return nil } ctx, cancel := context.WithCancel(context.Background()) - bmi, _ := NewBatchManager(ctx, mni, mdi, mdm) + bmi, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm := bmi.(*batchManager) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, handler, DispatcherOptions{ @@ -155,6 +157,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) readyForDispatch := make(chan bool) waitForDispatch := make(chan *DispatchState) @@ -187,7 +190,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { return nil } ctx, cancel := context.WithCancel(context.Background()) - bmi, _ := NewBatchManager(ctx, mni, mdi, mdm) + bmi, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm := bmi.(*batchManager) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypePrivate}, handler, DispatcherOptions{ @@ -268,8 +271,9 @@ func TestDispatchUnknownType(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, 
mdm) ctx, cancel := context.WithCancel(context.Background()) - bmi, _ := NewBatchManager(ctx, mni, mdi, mdm) + bmi, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm := bmi.(*batchManager) msg := &fftypes.Message{} @@ -285,7 +289,7 @@ func TestDispatchUnknownType(t *testing.T) { } func TestInitFailNoPersistence(t *testing.T) { - _, err := NewBatchManager(context.Background(), nil, nil, nil) + _, err := NewBatchManager(context.Background(), nil, nil, nil, nil) assert.Error(t, err) } @@ -294,7 +298,8 @@ func TestGetInvalidBatchTypeMsg(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) defer bm.Close() _, err := bm.(*batchManager).getProcessor(fftypes.BatchTypeBroadcast, "wrong", nil, "ns1", &fftypes.SignerRef{}) assert.Regexp(t, "FF10126", err) @@ -304,8 +309,9 @@ func TestMessageSequencerCancelledContext(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) defer bm.Close() ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -318,7 +324,8 @@ func TestMessageSequencerMissingMessageData(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) bm.RegisterDispatcher("utdispatcher", 
fftypes.TransactionTypeNone, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, state *DispatchState) error { return nil @@ -359,9 +366,10 @@ func TestMessageSequencerUpdateMessagesFail(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) ctx, cancelCtx := context.WithCancel(context.Background()) - bm, _ := NewBatchManager(ctx, mni, mdi, mdm) + bm, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, state *DispatchState) error { return nil @@ -414,8 +422,9 @@ func TestMessageSequencerDispatchFail(t *testing.T) { mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) ctx, cancelCtx := context.WithCancel(context.Background()) - bm, _ := NewBatchManager(ctx, mni, mdi, mdm) + bm, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, state *DispatchState) error { cancelCtx() @@ -454,7 +463,8 @@ func TestMessageSequencerUpdateBatchFail(t *testing.T) { mni := &sysmessagingmocks.LocalNodeInfo{} mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) ctx, cancelCtx := context.WithCancel(context.Background()) - bm, _ := NewBatchManager(ctx, mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, state *DispatchState) error { return nil 
@@ -504,7 +514,8 @@ func TestWaitForPollTimeout(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) bm.(*batchManager).messagePollTimeout = 1 * time.Microsecond bm.(*batchManager).waitForNewMessages() } @@ -513,7 +524,8 @@ func TestWaitForNewMessage(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bmi, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bmi, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) bm := bmi.(*batchManager) bm.readOffset = 22222 bm.NewMessages() <- 12345 @@ -525,7 +537,8 @@ func TestAssembleMessageDataNilData(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) bm.Close() mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, nil) _, err := bm.(*batchManager).assembleMessageData(fftypes.BatchTypePrivate, &fftypes.Message{ @@ -541,7 +554,8 @@ func TestGetMessageDataFail(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, fmt.Errorf("pop")) bm.Close() _, _ = bm.(*batchManager).assembleMessageData(fftypes.BatchTypePrivate, 
&fftypes.Message{ @@ -559,7 +573,8 @@ func TestGetMessageNotFound(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) mdm.On("GetMessageDataCached", mock.Anything, mock.Anything, data.CRORequirePublicBlobRefs).Return(nil, false, nil) bm.Close() _, err := bm.(*batchManager).assembleMessageData(fftypes.BatchTypeBroadcast, &fftypes.Message{ diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 40ed6e7c27..9e521a001e 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -101,7 +101,7 @@ type DispatchState struct { const batchSizeEstimateBase = int64(512) -func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, dm data.Manager, conf *batchProcessorConf, baseRetryConf *retry.Retry) *batchProcessor { +func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, dm data.Manager, conf *batchProcessorConf, baseRetryConf *retry.Retry, txHelper txcommon.Helper) *batchProcessor { pCtx := log.WithLogField(log.WithLogField(ctx, "d", conf.dispatcherName), "p", conf.name) pCtx, cancelCtx := context.WithCancel(pCtx) bp := &batchProcessor{ @@ -110,7 +110,7 @@ func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di da ni: ni, database: di, data: dm, - txHelper: txcommon.NewTransactionHelper(di), + txHelper: txHelper, newWork: make(chan *batchWork, conf.BatchMaxSize), quescing: make(chan bool, 1), done: make(chan struct{}), diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index 136b89e310..54c5f6865c 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -23,6 +23,7 @@ import ( 
"github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/retry" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/sysmessagingmocks" @@ -36,6 +37,7 @@ func newTestBatchProcessor(dispatch DispatchHandler) (*databasemocks.Plugin, *ba mdi := &databasemocks.Plugin{} mni := &sysmessagingmocks.LocalNodeInfo{} mdm := &datamocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()).Maybe() bp := newBatchProcessor(context.Background(), mni, mdi, mdm, &batchProcessorConf{ namespace: "ns1", @@ -51,7 +53,7 @@ func newTestBatchProcessor(dispatch DispatchHandler) (*databasemocks.Plugin, *ba }, &retry.Retry{ InitialDelay: 1 * time.Microsecond, MaximumDelay: 1 * time.Microsecond, - }) + }, txHelper) bp.txHelper = &txcommonmocks.Helper{} return mdi, bp } diff --git a/internal/config/config.go b/internal/config/config.go index 279f90f909..01d5c5d742 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -230,6 +230,10 @@ var ( SubscriptionsRetryMaxDelay = rootKey("subscription.retry.maxDelay") // SubscriptionsRetryFactor the backoff factor to use for retry of database operations SubscriptionsRetryFactor = rootKey("subscription.retry.factor") + // TransactionCacheSize + TransactionCacheSize = rootKey("transaction.cache.size") + // TransactionCacheTTL + TransactionCacheTTL = rootKey("transaction.cache.ttl") // AssetManagerKeyNormalization mechanism to normalize keys before using them. 
Valid options: "blockchain_plugin" - use blockchain plugin (default), "none" - do not attempt normalization AssetManagerKeyNormalization = rootKey("asset.manager.keyNormalization") // UIEnabled set to false to disable the UI (default is true, so UI will be enabled if ui.path is valid) @@ -365,6 +369,8 @@ func Reset() { viper.SetDefault(string(SubscriptionsRetryInitialDelay), "250ms") viper.SetDefault(string(SubscriptionsRetryMaxDelay), "30s") viper.SetDefault(string(SubscriptionsRetryFactor), 2.0) + viper.SetDefault(string(TransactionCacheSize), "1Mb") + viper.SetDefault(string(TransactionCacheTTL), "5m") viper.SetDefault(string(UIEnabled), true) viper.SetDefault(string(ValidatorCacheSize), "1Mb") viper.SetDefault(string(ValidatorCacheTTL), "1h") diff --git a/internal/contracts/manager.go b/internal/contracts/manager.go index 0b6d737348..306575bc96 100644 --- a/internal/contracts/manager.go +++ b/internal/contracts/manager.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/hyperledger/firefly/internal/broadcast" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/identity" "github.com/hyperledger/firefly/internal/operations" @@ -70,7 +71,7 @@ type contractManager struct { operations operations.Manager } -func NewContractManager(ctx context.Context, di database.Plugin, bm broadcast.Manager, im identity.Manager, bi blockchain.Plugin, om operations.Manager) (Manager, error) { +func NewContractManager(ctx context.Context, di database.Plugin, dm data.Manager, bm broadcast.Manager, im identity.Manager, bi blockchain.Plugin, om operations.Manager, txHelper txcommon.Helper) (Manager, error) { if di == nil || bm == nil || im == nil || bi == nil || om == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } @@ -81,7 +82,7 @@ func NewContractManager(ctx context.Context, di database.Plugin, bm broadcast.Ma cm := &contractManager{ database: di, - txHelper: 
txcommon.NewTransactionHelper(di), + txHelper: txHelper, broadcast: bm, identity: im, blockchain: bi, diff --git a/internal/contracts/manager_test.go b/internal/contracts/manager_test.go index aa28114736..8aa8b5d82e 100644 --- a/internal/contracts/manager_test.go +++ b/internal/contracts/manager_test.go @@ -23,9 +23,11 @@ import ( "github.com/hyperledger/firefly/internal/blockchain/ethereum" "github.com/hyperledger/firefly/internal/identity" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/broadcastmocks" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" @@ -37,29 +39,31 @@ import ( ) func newTestContractManager() *contractManager { - mdb := &databasemocks.Plugin{} + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} mbm := &broadcastmocks.Manager{} mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mbi.On("GetFFIParamValidator", mock.Anything).Return(nil, nil) mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) mbi.On("Name").Return("mockblockchain").Maybe() - rag := mdb.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() + rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() rag.RunFn = func(a mock.Arguments) { rag.ReturnArguments = mock.Arguments{ a[1].(func(context.Context) error)(a[0].(context.Context)), } } - cm, _ := NewContractManager(context.Background(), mdb, mbm, mim, mbi, mom) + cm, _ := NewContractManager(context.Background(), mdi, mdm, mbm, mim, mbi, mom, txHelper) cm.(*contractManager).txHelper = &txcommonmocks.Helper{} return cm.(*contractManager) } func TestNewContractManagerFail(t 
*testing.T) { - _, err := NewContractManager(context.Background(), nil, nil, nil, nil, nil) + _, err := NewContractManager(context.Background(), nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } @@ -69,25 +73,29 @@ func TestName(t *testing.T) { } func TestNewContractManagerFFISchemaLoaderFail(t *testing.T) { - mdb := &databasemocks.Plugin{} + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} mbm := &broadcastmocks.Manager{} mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mbi.On("GetFFIParamValidator", mock.Anything).Return(nil, fmt.Errorf("pop")) - _, err := NewContractManager(context.Background(), mdb, mbm, mim, mbi, mom) + _, err := NewContractManager(context.Background(), mdi, mdm, mbm, mim, mbi, mom, txHelper) assert.Regexp(t, "pop", err) } func TestNewContractManagerFFISchemaLoader(t *testing.T) { - mdb := &databasemocks.Plugin{} + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} mbm := &broadcastmocks.Manager{} mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mbi.On("GetFFIParamValidator", mock.Anything).Return(ðereum.FFIParamValidator{}, nil) mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) - _, err := NewContractManager(context.Background(), mdb, mbm, mim, mbi, mom) + _, err := NewContractManager(context.Background(), mdi, mdm, mbm, mim, mbi, mom, txHelper) assert.NoError(t, err) } diff --git a/internal/events/event_dispatcher.go b/internal/events/event_dispatcher.go index 95adc12a38..7695ed1a24 100644 --- a/internal/events/event_dispatcher.go +++ b/internal/events/event_dispatcher.go @@ -66,7 +66,7 @@ type eventDispatcher struct { txHelper txcommon.Helper } -func newEventDispatcher(ctx context.Context, ei events.Plugin, di database.Plugin, dm data.Manager, sh 
definitions.DefinitionHandlers, connID string, sub *subscription, en *eventNotifier, cel *changeEventListener) *eventDispatcher { +func newEventDispatcher(ctx context.Context, ei events.Plugin, di database.Plugin, dm data.Manager, sh definitions.DefinitionHandlers, connID string, sub *subscription, en *eventNotifier, cel *changeEventListener, txHelper txcommon.Helper) *eventDispatcher { ctx, cancelCtx := context.WithCancel(ctx) readAhead := config.GetUint(config.SubscriptionDefaultsReadAhead) if sub.definition.Options.ReadAhead != nil { @@ -94,7 +94,7 @@ func newEventDispatcher(ctx context.Context, ei events.Plugin, di database.Plugi acksNacks: make(chan ackNack), closed: make(chan struct{}), cel: cel, - txHelper: txcommon.NewTransactionHelper(di), + txHelper: txHelper, } pollerConf := &eventPollerConf{ diff --git a/internal/events/event_dispatcher_test.go b/internal/events/event_dispatcher_test.go index 55972e13df..0b1f58041c 100644 --- a/internal/events/event_dispatcher_test.go +++ b/internal/events/event_dispatcher_test.go @@ -24,6 +24,7 @@ import ( "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/definitionsmocks" @@ -42,8 +43,9 @@ func newTestEventDispatcher(sub *subscription) (*eventDispatcher, func()) { mei.On("Name").Return("ut").Maybe() mdm := &datamocks.Manager{} msh := &definitionsmocks.DefinitionHandlers{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) ctx, cancel := context.WithCancel(context.Background()) - return newEventDispatcher(ctx, mei, mdi, mdm, msh, fftypes.NewUUID().String(), sub, newEventNotifier(ctx, "ut"), newChangeEventListener(ctx)), func() { + return newEventDispatcher(ctx, mei, mdi, mdm, msh, fftypes.NewUUID().String(), sub, newEventNotifier(ctx, "ut"), newChangeEventListener(ctx), txHelper), 
func() { cancel() config.Reset() } diff --git a/internal/events/event_manager.go b/internal/events/event_manager.go index 605ab03147..de46c3b4fe 100644 --- a/internal/events/event_manager.go +++ b/internal/events/event_manager.go @@ -99,7 +99,7 @@ type eventManager struct { metrics metrics.Manager } -func NewEventManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, si sharedstorage.Plugin, di database.Plugin, bi blockchain.Plugin, im identity.Manager, dh definitions.DefinitionHandlers, dm data.Manager, bm broadcast.Manager, pm privatemessaging.Manager, am assets.Manager, mm metrics.Manager) (EventManager, error) { +func NewEventManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, si sharedstorage.Plugin, di database.Plugin, bi blockchain.Plugin, im identity.Manager, dh definitions.DefinitionHandlers, dm data.Manager, bm broadcast.Manager, pm privatemessaging.Manager, am assets.Manager, mm metrics.Manager, txHelper txcommon.Helper) (EventManager, error) { if ni == nil || si == nil || di == nil || bi == nil || im == nil || dh == nil || dm == nil || bm == nil || pm == nil || am == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } @@ -110,7 +110,7 @@ func NewEventManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, si shar ni: ni, sharedstorage: si, database: di, - txHelper: txcommon.NewTransactionHelper(di), + txHelper: txHelper, identity: im, definitions: dh, data: dm, @@ -133,7 +133,7 @@ func NewEventManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, si shar em.internalEvents = ie.(*system.Events) var err error - if em.subManager, err = newSubscriptionManager(ctx, di, dm, newEventNotifier, dh); err != nil { + if em.subManager, err = newSubscriptionManager(ctx, di, dm, newEventNotifier, dh, txHelper); err != nil { return nil, err } diff --git a/internal/events/event_manager_test.go b/internal/events/event_manager_test.go index 59dfebb00b..b8b762fcee 100644 --- a/internal/events/event_manager_test.go +++ 
b/internal/events/event_manager_test.go @@ -23,6 +23,7 @@ import ( "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/events/system" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/assetmocks" "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/broadcastmocks" @@ -66,6 +67,7 @@ func newTestEventManagerCommon(t *testing.T, metrics bool) (*eventManager, func( mam := &assetmocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} mmi := &metricsmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mmi.On("IsMetricsEnabled").Return(metrics) if metrics { mmi.On("TransferConfirmed", mock.Anything) @@ -73,7 +75,7 @@ func newTestEventManagerCommon(t *testing.T, metrics bool) (*eventManager, func( mni.On("GetNodeUUID", mock.Anything).Return(testNodeID).Maybe() met.On("Name").Return("ut").Maybe() mbi.On("VerifierType").Return(fftypes.VerifierTypeEthAddress).Maybe() - emi, err := NewEventManager(ctx, mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, mmi) + emi, err := NewEventManager(ctx, mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, mmi, txHelper) em := emi.(*eventManager) em.txHelper = &txcommonmocks.Helper{} rag := mdi.On("RunAsGroup", em.ctx, mock.Anything).Maybe() @@ -104,7 +106,7 @@ func TestStartStop(t *testing.T) { } func TestStartStopBadDependencies(t *testing.T) { - _, err := NewEventManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + _, err := NewEventManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } @@ -123,8 +125,9 @@ func TestStartStopBadTransports(t *testing.T) { mni := &sysmessagingmocks.LocalNodeInfo{} mam := &assetmocks.Manager{} mm := &metricsmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mbi.On("VerifierType").Return(fftypes.VerifierTypeEthAddress) - _, err := 
NewEventManager(context.Background(), mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, mm) + _, err := NewEventManager(context.Background(), mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, mm, txHelper) assert.Regexp(t, "FF10172", err) } diff --git a/internal/events/subscription_manager.go b/internal/events/subscription_manager.go index 613c9057ae..b80a4931dc 100644 --- a/internal/events/subscription_manager.go +++ b/internal/events/subscription_manager.go @@ -29,6 +29,7 @@ import ( "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/retry" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/events" "github.com/hyperledger/firefly/pkg/fftypes" @@ -72,6 +73,7 @@ type subscriptionManager struct { ctx context.Context database database.Plugin data data.Manager + txHelper txcommon.Helper eventNotifier *eventNotifier definitions definitions.DefinitionHandlers transports map[string]events.Plugin @@ -86,7 +88,7 @@ type subscriptionManager struct { retry retry.Retry } -func newSubscriptionManager(ctx context.Context, di database.Plugin, dm data.Manager, en *eventNotifier, sh definitions.DefinitionHandlers) (*subscriptionManager, error) { +func newSubscriptionManager(ctx context.Context, di database.Plugin, dm data.Manager, en *eventNotifier, sh definitions.DefinitionHandlers, txHelper txcommon.Helper) (*subscriptionManager, error) { ctx, cancelCtx := context.WithCancel(ctx) sm := &subscriptionManager{ ctx: ctx, @@ -101,6 +103,7 @@ func newSubscriptionManager(ctx context.Context, di database.Plugin, dm data.Man cancelCtx: cancelCtx, eventNotifier: en, definitions: sh, + txHelper: txHelper, retry: retry.Retry{ InitialDelay: config.GetDuration(config.SubscriptionsRetryInitialDelay), MaximumDelay: config.GetDuration(config.SubscriptionsRetryMaxDelay), @@ -473,7 +476,7 @@ func (sm *subscriptionManager) 
matchSubToConnLocked(conn *connection, sub *subsc } if conn.transport == sub.definition.Transport && conn.matcher(sub.definition.SubscriptionRef) { if _, ok := conn.dispatchers[*sub.definition.ID]; !ok { - dispatcher := newEventDispatcher(sm.ctx, conn.ei, sm.database, sm.data, sm.definitions, conn.id, sub, sm.eventNotifier, sm.cel) + dispatcher := newEventDispatcher(sm.ctx, conn.ei, sm.database, sm.data, sm.definitions, conn.id, sub, sm.eventNotifier, sm.cel, sm.txHelper) conn.dispatchers[*sub.definition.ID] = dispatcher dispatcher.start() } @@ -510,7 +513,7 @@ func (sm *subscriptionManager) ephemeralSubscription(ei events.Plugin, connID, n } // Create the dispatcher, and start immediately - dispatcher := newEventDispatcher(sm.ctx, ei, sm.database, sm.data, sm.definitions, connID, newSub, sm.eventNotifier, sm.cel) + dispatcher := newEventDispatcher(sm.ctx, ei, sm.database, sm.data, sm.definitions, connID, newSub, sm.eventNotifier, sm.cel, sm.txHelper) dispatcher.start() conn.dispatchers[*subID] = dispatcher diff --git a/internal/events/subscription_manager_test.go b/internal/events/subscription_manager_test.go index b1dc611d71..0fd6d6331c 100644 --- a/internal/events/subscription_manager_test.go +++ b/internal/events/subscription_manager_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/definitionsmocks" @@ -39,6 +40,7 @@ func newTestSubManager(t *testing.T, mei *eventsmocks.PluginAll) (*subscriptionM mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} msh := &definitionsmocks.DefinitionHandlers{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) ctx, cancel := context.WithCancel(context.Background()) mei.On("Name").Return("ut") @@ -47,7 +49,7 @@ func newTestSubManager(t *testing.T, mei *eventsmocks.PluginAll) 
(*subscriptionM mei.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("GetEvents", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Event{}, nil, nil).Maybe() mdi.On("GetOffset", mock.Anything, mock.Anything, mock.Anything).Return(&fftypes.Offset{RowID: 3333333, Current: 0}, nil).Maybe() - sm, err := newSubscriptionManager(ctx, mdi, mdm, newEventNotifier(ctx, "ut"), msh) + sm, err := newSubscriptionManager(ctx, mdi, mdm, newEventNotifier(ctx, "ut"), msh, txHelper) assert.NoError(t, err) sm.transports = map[string]events.Plugin{ "ut": mei, @@ -164,9 +166,10 @@ func TestRegisterEphemeralSubscriptionsFail(t *testing.T) { func TestSubManagerBadPlugin(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) config.Reset() config.Set(config.EventTransportsEnabled, []string{"!unknown!"}) - _, err := newSubscriptionManager(context.Background(), mdi, mdm, newEventNotifier(context.Background(), "ut"), nil) + _, err := newSubscriptionManager(context.Background(), mdi, mdm, newEventNotifier(context.Background(), "ut"), nil, txHelper) assert.Regexp(t, "FF10172", err) } diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index edd34c11a1..d94cfa499f 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -197,7 +197,6 @@ func (or *orchestrator) Init(ctx context.Context, cancelCtx context.CancelFunc) if err == nil { err = or.initNamespaces(ctx) } - or.txHelper = txcommon.NewTransactionHelper(or.database) // Bind together the blockchain interface callbacks, with the events manager or.bc.bi = or.blockchain or.bc.ei = or.events @@ -460,6 +459,10 @@ func (or *orchestrator) initComponents(ctx context.Context) (err error) { } } + if or.txHelper == nil { + or.txHelper = txcommon.NewTransactionHelper(or.database, or.data) + } + if or.identity == nil { or.identity, err = 
identity.NewIdentityManager(ctx, or.database, or.identityPlugin, or.blockchain, or.data) if err != nil { @@ -468,7 +471,7 @@ func (or *orchestrator) initComponents(ctx context.Context) (err error) { } if or.batch == nil { - or.batch, err = batch.NewBatchManager(ctx, or, or.database, or.data) + or.batch, err = batch.NewBatchManager(ctx, or, or.database, or.data, or.txHelper) if err != nil { return err } @@ -501,14 +504,14 @@ func (or *orchestrator) initComponents(ctx context.Context) (err error) { } if or.assets == nil { - or.assets, err = assets.NewAssetManager(ctx, or.database, or.identity, or.data, or.syncasync, or.broadcast, or.messaging, or.tokens, or.metrics, or.operations) + or.assets, err = assets.NewAssetManager(ctx, or.database, or.identity, or.data, or.syncasync, or.broadcast, or.messaging, or.tokens, or.metrics, or.operations, or.txHelper) if err != nil { return err } } if or.contracts == nil { - or.contracts, err = contracts.NewContractManager(ctx, or.database, or.broadcast, or.identity, or.blockchain, or.operations) + or.contracts, err = contracts.NewContractManager(ctx, or.database, or.data, or.broadcast, or.identity, or.blockchain, or.operations, or.txHelper) if err != nil { return err } @@ -517,7 +520,7 @@ func (or *orchestrator) initComponents(ctx context.Context) (err error) { or.definitions = definitions.NewDefinitionHandlers(or.database, or.blockchain, or.dataexchange, or.data, or.identity, or.broadcast, or.messaging, or.assets, or.contracts) if or.events == nil { - or.events, err = events.NewEventManager(ctx, or, or.sharedstorage, or.database, or.blockchain, or.identity, or.definitions, or.data, or.broadcast, or.messaging, or.assets, or.metrics) + or.events, err = events.NewEventManager(ctx, or, or.sharedstorage, or.database, or.blockchain, or.identity, or.definitions, or.data, or.broadcast, or.messaging, or.assets, or.metrics, or.txHelper) if err != nil { return err } diff --git a/internal/orchestrator/orchestrator_test.go 
b/internal/orchestrator/orchestrator_test.go index dc85133e12..98939f6ea1 100644 --- a/internal/orchestrator/orchestrator_test.go +++ b/internal/orchestrator/orchestrator_test.go @@ -516,6 +516,7 @@ func TestInitIdentityComponentFail(t *testing.T) { or := newTestOrchestrator() or.database = nil or.identity = nil + or.txHelper = nil err := or.initComponents(context.Background()) assert.Regexp(t, "FF10128", err) } diff --git a/internal/txcommon/event_enrich_test.go b/internal/txcommon/event_enrich_test.go index 9cfff79a80..63dc1623d3 100644 --- a/internal/txcommon/event_enrich_test.go +++ b/internal/txcommon/event_enrich_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -29,7 +30,8 @@ import ( func TestEnrichMessageConfirmed(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() // Setup the IDs @@ -54,7 +56,8 @@ func TestEnrichMessageConfirmed(t *testing.T) { func TestEnrichMessageFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() // Setup the IDs @@ -76,7 +79,8 @@ func TestEnrichMessageFail(t *testing.T) { func TestEnrichMessageRejected(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() // Setup the IDs @@ -101,7 +105,8 @@ func TestEnrichMessageRejected(t *testing.T) { func TestEnrichTxSubmitted(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := 
NewTransactionHelper(mdi, mdm) ctx := context.Background() // Setup the IDs @@ -126,7 +131,8 @@ func TestEnrichTxSubmitted(t *testing.T) { func TestEnrichTxFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() // Setup the IDs @@ -148,7 +154,8 @@ func TestEnrichTxFail(t *testing.T) { func TestEnrichBlockchainEventSubmitted(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() // Setup the IDs @@ -173,7 +180,8 @@ func TestEnrichBlockchainEventSubmitted(t *testing.T) { func TestEnrichBlockchainEventFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() // Setup the IDs diff --git a/internal/txcommon/txcommon.go b/internal/txcommon/txcommon.go index d0b7fac37b..ffb71fd200 100644 --- a/internal/txcommon/txcommon.go +++ b/internal/txcommon/txcommon.go @@ -19,10 +19,14 @@ package txcommon import ( "context" "strings" + "time" + "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/karlseguin/ccache" ) type Helper interface { @@ -30,16 +34,45 @@ type Helper interface { PersistTransaction(ctx context.Context, ns string, id *fftypes.UUID, txType fftypes.TransactionType, blockchainTXID string) (valid bool, err error) AddBlockchainTX(ctx context.Context, id *fftypes.UUID, blockchainTXID string) error EnrichEvent(ctx context.Context, event *fftypes.Event) (*fftypes.EnrichedEvent, error) + GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID) (*fftypes.Transaction, 
error) } type transactionHelper struct { - database database.Plugin + database database.Plugin + data data.Manager + transactionCache *ccache.Cache + transactionCacheTTL time.Duration } -func NewTransactionHelper(di database.Plugin) Helper { - return &transactionHelper{ +func NewTransactionHelper(di database.Plugin, dm data.Manager) Helper { + t := &transactionHelper{ database: di, + data: dm, } + t.transactionCache = ccache.New( + // We use a LRU cache with a size-aware max + ccache.Configure(). + MaxSize(config.GetByteSize(config.MessageCacheTTL)), + ) + return t +} + +func (t *transactionHelper) updateTransactionsCache(tx *fftypes.Transaction) { + t.transactionCache.Set(tx.ID.String(), tx, t.transactionCacheTTL) +} + +func (t *transactionHelper) GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID) (*fftypes.Transaction, error) { + cached := t.transactionCache.Get(id.String()) + if cached != nil { + cached.Extend(t.transactionCacheTTL) + return cached.Value().(*fftypes.Transaction), nil + } + tx, err := t.database.GetTransactionByID(ctx, id) + if err != nil || tx == nil { + return tx, err + } + t.updateTransactionsCache(tx) + return tx, nil } // SubmitNewTransaction is called when there is a new transaction being submitted by the local node @@ -65,6 +98,7 @@ func (t *transactionHelper) SubmitNewTransaction(ctx context.Context, ns string, // PersistTransaction is called when we need to ensure a transaction exists in the DB, and optionally associate a new BlockchainTXID to it func (t *transactionHelper) PersistTransaction(ctx context.Context, ns string, id *fftypes.UUID, txType fftypes.TransactionType, blockchainTXID string) (valid bool, err error) { + // TODO: Consider if this can exploit caching tx, err := t.database.GetTransactionByID(ctx, id) if err != nil { return false, err @@ -91,15 +125,20 @@ func (t *transactionHelper) PersistTransaction(ctx context.Context, ns string, i return false, err } - } else if err = t.database.InsertTransaction(ctx, 
&fftypes.Transaction{ - ID: id, - Namespace: ns, - Type: txType, - BlockchainIDs: fftypes.NewFFStringArray(strings.ToLower(blockchainTXID)), - }); err != nil { - return false, err + } else { + tx = &fftypes.Transaction{ + ID: id, + Namespace: ns, + Type: txType, + BlockchainIDs: fftypes.NewFFStringArray(strings.ToLower(blockchainTXID)), + } + if err = t.database.InsertTransaction(ctx, tx); err != nil { + return false, err + } } + t.updateTransactionsCache(tx) + return true, nil } diff --git a/internal/txcommon/txcommon_test.go b/internal/txcommon/txcommon_test.go index 8576358ebd..6fe1358a96 100644 --- a/internal/txcommon/txcommon_test.go +++ b/internal/txcommon/txcommon_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" @@ -31,7 +32,8 @@ import ( func TestSubmitNewTransactionOK(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() var txidInserted *fftypes.UUID @@ -58,7 +60,8 @@ func TestSubmitNewTransactionOK(t *testing.T) { func TestSubmitNewTransactionFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() mdi.On("InsertTransaction", ctx, mock.Anything).Return(fmt.Errorf("pop")) @@ -73,7 +76,8 @@ func TestSubmitNewTransactionFail(t *testing.T) { func TestSubmitNewTransactionEventFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() mdi.On("InsertTransaction", ctx, mock.Anything).Return(nil) @@ -89,7 +93,8 @@ func 
TestSubmitNewTransactionEventFail(t *testing.T) { func TestPersistTransactionNew(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -113,7 +118,8 @@ func TestPersistTransactionNew(t *testing.T) { func TestPersistTransactionNewInserTFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -131,7 +137,8 @@ func TestPersistTransactionNewInserTFail(t *testing.T) { func TestPersistTransactionExistingAddBlockchainID(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -155,7 +162,8 @@ func TestPersistTransactionExistingAddBlockchainID(t *testing.T) { func TestPersistTransactionExistingUpdateFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -179,7 +187,8 @@ func TestPersistTransactionExistingUpdateFail(t *testing.T) { func TestPersistTransactionExistingNoChange(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -202,7 +211,8 @@ func TestPersistTransactionExistingNoChange(t *testing.T) { func TestPersistTransactionExistingNoBlockchainID(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := 
fftypes.NewUUID() @@ -225,7 +235,8 @@ func TestPersistTransactionExistingNoBlockchainID(t *testing.T) { func TestPersistTransactionExistingLookupFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -242,7 +253,8 @@ func TestPersistTransactionExistingLookupFail(t *testing.T) { func TestPersistTransactionExistingMismatchNS(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -265,7 +277,8 @@ func TestPersistTransactionExistingMismatchNS(t *testing.T) { func TestPersistTransactionExistingMismatchType(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -288,7 +301,8 @@ func TestPersistTransactionExistingMismatchType(t *testing.T) { func TestAddBlockchainTX(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -318,7 +332,8 @@ func TestAddBlockchainTX(t *testing.T) { func TestAddBlockchainTXGetFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -334,7 +349,8 @@ func TestAddBlockchainTXGetFail(t *testing.T) { func TestAddBlockchainTXUpdateFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := 
fftypes.NewUUID() @@ -357,7 +373,8 @@ func TestAddBlockchainTXUpdateFail(t *testing.T) { func TestAddBlockchainTXUnchanged(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -375,3 +392,31 @@ func TestAddBlockchainTXUnchanged(t *testing.T) { mdi.AssertExpectations(t) } + +func TestGetTransactionByIDCached(t *testing.T) { + + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) + ctx := context.Background() + + txid := fftypes.NewUUID() + mdi.On("GetTransactionByID", ctx, txid).Return(&fftypes.Transaction{ + ID: txid, + Namespace: "ns1", + Type: fftypes.TransactionTypeContractInvoke, + Created: fftypes.Now(), + BlockchainIDs: fftypes.FFStringArray{"0x111111"}, + }, nil).Once() + + tx, err := txHelper.GetTransactionByIDCached(ctx, txid) + assert.NoError(t, err) + assert.Equal(t, txid, tx.ID) + + tx, err = txHelper.GetTransactionByIDCached(ctx, txid) + assert.NoError(t, err) + assert.Equal(t, txid, tx.ID) + + mdi.AssertExpectations(t) + +} diff --git a/mocks/txcommonmocks/helper.go b/mocks/txcommonmocks/helper.go index e476e08d20..18c4a61333 100644 --- a/mocks/txcommonmocks/helper.go +++ b/mocks/txcommonmocks/helper.go @@ -51,6 +51,29 @@ func (_m *Helper) EnrichEvent(ctx context.Context, event *fftypes.Event) (*fftyp return r0, r1 } +// GetTransactionByIDCached provides a mock function with given fields: ctx, id +func (_m *Helper) GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID) (*fftypes.Transaction, error) { + ret := _m.Called(ctx, id) + + var r0 *fftypes.Transaction + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) *fftypes.Transaction); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.Transaction) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, 
*fftypes.UUID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // PersistTransaction provides a mock function with given fields: ctx, ns, id, txType, blockchainTXID func (_m *Helper) PersistTransaction(ctx context.Context, ns string, id *fftypes.UUID, txType fftypes.FFEnum, blockchainTXID string) (bool, error) { ret := _m.Called(ctx, ns, id, txType, blockchainTXID) From c8eda29af0250d646c126ea637ec43cdc1bfde66 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 10 Mar 2022 13:04:40 -0500 Subject: [PATCH 03/13] Use cache in event enrichment Signed-off-by: Peter Broadhurst --- internal/events/event_dispatcher_test.go | 44 +++++++++++++----------- internal/txcommon/event_enrich.go | 4 +-- internal/txcommon/event_enrich_test.go | 10 +++--- 3 files changed, 31 insertions(+), 27 deletions(-) diff --git a/internal/events/event_dispatcher_test.go b/internal/events/event_dispatcher_test.go index 0b1f58041c..5a43a2b90f 100644 --- a/internal/events/event_dispatcher_test.go +++ b/internal/events/event_dispatcher_test.go @@ -159,6 +159,7 @@ func TestEventDispatcherReadAheadOutOfOrderAcks(t *testing.T) { go ed.deliverEvents() mdi := ed.database.(*databasemocks.Plugin) mei := ed.transport.(*eventsmocks.PluginAll) + mdm := ed.data.(*datamocks.Manager) eventDeliveries := make(chan *fftypes.EventDelivery) deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -186,18 +187,18 @@ func TestEventDispatcherReadAheadOutOfOrderAcks(t *testing.T) { offsetUpdates <- v.(int64) } // Setup enrichment - mdi.On("GetMessageByID", mock.Anything, ref1).Return(&fftypes.Message{ + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref1}, - }, nil) - mdi.On("GetMessageByID", mock.Anything, ref2).Return(&fftypes.Message{ + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref2).Return(&fftypes.Message{ 
Header: fftypes.MessageHeader{ID: ref2}, - }, nil) - mdi.On("GetMessageByID", mock.Anything, ref3).Return(&fftypes.Message{ + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref3).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref3}, - }, nil) - mdi.On("GetMessageByID", mock.Anything, ref4).Return(&fftypes.Message{ + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref4).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref4}, - }, nil) + }, nil, true, nil) // Deliver a batch of messages batch1Done := make(chan struct{}) @@ -242,6 +243,7 @@ func TestEventDispatcherReadAheadOutOfOrderAcks(t *testing.T) { mdi.AssertExpectations(t) mei.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { @@ -260,6 +262,7 @@ func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { go ed.deliverEvents() mdi := ed.database.(*databasemocks.Plugin) + mdm := ed.data.(*datamocks.Manager) mei := ed.transport.(*eventsmocks.PluginAll) eventDeliveries := make(chan *fftypes.EventDelivery) @@ -279,18 +282,18 @@ func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { ev4 := fftypes.NewUUID() // Setup enrichment - mdi.On("GetMessageByID", mock.Anything, ref1).Return(&fftypes.Message{ + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref1}, - }, nil) - mdi.On("GetMessageByID", mock.Anything, ref2).Return(&fftypes.Message{ + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref2).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref2}, - }, nil) - mdi.On("GetMessageByID", mock.Anything, ref3).Return(&fftypes.Message{ + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref3).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref3}, - }, nil) - mdi.On("GetMessageByID", mock.Anything, ref4).Return(&fftypes.Message{ + }, nil, true, nil) + 
mdm.On("GetMessageWithDataCached", mock.Anything, ref4).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref4}, - }, nil) + }, nil, true, nil) // Deliver a batch of messages batch1Done := make(chan struct{}) @@ -331,6 +334,7 @@ func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { mdi.AssertExpectations(t) mei.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestEventDispatcherChangeEvents(t *testing.T) { @@ -407,8 +411,8 @@ func TestEnrichEventsFailGetMessages(t *testing.T) { ed, cancel := newTestEventDispatcher(sub) defer cancel() - mdi := ed.database.(*databasemocks.Plugin) - mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + mdm := ed.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(nil, nil, false, fmt.Errorf("pop")) id1 := fftypes.NewUUID() _, err := ed.enrichEvents([]fftypes.LocallySequenced{&fftypes.Event{ID: id1, Type: fftypes.EventTypeMessageConfirmed}}) @@ -843,8 +847,8 @@ func TestBufferedDeliveryEnrichFail(t *testing.T) { ed, cancel := newTestEventDispatcher(sub) defer cancel() - mdi := ed.database.(*databasemocks.Plugin) - mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + mdm := ed.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(nil, nil, false, fmt.Errorf("pop")) repoll, err := ed.bufferedDelivery([]fftypes.LocallySequenced{&fftypes.Event{ID: fftypes.NewUUID(), Type: fftypes.EventTypeMessageConfirmed}}) assert.False(t, repoll) diff --git a/internal/txcommon/event_enrich.go b/internal/txcommon/event_enrich.go index 8956f74688..145acbcdd2 100644 --- a/internal/txcommon/event_enrich.go +++ b/internal/txcommon/event_enrich.go @@ -29,13 +29,13 @@ func (t *transactionHelper) EnrichEvent(ctx context.Context, event *fftypes.Even switch event.Type { case fftypes.EventTypeTransactionSubmitted: - tx, err := t.database.GetTransactionByID(ctx, event.Reference) + tx, 
err := t.GetTransactionByIDCached(ctx, event.Reference) if err != nil { return nil, err } e.Transaction = tx case fftypes.EventTypeMessageConfirmed, fftypes.EventTypeMessageRejected: - msg, err := t.database.GetMessageByID(ctx, event.Reference) + msg, _, _, err := t.data.GetMessageWithDataCached(ctx, event.Reference) if err != nil { return nil, err } diff --git a/internal/txcommon/event_enrich_test.go b/internal/txcommon/event_enrich_test.go index 63dc1623d3..d0ee00f968 100644 --- a/internal/txcommon/event_enrich_test.go +++ b/internal/txcommon/event_enrich_test.go @@ -39,9 +39,9 @@ func TestEnrichMessageConfirmed(t *testing.T) { ev1 := fftypes.NewUUID() // Setup enrichment - mdi.On("GetMessageByID", mock.Anything, ref1).Return(&fftypes.Message{ + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref1}, - }, nil) + }, nil, true, nil) event := &fftypes.Event{ ID: ev1, @@ -65,7 +65,7 @@ func TestEnrichMessageFail(t *testing.T) { ev1 := fftypes.NewUUID() // Setup enrichment - mdi.On("GetMessageByID", mock.Anything, ref1).Return(nil, fmt.Errorf("pop")) + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(nil, nil, false, fmt.Errorf("pop")) event := &fftypes.Event{ ID: ev1, @@ -88,9 +88,9 @@ func TestEnrichMessageRejected(t *testing.T) { ev1 := fftypes.NewUUID() // Setup enrichment - mdi.On("GetMessageByID", mock.Anything, ref1).Return(&fftypes.Message{ + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&fftypes.Message{ Header: fftypes.MessageHeader{ID: ref1}, - }, nil) + }, nil, true, nil) event := &fftypes.Event{ ID: ev1, From f8920eda2a38d93a2d53c774f600b79d291d24f8 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 10 Mar 2022 14:29:49 -0500 Subject: [PATCH 04/13] Remove dup batch processor listing Signed-off-by: Peter Broadhurst --- internal/batch/batch_manager.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/batch/batch_manager.go 
b/internal/batch/batch_manager.go index b40c3595fc..1b110603ae 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -347,10 +347,14 @@ func (bm *batchManager) getProcessors() []*batchProcessor { bm.dispatcherMux.Lock() defer bm.dispatcherMux.Unlock() + exists := make(map[*batchProcessor]bool) var processors []*batchProcessor for _, d := range bm.dispatchers { for _, p := range d.processors { - processors = append(processors, p) + if !exists[p] { + processors = append(processors, p) + exists[p] = true + } } } return processors From fb901cfe7a365819519e422fd086e4f763780344 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Thu, 10 Mar 2022 16:29:47 -0500 Subject: [PATCH 05/13] Add DB tuning to keep conns option, and correct cache miss on group cache Signed-off-by: Peter Broadhurst --- internal/database/sqlcommon/config.go | 13 +++++++-- internal/database/sqlcommon/sqlcommon.go | 8 ++++++ internal/privatemessaging/groupmanager.go | 4 +-- .../privatemessaging/groupmanager_test.go | 12 ++++----- internal/privatemessaging/message_test.go | 12 +++------ internal/privatemessaging/privatemessaging.go | 2 +- internal/privatemessaging/recipients.go | 7 +++-- internal/privatemessaging/recipients_test.go | 10 +++---- pkg/fftypes/transaction.go | 6 +++++ pkg/fftypes/transaction_test.go | 27 +++++++++++++++++++ 10 files changed, 71 insertions(+), 30 deletions(-) create mode 100644 pkg/fftypes/transaction_test.go diff --git a/internal/database/sqlcommon/config.go b/internal/database/sqlcommon/config.go index c27a46244b..700ba03337 100644 --- a/internal/database/sqlcommon/config.go +++ b/internal/database/sqlcommon/config.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -31,6 +31,12 @@ const ( SQLConfDatasourceURL = "url" // SQLConfMaxConnections maximum connections to the database SQLConfMaxConnections = "maxConns" + // SQLConfMaxConnIdleTime maximum connections to the database + SQLConfMaxConnIdleTime = "maxConnIdleTime" + // SQLConfMaxIdleConns maximum connections to the database + SQLConfMaxIdleConns = "maxIdleConns" + // SQLConfMaxConnLifetime maximum connections to the database + SQLConfMaxConnLifetime = "maxConnLifetime" ) const ( @@ -41,5 +47,8 @@ func (s *SQLCommon) InitPrefix(provider Provider, prefix config.Prefix) { prefix.AddKnownKey(SQLConfMigrationsAuto, false) prefix.AddKnownKey(SQLConfDatasourceURL) prefix.AddKnownKey(SQLConfMigrationsDirectory, fmt.Sprintf(defaultMigrationsDirectoryTemplate, provider.MigrationsDir())) - prefix.AddKnownKey(SQLConfMaxConnections) // some providers may set a default + prefix.AddKnownKey(SQLConfMaxConnections) // some providers set a default + prefix.AddKnownKey(SQLConfMaxConnIdleTime, "1m") + prefix.AddKnownKey(SQLConfMaxIdleConns) // defaults to the max connections + prefix.AddKnownKey(SQLConfMaxConnLifetime) } diff --git a/internal/database/sqlcommon/sqlcommon.go b/internal/database/sqlcommon/sqlcommon.go index 34598f48de..8e3cec5152 100644 --- a/internal/database/sqlcommon/sqlcommon.go +++ b/internal/database/sqlcommon/sqlcommon.go @@ -69,6 +69,14 @@ func (s *SQLCommon) Init(ctx context.Context, provider Provider, prefix config.P connLimit := prefix.GetInt(SQLConfMaxConnections) if connLimit > 0 { s.db.SetMaxOpenConns(connLimit) + s.db.SetConnMaxIdleTime(prefix.GetDuration(SQLConfMaxConnIdleTime)) + maxIdleConns := prefix.GetInt(SQLConfMaxIdleConns) + if maxIdleConns <= 0 { + // By default we rely on the idle time, rather than a maximum number of conns to leave open + maxIdleConns = connLimit + } + s.db.SetMaxIdleConns(maxIdleConns) + s.db.SetConnMaxLifetime(prefix.GetDuration(SQLConfMaxConnLifetime)) } if connLimit > 1 { 
capabilities.Concurrency = true diff --git a/internal/privatemessaging/groupmanager.go b/internal/privatemessaging/groupmanager.go index 10d260ba61..fdc251b57d 100644 --- a/internal/privatemessaging/groupmanager.go +++ b/internal/privatemessaging/groupmanager.go @@ -157,7 +157,7 @@ func (gm *groupManager) GetGroups(ctx context.Context, filter database.AndFilter return gm.database.GetGroups(ctx, filter) } -func (gm *groupManager) getGroupNodes(ctx context.Context, groupHash *fftypes.Bytes32) (*fftypes.Group, []*fftypes.Identity, error) { +func (gm *groupManager) getGroupNodes(ctx context.Context, groupHash *fftypes.Bytes32, allowNil bool) (*fftypes.Group, []*fftypes.Identity, error) { if cached := gm.groupCache.Get(groupHash.String()); cached != nil { cached.Extend(gm.groupCacheTTL) @@ -166,7 +166,7 @@ func (gm *groupManager) getGroupNodes(ctx context.Context, groupHash *fftypes.By } group, err := gm.database.GetGroupByHash(ctx, groupHash) - if err != nil { + if err != nil || (allowNil && group == nil) { return nil, nil, err } if group == nil { diff --git a/internal/privatemessaging/groupmanager_test.go b/internal/privatemessaging/groupmanager_test.go index 559f762dd8..993f647ca2 100644 --- a/internal/privatemessaging/groupmanager_test.go +++ b/internal/privatemessaging/groupmanager_test.go @@ -416,13 +416,13 @@ func TestGetGroupNodesCache(t *testing.T) { }, }, nil).Once() - g, nodes, err := pm.getGroupNodes(pm.ctx, group.Hash) + g, nodes, err := pm.getGroupNodes(pm.ctx, group.Hash, false) assert.NoError(t, err) assert.Equal(t, *node1, *nodes[0].ID) assert.Equal(t, *group.Hash, *g.Hash) // Note this validates the cache as we only mocked the calls once - g, nodes, err = pm.getGroupNodes(pm.ctx, group.Hash) + g, nodes, err = pm.getGroupNodes(pm.ctx, group.Hash, false) assert.NoError(t, err) assert.Equal(t, *node1, *nodes[0].ID) assert.Equal(t, *group.Hash, *g.Hash) @@ -436,7 +436,7 @@ func TestGetGroupNodesGetGroupFail(t *testing.T) { mdi := 
pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - _, _, err := pm.getGroupNodes(pm.ctx, groupID) + _, _, err := pm.getGroupNodes(pm.ctx, groupID, false) assert.EqualError(t, err, "pop") } @@ -448,7 +448,7 @@ func TestGetGroupNodesGetGroupNotFound(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, nil) - _, _, err := pm.getGroupNodes(pm.ctx, groupID) + _, _, err := pm.getGroupNodes(pm.ctx, groupID, false) assert.Regexp(t, "FF10226", err) } @@ -470,7 +470,7 @@ func TestGetGroupNodesNodeLookupFail(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(group, nil).Once() mdi.On("GetIdentityByID", pm.ctx, node1).Return(nil, fmt.Errorf("pop")).Once() - _, _, err := pm.getGroupNodes(pm.ctx, group.Hash) + _, _, err := pm.getGroupNodes(pm.ctx, group.Hash, false) assert.EqualError(t, err, "pop") } @@ -491,7 +491,7 @@ func TestGetGroupNodesNodeLookupNotFound(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(group, nil).Once() mdi.On("GetIdentityByID", pm.ctx, node1).Return(nil, nil).Once() - _, _, err := pm.getGroupNodes(pm.ctx, group.Hash) + _, _, err := pm.getGroupNodes(pm.ctx, group.Hash, false) assert.Regexp(t, "FF10224", err) } diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index 6b11dbb6ec..96fee24bb1 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -91,9 +91,7 @@ func TestSendConfirmMessageE2EOk(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{}, nil, nil).Once() mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil).Once() - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil).Once() + 
mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() retMsg := &fftypes.Message{ Header: fftypes.MessageHeader{ @@ -234,9 +232,7 @@ func TestResolveAndSendBadInlineData(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil).Once() - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil).Once() + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mdm := pm.data.(*datamocks.Manager) mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) @@ -353,9 +349,7 @@ func TestMessagePrepare(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil).Once() - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil).Once() + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mdm := pm.data.(*datamocks.Manager) mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Return(nil) diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index 6e61bd9297..0bb2c05f3f 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -177,7 +177,7 @@ func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, state *batc } // Retrieve the group - group, nodes, err := pm.groupManager.getGroupNodes(ctx, batch.Group) + group, nodes, err := pm.groupManager.getGroupNodes(ctx, batch.Group, false /* fail if not found */) if err != nil { return err } diff --git a/internal/privatemessaging/recipients.go 
b/internal/privatemessaging/recipients.go index 4bffb9703b..ff693ec621 100644 --- a/internal/privatemessaging/recipients.go +++ b/internal/privatemessaging/recipients.go @@ -178,13 +178,12 @@ func (pm *privateMessaging) findOrGenerateGroup(ctx context.Context, in *fftypes } newCandidate.Seal() - filter := database.GroupQueryFactory.NewFilterLimit(ctx, 1).Eq("hash", newCandidate.Hash) - groups, _, err := pm.database.GetGroups(ctx, filter) + group, _, err = pm.getGroupNodes(ctx, newCandidate.Hash, true) if err != nil { return nil, false, err } - if len(groups) > 0 { - return groups[0], false, nil + if group != nil { + return group, false, nil } return newCandidate, true, nil } diff --git a/internal/privatemessaging/recipients_test.go b/internal/privatemessaging/recipients_test.go index 47ba503e1d..33ff8daaa5 100644 --- a/internal/privatemessaging/recipients_test.go +++ b/internal/privatemessaging/recipients_test.go @@ -43,7 +43,7 @@ func TestResolveMemberListNewGroupE2E(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{remoteNode}, nil, nil).Once() mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil).Once() - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{}, nil, nil) + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(nil, nil).Once() mdi.On("UpsertGroup", pm.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) mim := pm.identity.(*identitymanagermocks.Manager) @@ -114,9 +114,7 @@ func TestResolveMemberListExistingGroup(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil) - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil) + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: 
fftypes.NewRandB32()}, nil, nil).Once() mim := pm.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookup", pm.ctx, "org1").Return(localOrg, false, nil) mim.On("GetNodeOwnerOrg", pm.ctx).Return(localNode, nil) @@ -182,7 +180,7 @@ func TestResolveMemberListGetGroupsFail(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil) - mdi.On("GetGroups", pm.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) mim := pm.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookup", pm.ctx, "org1").Return(localOrg, false, nil) mim.On("GetNodeOwnerOrg", pm.ctx).Return(localNode, nil) @@ -321,7 +319,7 @@ func TestResolveMemberNodeOwnedParentOrg(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{}, nil, nil).Once() mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil) - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{{Hash: fftypes.NewRandB32()}}, nil, nil) + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mim := pm.identity.(*identitymanagermocks.Manager) mim.On("GetNodeOwnerOrg", pm.ctx).Return(parentOrg, nil) mim.On("CachedIdentityLookup", pm.ctx, "org1").Return(childOrg, false, nil) diff --git a/pkg/fftypes/transaction.go b/pkg/fftypes/transaction.go index 0489766426..36ccd06847 100644 --- a/pkg/fftypes/transaction.go +++ b/pkg/fftypes/transaction.go @@ -18,6 +18,8 @@ package fftypes type TransactionType = FFEnum +const transactionBaseSizeEstimate = int64(256) + var ( // TransactionTypeNone deprecated - replaced by TransactionTypeUnpinned TransactionTypeNone TransactionType = ffEnum("txtype", "none") @@ -76,3 +78,7 @@ type 
TransactionStatus struct { Status OpStatus `json:"status"` Details []*TransactionStatusDetails `json:"details"` } + +func (tx *Transaction) Size() int64 { + return transactionBaseSizeEstimate // currently a static size assessment for caching +} diff --git a/pkg/fftypes/transaction_test.go b/pkg/fftypes/transaction_test.go new file mode 100644 index 0000000000..366f886c3c --- /dev/null +++ b/pkg/fftypes/transaction_test.go @@ -0,0 +1,27 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fftypes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTXSizeEstimate(t *testing.T) { + assert.Equal(t, transactionBaseSizeEstimate, (&Transaction{}).Size()) +} From 887839e7af925e0c681e72bae955b8acd28a6995 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 00:21:05 -0500 Subject: [PATCH 06/13] Explore more workers with better latency Signed-off-by: Peter Broadhurst --- internal/config/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 01d5c5d742..624dfb3fca 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -351,8 +351,8 @@ func Reset() { viper.SetDefault(string(MessageCacheSize), "50Mb") viper.SetDefault(string(MessageCacheTTL), "5m") viper.SetDefault(string(MessageWriterBatchMaxInserts), 200) - viper.SetDefault(string(MessageWriterBatchTimeout), "75ms") - viper.SetDefault(string(MessageWriterCount), 3) + viper.SetDefault(string(MessageWriterBatchTimeout), "50ms") + viper.SetDefault(string(MessageWriterCount), 10) viper.SetDefault(string(NamespacesDefault), "default") viper.SetDefault(string(NamespacesPredefined), fftypes.JSONObjectArray{{"name": "default", "description": "Default predefined namespace"}}) viper.SetDefault(string(OrchestratorStartupAttempts), 5) From 3a0f384aaaa3bf4d3681293187683d115d564d9d Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 00:30:50 -0500 Subject: [PATCH 07/13] Explore more workers with better latency Signed-off-by: Peter Broadhurst --- internal/config/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 624dfb3fca..f435753d0c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -351,8 +351,8 @@ func Reset() { viper.SetDefault(string(MessageCacheSize), "50Mb") viper.SetDefault(string(MessageCacheTTL), "5m") 
viper.SetDefault(string(MessageWriterBatchMaxInserts), 200) - viper.SetDefault(string(MessageWriterBatchTimeout), "50ms") - viper.SetDefault(string(MessageWriterCount), 10) + viper.SetDefault(string(MessageWriterBatchTimeout), "25ms") + viper.SetDefault(string(MessageWriterCount), 5) viper.SetDefault(string(NamespacesDefault), "default") viper.SetDefault(string(NamespacesPredefined), fftypes.JSONObjectArray{{"name": "default", "description": "Default predefined namespace"}}) viper.SetDefault(string(OrchestratorStartupAttempts), 5) From f56bbf1a4d0e5041d0d5f6f5128ccda8ea8f1d54 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 11:26:46 -0500 Subject: [PATCH 08/13] Ensure we request blob public refs Signed-off-by: Peter Broadhurst --- internal/events/aggregator.go | 12 +++++++----- internal/events/aggregator_test.go | 14 +++++++------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/internal/events/aggregator.go b/internal/events/aggregator.go index 539ca72265..a8f3144ecb 100644 --- a/internal/events/aggregator.go +++ b/internal/events/aggregator.go @@ -337,11 +337,13 @@ func (ag *aggregator) checkOnchainConsistency(ctx context.Context, msg *fftypes. func (ag *aggregator) processMessage(ctx context.Context, manifest *fftypes.BatchManifest, pin *fftypes.Pin, msgBaseIndex int64, msgEntry *fftypes.MessageManifestEntry, state *batchState) (err error) { l := log.L(ctx) - var cros []data.CacheReadOption + var cro data.CacheReadOption if pin.Masked { - cros = []data.CacheReadOption{data.CRORequirePins} + cro = data.CRORequirePins + } else { + cro = data.CRORequirePublicBlobRefs } - msg, data, dataAvailable, err := ag.data.GetMessageWithDataCached(ctx, msgEntry.ID, cros...) 
+ msg, data, dataAvailable, err := ag.data.GetMessageWithDataCached(ctx, msgEntry.ID, cro) if err != nil { return err } @@ -534,14 +536,14 @@ func (ag *aggregator) resolveBlobs(ctx context.Context, data fftypes.DataArray) return false, err } if blob != nil { - l.Debugf("Blob '%s' downloaded from shared storage to local DX with ref '%s'", blob.Hash, blob.PayloadRef) + l.Debugf("Blob '%s' for data %s downloaded from shared storage to local DX with ref '%s'", blob.Hash, d.ID, blob.PayloadRef) continue } } // If we've reached here, the data isn't available yet. // This isn't an error, we just need to wait for it to arrive. - l.Debugf("Blob '%s' not available", d.Blob.Hash) + l.Debugf("Blob '%s' not available for data %s", d.Blob.Hash, d.ID) return false, nil } diff --git a/internal/events/aggregator_test.go b/internal/events/aggregator_test.go index fe338608bb..eefec9d436 100644 --- a/internal/events/aggregator_test.go +++ b/internal/events/aggregator_test.go @@ -428,7 +428,7 @@ func TestAggregationBroadcast(t *testing.T) { // Do not resolve any pins earlier mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) // Validate the message is ok - mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) // Insert the confirmed event mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { @@ -521,7 +521,7 @@ func TestAggregationMigratedBroadcast(t *testing.T) { // Do not resolve any pins earlier mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) // Validate the message is ok - mdm.On("GetMessageWithDataCached", ag.ctx, 
batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) // Insert the confirmed event mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { @@ -841,7 +841,7 @@ func TestProcessSkipDupMsg(t *testing.T) { mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(batch.Payload.Messages[0], nil, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], nil, true, nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: batchID, Index: 0, Hash: fftypes.NewRandB32()}, @@ -879,7 +879,7 @@ func TestProcessMsgFailGetPins(t *testing.T) { mdi.On("GetPins", mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(batch.Payload.Messages[0], nil, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], nil, true, nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: batchID, Index: 0, Hash: fftypes.NewRandB32()}, @@ -1010,7 +1010,7 @@ func TestProcessMsgFailDispatch(t *testing.T) { } mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg, nil, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePublicBlobRefs).Return(msg, nil, true, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, 
mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) @@ -1572,8 +1572,8 @@ func TestDispatchBroadcastQueuesLaterDispatch(t *testing.T) { mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID).Return(msg1, fftypes.DataArray{}, true, nil).Once() - mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID).Return(msg2, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID, data.CRORequirePublicBlobRefs).Return(msg1, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID, data.CRORequirePublicBlobRefs).Return(msg2, fftypes.DataArray{}, true, nil).Once() mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetPins", ag.ctx, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) From 3190e4791bfc05de61ea57a670d7007ca6dc3cbc Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 12:24:39 -0500 Subject: [PATCH 09/13] Move publish of blobs to batch processor routine Signed-off-by: Peter Broadhurst --- internal/broadcast/definition.go | 6 +- internal/broadcast/manager.go | 45 +++-- internal/broadcast/manager_test.go | 207 +++++++++++++++++----- internal/broadcast/message.go | 11 +- internal/broadcast/message_test.go | 145 +-------------- internal/broadcast/operations.go | 2 + internal/data/data_manager.go | 65 +++---- internal/data/data_manager_test.go | 93 +++++----- internal/data/message_writer.go | 15 +- internal/data/message_writer_test.go | 4 +- internal/privatemessaging/message.go | 2 +- internal/privatemessaging/message_test.go | 14 +- internal/restclient/ffresty.go | 10 +- mocks/datamocks/manager.go | 23 +-- 14 files changed, 293 insertions(+), 349 deletions(-) diff --git a/internal/broadcast/definition.go b/internal/broadcast/definition.go index 7778f99eed..c466fecea5 100644 --- a/internal/broadcast/definition.go +++ 
b/internal/broadcast/definition.go @@ -87,10 +87,8 @@ func (bm *broadcastManager) broadcastDefinitionCommon(ctx context.Context, ns st }, }, }, - ResolvedData: data.Resolved{ - NewData: fftypes.DataArray{d}, - AllData: fftypes.DataArray{d}, - }, + NewData: fftypes.DataArray{d}, + AllData: fftypes.DataArray{d}, } // Broadcast the message diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go index 942211cf02..82674a4ec9 100644 --- a/internal/broadcast/manager.go +++ b/internal/broadcast/manager.go @@ -122,6 +122,12 @@ func (bm *broadcastManager) Name() string { } func (bm *broadcastManager) dispatchBatch(ctx context.Context, state *batch.DispatchState) error { + + // Ensure all the blobs are published + if err := bm.publishBlobs(ctx, state.Payload.Data); err != nil { + return err + } + // The completed SharedStorage upload op := fftypes.NewOperation( bm.sharedstorage, @@ -144,21 +150,32 @@ func (bm *broadcastManager) dispatchBatch(ctx context.Context, state *batch.Disp return bm.batchpin.SubmitPinnedBatch(ctx, &state.Persisted, state.Pins) } -func (bm *broadcastManager) publishBlobs(ctx context.Context, newMsg *data.NewMessage) error { - for _, d := range newMsg.ResolvedData.DataToPublish { - // Stream from the local data exchange ... - reader, err := bm.exchange.DownloadBLOB(ctx, d.Blob.PayloadRef) - if err != nil { - return i18n.WrapError(ctx, err, i18n.MsgDownloadBlobFailed, d.Blob.PayloadRef) - } - defer reader.Close() - - // ... 
to the shared storage - d.Data.Blob.Public, err = bm.sharedstorage.PublishData(ctx, reader) - if err != nil { - return err +func (bm *broadcastManager) publishBlobs(ctx context.Context, data fftypes.DataArray) error { + for _, d := range data { + // We only need to send a blob if there is one, and it's not been uploaded to the shared storage + if d.Blob != nil && d.Blob.Hash != nil && d.Blob.Public == "" { + blob, err := bm.database.GetBlobMatchingHash(ctx, d.Blob.Hash) + if err != nil { + return err + } + if blob == nil { + return i18n.NewError(ctx, i18n.MsgBlobNotFound, d.Blob.Hash) + } + + // Stream from the local data exchange ... + reader, err := bm.exchange.DownloadBLOB(ctx, blob.PayloadRef) + if err != nil { + return i18n.WrapError(ctx, err, i18n.MsgDownloadBlobFailed, blob.PayloadRef) + } + defer reader.Close() + + // ... to the shared storage + d.Blob.Public, err = bm.sharedstorage.PublishData(ctx, reader) + if err != nil { + return err + } + log.L(ctx).Infof("Published blob with hash '%s' for data '%s' to shared storage: '%s'", d.Blob.Hash, d.ID, d.Blob.Public) } - log.L(ctx).Infof("Published blob with hash '%s' for data '%s' to shared storage: '%s'", d.Blob.Hash, d.Data.ID, d.Data.Blob.Public) } return nil diff --git a/internal/broadcast/manager_test.go b/internal/broadcast/manager_test.go index 818e4fb019..6e95c3c948 100644 --- a/internal/broadcast/manager_test.go +++ b/internal/broadcast/manager_test.go @@ -121,10 +121,8 @@ func TestBroadcastMessageGood(t *testing.T) { }, }, }, - ResolvedData: data.Resolved{ - AllData: fftypes.DataArray{ - {ID: dataID, Hash: dataHash}, - }, + AllData: fftypes.DataArray{ + {ID: dataID, Hash: dataHash}, }, } @@ -170,6 +168,31 @@ func TestBroadcastMessageBad(t *testing.T) { } +func TestDispatchBatchBlobsFaill(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + blobHash := fftypes.NewRandB32() + state := &batch.DispatchState{ + Payload: fftypes.BatchPayload{ + Data: []*fftypes.Data{ + {ID: 
fftypes.NewUUID(), Blob: &fftypes.BlobRef{ + Hash: blobHash, + }}, + }, + }, + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, + } + + mdi := bm.database.(*databasemocks.Plugin) + mdi.On("GetBlobMatchingHash", bm.ctx, blobHash).Return(nil, fmt.Errorf("pop")) + + err := bm.dispatchBatch(bm.ctx, state) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + func TestDispatchBatchInsertOpFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() @@ -276,81 +299,173 @@ func TestDispatchBatchSubmitBroadcastFail(t *testing.T) { mom.AssertExpectations(t) } +func TestPublishBlobsPublishOk(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + mdx := bm.exchange.(*dataexchangemocks.Plugin) + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mdi := bm.database.(*databasemocks.Plugin) + + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } + + var capturedReader io.ReadCloser + ctx := context.Background() + mdi.On("GetBlobMatchingHash", ctx, blob.Hash).Return(blob, nil) + mdx.On("DownloadBLOB", ctx, "blob/1").Return(ioutil.NopCloser(bytes.NewReader([]byte(`some data`))), nil) + mps.On("PublishData", ctx, mock.MatchedBy(func(reader io.ReadCloser) bool { + capturedReader = reader + return true + })).Return("payload-ref1", nil) + + data := &fftypes.Data{ + ID: fftypes.NewUUID(), + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + }, + } + + err := bm.publishBlobs(ctx, fftypes.DataArray{data}) + assert.NoError(t, err) + assert.Equal(t, "payload-ref1", data.Blob.Public) + + b, err := ioutil.ReadAll(capturedReader) + assert.NoError(t, err) + assert.Equal(t, "some data", string(b)) + + mdi.AssertExpectations(t) + mdx.AssertExpectations(t) + mps.AssertExpectations(t) + +} + func TestPublishBlobsPublishFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() mdx := bm.exchange.(*dataexchangemocks.Plugin) mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) - mim := 
bm.identity.(*identitymanagermocks.Manager) + mdi := bm.database.(*databasemocks.Plugin) - blobHash := fftypes.NewRandB32() + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } dataID := fftypes.NewUUID() + var capturedReader io.ReadCloser ctx := context.Background() + mdi.On("GetBlobMatchingHash", ctx, blob.Hash).Return(blob, nil) mdx.On("DownloadBLOB", ctx, "blob/1").Return(ioutil.NopCloser(bytes.NewReader([]byte(`some data`))), nil) mps.On("PublishData", ctx, mock.MatchedBy(func(reader io.ReadCloser) bool { - b, err := ioutil.ReadAll(reader) - assert.NoError(t, err) - assert.Equal(t, "some data", string(b)) + capturedReader = reader return true })).Return("", fmt.Errorf("pop")) - mim.On("ResolveInputIdentity", ctx, mock.Anything).Return(nil) - - err := bm.publishBlobs(ctx, &data.NewMessage{ - ResolvedData: data.Resolved{ - DataToPublish: []*fftypes.DataAndBlob{ - { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", - }, - }, + + err := bm.publishBlobs(ctx, fftypes.DataArray{ + { + ID: dataID, + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, }, }, }) assert.EqualError(t, err, "pop") + b, err := ioutil.ReadAll(capturedReader) + assert.NoError(t, err) + assert.Equal(t, "some data", string(b)) + + mdi.AssertExpectations(t) + mdx.AssertExpectations(t) + mps.AssertExpectations(t) + } func TestPublishBlobsDownloadFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) mdx := bm.exchange.(*dataexchangemocks.Plugin) - mim := bm.identity.(*identitymanagermocks.Manager) + mdi := bm.database.(*databasemocks.Plugin) - blobHash := fftypes.NewRandB32() + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } dataID := fftypes.NewUUID() ctx := context.Background() + mdi.On("GetBlobMatchingHash", ctx, blob.Hash).Return(blob, nil) mdx.On("DownloadBLOB", ctx, 
"blob/1").Return(nil, fmt.Errorf("pop")) - mim.On("ResolveInputIdentity", ctx, mock.Anything).Return(nil) - - err := bm.publishBlobs(ctx, &data.NewMessage{ - ResolvedData: data.Resolved{ - DataToPublish: []*fftypes.DataAndBlob{ - { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", - }, - }, + + err := bm.publishBlobs(ctx, fftypes.DataArray{ + { + ID: dataID, + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, }, }, }) assert.Regexp(t, "FF10240", err) mdi.AssertExpectations(t) + mdx.AssertExpectations(t) + +} + +func TestPublishBlobsGetBlobFail(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + mdi := bm.database.(*databasemocks.Plugin) + + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } + dataID := fftypes.NewUUID() + + ctx := context.Background() + mdi.On("GetBlobMatchingHash", ctx, blob.Hash).Return(nil, fmt.Errorf("pop")) + + err := bm.publishBlobs(ctx, fftypes.DataArray{ + { + ID: dataID, + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + }, + }, + }) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) + +} + +func TestPublishBlobsGetBlobNotFound(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + mdi := bm.database.(*databasemocks.Plugin) + + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } + dataID := fftypes.NewUUID() + + ctx := context.Background() + mdi.On("GetBlobMatchingHash", ctx, blob.Hash).Return(nil, nil) + + err := bm.publishBlobs(ctx, fftypes.DataArray{ + { + ID: dataID, + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + }, + }, + }) + assert.Regexp(t, "FF10239", err) + + mdi.AssertExpectations(t) + } diff --git a/internal/broadcast/message.go b/internal/broadcast/message.go index d12d4e4375..eceb37a18c 100644 --- a/internal/broadcast/message.go +++ b/internal/broadcast/message.go @@ -105,13 +105,6 @@ func (s *broadcastSender) resolveAndSend(ctx 
context.Context, method sendMethod) if msgSizeEstimate > s.mgr.maxBatchPayloadLength { return i18n.NewError(ctx, i18n.MsgTooLargeBroadcast, float64(msgSizeEstimate)/1024, float64(s.mgr.maxBatchPayloadLength)/1024) } - - // Perform deferred processing - if len(s.msg.ResolvedData.DataToPublish) > 0 { - if err := s.mgr.publishBlobs(ctx, s.msg); err != nil { - return err - } - } s.resolved = true } return s.sendInternal(ctx, method) @@ -128,7 +121,7 @@ func (s *broadcastSender) resolve(ctx context.Context) error { } // The data manager is responsible for the heavy lifting of storing/validating all our in-line data elements - err := s.mgr.data.ResolveInlineDataBroadcast(ctx, s.msg) + err := s.mgr.data.ResolveInlineData(ctx, s.msg) return err } @@ -154,7 +147,7 @@ func (s *broadcastSender) sendInternal(ctx context.Context, method sendMethod) ( if err := s.mgr.data.WriteNewMessage(ctx, s.msg); err != nil { return err } - log.L(ctx).Infof("Sent broadcast message %s:%s sequence=%d datacount=%d", msg.Header.Namespace, msg.Header.ID, msg.Sequence, len(s.msg.ResolvedData.AllData)) + log.L(ctx).Infof("Sent broadcast message %s:%s sequence=%d datacount=%d", msg.Header.Namespace, msg.Header.ID, msg.Sequence, len(s.msg.AllData)) return err } diff --git a/internal/broadcast/message_test.go b/internal/broadcast/message_test.go index 397fce10e4..949ca7a82c 100644 --- a/internal/broadcast/message_test.go +++ b/internal/broadcast/message_test.go @@ -17,19 +17,14 @@ package broadcast import ( - "bytes" "context" "fmt" - "io" - "io/ioutil" "testing" "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/syncasync" - "github.com/hyperledger/firefly/mocks/dataexchangemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" - "github.com/hyperledger/firefly/mocks/sharedstoragemocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/pkg/fftypes" 
"github.com/stretchr/testify/assert" @@ -43,7 +38,7 @@ func TestBroadcastMessageOk(t *testing.T) { mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - mdm.On("ResolveInlineDataBroadcast", ctx, mock.Anything).Return(nil) + mdm.On("ResolveInlineData", ctx, mock.Anything).Return(nil) mdm.On("WriteNewMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) @@ -75,7 +70,7 @@ func TestBroadcastMessageWaitConfirmOk(t *testing.T) { mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - mdm.On("ResolveInlineDataBroadcast", ctx, mock.Anything).Return(nil) + mdm.On("ResolveInlineData", ctx, mock.Anything).Return(nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) replyMsg := &fftypes.Message{ @@ -113,78 +108,6 @@ func TestBroadcastMessageWaitConfirmOk(t *testing.T) { mdm.AssertExpectations(t) } -func TestBroadcastMessageWithBlobsOk(t *testing.T) { - bm, cancel := newTestBroadcast(t) - defer cancel() - mdm := bm.data.(*datamocks.Manager) - mdx := bm.exchange.(*dataexchangemocks.Plugin) - mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) - mim := bm.identity.(*identitymanagermocks.Manager) - - blobHash := fftypes.NewRandB32() - dataID := fftypes.NewUUID() - - ctx := context.Background() - mdm.On("ResolveInlineDataBroadcast", ctx, mock.Anything). - Run(func(args mock.Arguments) { - newMsg := args[1].(*data.NewMessage) - newMsg.ResolvedData.AllData = fftypes.DataArray{ - {ID: dataID, Hash: fftypes.NewRandB32()}, - } - newMsg.ResolvedData.DataToPublish = []*fftypes.DataAndBlob{ - { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", - }, - }, - } - }). 
- Return(nil) - mdx.On("DownloadBLOB", ctx, "blob/1").Return(ioutil.NopCloser(bytes.NewReader([]byte(`some data`))), nil) - var readStr string - mps.On("PublishData", ctx, mock.MatchedBy(func(reader io.ReadCloser) bool { - if readStr == "" { // called again in AssertExpectationa - b, err := ioutil.ReadAll(reader) - assert.NoError(t, err) - readStr = string(b) - } - assert.Equal(t, "some data", readStr) - return true - })).Return("payload-ref", nil).Once() - mdm.On("WriteNewMessage", ctx, mock.Anything, mock.Anything).Return(nil) - mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) - - msg, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ - Message: fftypes.Message{ - Header: fftypes.MessageHeader{ - SignerRef: fftypes.SignerRef{ - Author: "did:firefly:org/abcd", - Key: "0x12345", - }, - }, - }, - InlineData: fftypes.InlineData{ - {Blob: &fftypes.BlobRef{ - Hash: blobHash, - }}, - }, - }, false) - assert.NoError(t, err) - assert.Equal(t, "ns1", msg.Header.Namespace) - - mdx.AssertExpectations(t) - mps.AssertExpectations(t) - mdm.AssertExpectations(t) - mim.AssertExpectations(t) -} - func TestBroadcastMessageTooLarge(t *testing.T) { bm, cancel := newTestBroadcast(t) bm.maxBatchPayloadLength = 1000000 @@ -193,7 +116,7 @@ func TestBroadcastMessageTooLarge(t *testing.T) { mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - mdm.On("ResolveInlineDataBroadcast", ctx, mock.Anything).Run( + mdm.On("ResolveInlineData", ctx, mock.Anything).Run( func(args mock.Arguments) { newMsg := args[1].(*data.NewMessage) newMsg.Message.Data = fftypes.DataRefs{ @@ -228,7 +151,7 @@ func TestBroadcastMessageBadInput(t *testing.T) { mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - mdm.On("ResolveInlineDataBroadcast", ctx, mock.Anything).Return(fmt.Errorf("pop")) + mdm.On("ResolveInlineData", ctx, mock.Anything).Return(fmt.Errorf("pop")) mim.On("ResolveInputSigningIdentity", ctx, "ns1", 
mock.Anything).Return(nil) _, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ @@ -259,64 +182,6 @@ func TestBroadcastMessageBadIdentity(t *testing.T) { mim.AssertExpectations(t) } -func TestPublishBlobsSendMessageFail(t *testing.T) { - bm, cancel := newTestBroadcast(t) - defer cancel() - mdm := bm.data.(*datamocks.Manager) - mdx := bm.exchange.(*dataexchangemocks.Plugin) - mim := bm.identity.(*identitymanagermocks.Manager) - - blobHash := fftypes.NewRandB32() - dataID := fftypes.NewUUID() - - ctx := context.Background() - mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) - mdm.On("ResolveInlineDataBroadcast", ctx, mock.Anything). - Run(func(args mock.Arguments) { - newMsg := args[1].(*data.NewMessage) - newMsg.ResolvedData.AllData = fftypes.DataArray{ - {ID: dataID, Hash: fftypes.NewRandB32()}, - } - newMsg.ResolvedData.DataToPublish = []*fftypes.DataAndBlob{ - { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", - }, - }, - } - }). 
- Return(nil) - mdx.On("DownloadBLOB", ctx, "blob/1").Return(nil, fmt.Errorf("pop")) - - _, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ - Message: fftypes.Message{ - Header: fftypes.MessageHeader{ - SignerRef: fftypes.SignerRef{ - Author: "did:firefly:org/abcd", - Key: "0x12345", - }, - }, - }, - InlineData: fftypes.InlineData{ - {Blob: &fftypes.BlobRef{ - Hash: blobHash, - }}, - }, - }, false) - assert.Regexp(t, "FF10240", err) - - mdm.AssertExpectations(t) - mdx.AssertExpectations(t) - mim.AssertExpectations(t) -} - func TestBroadcastPrepare(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() @@ -324,7 +189,7 @@ func TestBroadcastPrepare(t *testing.T) { mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - mdm.On("ResolveInlineDataBroadcast", ctx, mock.Anything).Return(nil) + mdm.On("ResolveInlineData", ctx, mock.Anything).Return(nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) msg := &fftypes.MessageInOut{ diff --git a/internal/broadcast/operations.go b/internal/broadcast/operations.go index 74c7fe2b90..571910f864 100644 --- a/internal/broadcast/operations.go +++ b/internal/broadcast/operations.go @@ -22,6 +22,7 @@ import ( "encoding/json" "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" ) @@ -78,6 +79,7 @@ func (bm *broadcastManager) RunOperation(ctx context.Context, op *fftypes.Prepar if err != nil { return false, err } + log.L(ctx).Infof("Published batch '%s' to shared storage: '%s'", data.Batch.ID, payloadRef) // Update the batch to store the payloadRef data.Batch.PayloadRef = payloadRef diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index 12b6ff2850..309afec8ba 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -39,8 +39,7 @@ type Manager interface { GetMessageDataCached(ctx 
context.Context, msg *fftypes.Message, options ...CacheReadOption) (data fftypes.DataArray, foundAll bool, err error) UpdateMessageCache(msg *fftypes.Message, data fftypes.DataArray) UpdateMessageIfCached(ctx context.Context, msg *fftypes.Message) - ResolveInlineDataPrivate(ctx context.Context, msg *NewMessage) error - ResolveInlineDataBroadcast(ctx context.Context, msg *NewMessage) error + ResolveInlineData(ctx context.Context, msg *NewMessage) error WriteNewMessage(ctx context.Context, newMsg *NewMessage) error VerifyNamespaceExists(ctx context.Context, ns string) error @@ -374,7 +373,7 @@ func (dm *dataManager) checkValidation(ctx context.Context, ns string, validator return nil } -func (dm *dataManager) validateInputData(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (data *fftypes.Data, blob *fftypes.Blob, err error) { +func (dm *dataManager) validateInputData(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (data *fftypes.Data, err error) { validator := inData.Validator datatype := inData.Datatype @@ -382,11 +381,12 @@ func (dm *dataManager) validateInputData(ctx context.Context, ns string, inData blobRef := inData.Blob if err := dm.checkValidation(ctx, ns, validator, datatype, value); err != nil { - return nil, nil, err + return nil, err } - if blob, err = dm.resolveBlob(ctx, blobRef); err != nil { - return nil, nil, err + blob, err := dm.resolveBlob(ctx, blobRef) + if err != nil { + return nil, err } // Ok, we're good to generate the full data payload and save it @@ -399,13 +399,13 @@ func (dm *dataManager) validateInputData(ctx context.Context, ns string, inData } err = data.Seal(ctx, blob) if err != nil { - return nil, nil, err + return nil, err } - return data, blob, nil + return data, nil } func (dm *dataManager) UploadJSON(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (*fftypes.Data, error) { - data, _, err := dm.validateInputData(ctx, ns, inData) + data, err := dm.validateInputData(ctx, ns, inData) if 
err != nil { return nil, err } @@ -415,34 +415,21 @@ func (dm *dataManager) UploadJSON(ctx context.Context, ns string, inData *fftype return data, err } -func (dm *dataManager) ResolveInlineDataPrivate(ctx context.Context, newMessage *NewMessage) error { - return dm.resolveInlineData(ctx, newMessage, false) -} - -// ResolveInlineDataBroadcast ensures the data object are stored, and returns a list of any data that does not currently -// have a shared storage reference, and hence must be published to sharedstorage before a broadcast message can be sent. -// We deliberately do NOT perform those publishes inside of this action, as we expect to be in a RunAsGroup (trnasaction) -// at this point, and hence expensive things like a multi-megabyte upload should be decoupled by our caller. -func (dm *dataManager) ResolveInlineDataBroadcast(ctx context.Context, newMessage *NewMessage) error { - return dm.resolveInlineData(ctx, newMessage, true) -} - -func (dm *dataManager) resolveInlineData(ctx context.Context, newMessage *NewMessage, broadcast bool) (err error) { +// ResolveInlineData processes an input message that is going to be stored, to see which of the data +// elements are new, and which are existing. It verifies everything that points to an existing +// reference, and returns a list of what data is new separately - so that it can be stored by the +// message writer when the sending code is ready. 
+func (dm *dataManager) ResolveInlineData(ctx context.Context, newMessage *NewMessage) (err error) { if newMessage.Message == nil { return i18n.NewError(ctx, i18n.MsgNilOrNullObject) } - r := &newMessage.ResolvedData inData := newMessage.Message.InlineData msg := newMessage.Message - r.AllData = make(fftypes.DataArray, len(newMessage.Message.InlineData)) - if broadcast { - r.DataToPublish = make([]*fftypes.DataAndBlob, 0, len(inData)) - } + newMessage.AllData = make(fftypes.DataArray, len(newMessage.Message.InlineData)) for i, dataOrValue := range inData { var d *fftypes.Data - var blob *fftypes.Blob switch { case dataOrValue.ID != nil: // If an ID is supplied, then it must be a reference to existing data @@ -453,31 +440,23 @@ func (dm *dataManager) resolveInlineData(ctx context.Context, newMessage *NewMes if d == nil { return i18n.NewError(ctx, i18n.MsgDataReferenceUnresolvable, i) } - if blob, err = dm.resolveBlob(ctx, d.Blob); err != nil { + if _, err = dm.resolveBlob(ctx, d.Blob); err != nil { return err } case dataOrValue.Value != nil || dataOrValue.Blob != nil: // We've got a Value, so we can validate + store it - if d, blob, err = dm.validateInputData(ctx, msg.Header.Namespace, dataOrValue); err != nil { + if d, err = dm.validateInputData(ctx, msg.Header.Namespace, dataOrValue); err != nil { return err } - r.NewData = append(r.NewData, d) + newMessage.NewData = append(newMessage.NewData, d) default: // We have nothing - this must be a mistake return i18n.NewError(ctx, i18n.MsgDataMissing, i) } - r.AllData[i] = d - - // If the data is being resolved for public broadcast, and there is a blob attachment, that blob - // needs to be published by our calller - if broadcast && blob != nil && d.Blob.Public == "" { - r.DataToPublish = append(r.DataToPublish, &fftypes.DataAndBlob{ - Data: d, - Blob: blob, - }) - } + newMessage.AllData[i] = d + } - newMessage.Message.Data = r.AllData.Refs() + newMessage.Message.Data = newMessage.AllData.Refs() return nil } @@ -529,7 
+508,7 @@ func (dm *dataManager) WriteNewMessage(ctx context.Context, newMsg *NewMessage) if err != nil { return err } - dm.UpdateMessageCache(&newMsg.Message.Message, newMsg.ResolvedData.AllData) + dm.UpdateMessageCache(&newMsg.Message.Message, newMsg.AllData) return nil } diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go index 0c652c4276..8be4529e84 100644 --- a/internal/data/data_manager_test.go +++ b/internal/data/data_manager_test.go @@ -166,12 +166,12 @@ func TestWriteNewMessageE2E(t *testing.T) { {Value: fftypes.JSONAnyPtr(`"message 2 - data C"`)}, } - err = dm.ResolveInlineDataPrivate(ctx, newMsg1) + err = dm.ResolveInlineData(ctx, newMsg1) assert.NoError(t, err) - err = dm.ResolveInlineDataPrivate(ctx, newMsg2) + err = dm.ResolveInlineData(ctx, newMsg2) assert.NoError(t, err) - allData := append(append(fftypes.DataArray{}, newMsg1.ResolvedData.NewData...), newMsg2.ResolvedData.NewData...) + allData := append(append(fftypes.DataArray{}, newMsg1.NewData...), newMsg2.NewData...) 
assert.Len(t, allData, 4) mdi.On("InsertMessages", mock.Anything, mock.MatchedBy(func(msgs []*fftypes.Message) bool { @@ -189,10 +189,10 @@ func TestWriteNewMessageE2E(t *testing.T) { dataByID[*data.ID] = true } return len(dataArray) == 4 && - dataByID[*newMsg1.ResolvedData.AllData[1].ID] && - dataByID[*newMsg1.ResolvedData.AllData[2].ID] && - dataByID[*newMsg2.ResolvedData.AllData[0].ID] && - dataByID[*newMsg2.ResolvedData.AllData[1].ID] + dataByID[*newMsg1.AllData[1].ID] && + dataByID[*newMsg1.AllData[2].ID] && + dataByID[*newMsg2.AllData[0].ID] && + dataByID[*newMsg2.AllData[1].ID] })).Return(nil).Once() results := make(chan error) @@ -397,9 +397,9 @@ func TestResolveInlineDataEmpty(t *testing.T) { }, } - err := dm.ResolveInlineDataPrivate(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Empty(t, newMsg.ResolvedData.AllData) + assert.Empty(t, newMsg.AllData) assert.Empty(t, newMsg.Message.Data) } @@ -417,17 +417,16 @@ func TestResolveInlineDataRefIDOnlyOK(t *testing.T) { Hash: dataHash, }, nil) - err := dm.ResolveInlineDataPrivate(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Len(t, newMsg.ResolvedData.AllData, 1) + assert.Len(t, newMsg.AllData, 1) assert.Len(t, newMsg.Message.Data, 1) - assert.Equal(t, dataID, newMsg.ResolvedData.AllData[0].ID) - assert.Equal(t, dataHash, newMsg.ResolvedData.AllData[0].Hash) - assert.Empty(t, newMsg.ResolvedData.NewData) - assert.Empty(t, newMsg.ResolvedData.DataToPublish) + assert.Equal(t, dataID, newMsg.AllData[0].ID) + assert.Equal(t, dataHash, newMsg.AllData[0].Hash) + assert.Empty(t, newMsg.NewData) } -func TestResolveInlineDataBroadcastDataToPublish(t *testing.T) { +func TestResolveInlineDataDataToPublish(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() mdi := dm.database.(*databasemocks.Plugin) @@ -448,20 +447,16 @@ func TestResolveInlineDataBroadcastDataToPublish(t *testing.T) { PayloadRef: "blob/1", }, nil) - err := 
dm.ResolveInlineDataBroadcast(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Len(t, newMsg.ResolvedData.AllData, 1) + assert.Len(t, newMsg.AllData, 1) assert.Len(t, newMsg.Message.Data, 1) - assert.Empty(t, newMsg.ResolvedData.NewData) - assert.Len(t, newMsg.ResolvedData.DataToPublish, 1) - assert.Equal(t, dataID, newMsg.ResolvedData.AllData[0].ID) - assert.Equal(t, dataHash, newMsg.ResolvedData.AllData[0].Hash) - assert.Len(t, newMsg.ResolvedData.DataToPublish, 1) - assert.Equal(t, newMsg.ResolvedData.AllData[0].ID, newMsg.ResolvedData.DataToPublish[0].Data.ID) - assert.Equal(t, "blob/1", newMsg.ResolvedData.DataToPublish[0].Blob.PayloadRef) + assert.Empty(t, newMsg.NewData) + assert.Equal(t, dataID, newMsg.AllData[0].ID) + assert.Equal(t, dataHash, newMsg.AllData[0].Hash) } -func TestResolveInlineDataBroadcastResolveBlobFail(t *testing.T) { +func TestResolveInlineDataResolveBlobFail(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() mdi := dm.database.(*databasemocks.Plugin) @@ -479,7 +474,7 @@ func TestResolveInlineDataBroadcastResolveBlobFail(t *testing.T) { }, nil) mdi.On("GetBlobMatchingHash", ctx, blobHash).Return(nil, fmt.Errorf("pop")) - err := dm.ResolveInlineDataBroadcast(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.EqualError(t, err, "pop") } @@ -496,7 +491,7 @@ func TestResolveInlineDataRefBadNamespace(t *testing.T) { Hash: dataHash, }, nil) - err := dm.ResolveInlineDataPrivate(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.Regexp(t, "FF10204", err) } @@ -513,7 +508,7 @@ func TestResolveInlineDataRefBadHash(t *testing.T) { Hash: dataHash, }, nil) - err := dm.ResolveInlineDataPrivate(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.Regexp(t, "FF10204", err) } @@ -521,7 +516,7 @@ func TestResolveInlineDataNilMsg(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() - err := dm.ResolveInlineDataPrivate(ctx, &NewMessage{}) + err 
:= dm.ResolveInlineData(ctx, &NewMessage{}) assert.Regexp(t, "FF10368", err) } @@ -534,7 +529,7 @@ func TestResolveInlineDataRefLookkupFail(t *testing.T) { mdi.On("GetDataByID", ctx, dataID, true).Return(nil, fmt.Errorf("pop")) - err := dm.ResolveInlineDataPrivate(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.EqualError(t, err, "pop") } @@ -550,13 +545,13 @@ func TestResolveInlineDataValueNoValidatorOK(t *testing.T) { {Value: fftypes.JSONAnyPtr(`{"some":"json"}`)}, } - err := dm.ResolveInlineDataPrivate(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Len(t, newMsg.ResolvedData.AllData, 1) - assert.Len(t, newMsg.ResolvedData.NewData, 1) + assert.Len(t, newMsg.AllData, 1) + assert.Len(t, newMsg.NewData, 1) assert.Len(t, newMsg.Message.Data, 1) - assert.NotNil(t, newMsg.ResolvedData.AllData[0].ID) - assert.NotNil(t, newMsg.ResolvedData.AllData[0].Hash) + assert.NotNil(t, newMsg.AllData[0].ID) + assert.NotNil(t, newMsg.AllData[0].Hash) } func TestResolveInlineDataValueWithValidation(t *testing.T) { @@ -592,12 +587,12 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { }, } - err := dm.ResolveInlineDataPrivate(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Len(t, newMsg.ResolvedData.AllData, 1) - assert.Len(t, newMsg.ResolvedData.NewData, 1) - assert.NotNil(t, newMsg.ResolvedData.AllData[0].ID) - assert.NotNil(t, newMsg.ResolvedData.AllData[0].Hash) + assert.Len(t, newMsg.AllData, 1) + assert.Len(t, newMsg.NewData, 1) + assert.NotNil(t, newMsg.AllData[0].ID) + assert.NotNil(t, newMsg.AllData[0].Hash) newMsg.Message.InlineData = fftypes.InlineData{ { @@ -608,7 +603,7 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { Value: fftypes.JSONAnyPtr(`{"not_allowed":"value"}`), }, } - err = dm.ResolveInlineDataPrivate(ctx, newMsg) + err = dm.ResolveInlineData(ctx, newMsg) assert.Regexp(t, "FF10198", err) } @@ -621,7 +616,7 @@ func 
TestResolveInlineDataNoRefOrValue(t *testing.T) { { /* missing */ }, } - err := dm.ResolveInlineDataPrivate(ctx, newMsg) + err := dm.ResolveInlineData(ctx, newMsg) assert.Regexp(t, "FF10205", err) } @@ -654,7 +649,7 @@ func TestValidateAndStoreLoadNilRef(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() - _, _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Validator: fftypes.ValidatorTypeJSON, Datatype: nil, }) @@ -667,7 +662,7 @@ func TestValidateAndStoreLoadValidatorUnknown(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Validator: "wrong!", Datatype: &fftypes.DatatypeRef{ Name: "customer", @@ -684,7 +679,7 @@ func TestValidateAndStoreLoadBadRef(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Datatype: &fftypes.DatatypeRef{ // Missing name }, @@ -698,7 +693,7 @@ func TestValidateAndStoreNotFound(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Datatype: &fftypes.DatatypeRef{ Name: "customer", Version: "0.0.1", @@ -714,7 +709,7 @@ func TestValidateAndStoreBlobError(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) blobHash := fftypes.NewRandB32() mdi.On("GetBlobMatchingHash", mock.Anything, 
blobHash).Return(nil, fmt.Errorf("pop")) - _, _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Blob: &fftypes.BlobRef{ Hash: blobHash, }, @@ -729,7 +724,7 @@ func TestValidateAndStoreBlobNotFound(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) blobHash := fftypes.NewRandB32() mdi.On("GetBlobMatchingHash", mock.Anything, blobHash).Return(nil, nil) - _, _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Blob: &fftypes.BlobRef{ Hash: blobHash, }, diff --git a/internal/data/message_writer.go b/internal/data/message_writer.go index 98c92f4912..7bcfef51f4 100644 --- a/internal/data/message_writer.go +++ b/internal/data/message_writer.go @@ -27,14 +27,9 @@ import ( ) type NewMessage struct { - Message *fftypes.MessageInOut - ResolvedData Resolved -} - -type Resolved struct { - AllData fftypes.DataArray - NewData fftypes.DataArray - DataToPublish []*fftypes.DataAndBlob + Message *fftypes.MessageInOut + AllData fftypes.DataArray + NewData fftypes.DataArray } // writeRequest is a combination of a message and a list of data that is new and needs to be @@ -100,7 +95,7 @@ func (mw *messageWriter) WriteNewMessage(ctx context.Context, newMsg *NewMessage } nmi := &writeRequest{ newMessage: &newMsg.Message.Message, - newData: newMsg.ResolvedData.NewData, + newData: newMsg.NewData, result: make(chan error), } select { @@ -112,7 +107,7 @@ func (mw *messageWriter) WriteNewMessage(ctx context.Context, newMsg *NewMessage } // Otherwise do it in-line on this context return mw.database.RunAsGroup(ctx, func(ctx context.Context) error { - return mw.writeMessages(ctx, []*fftypes.Message{&newMsg.Message.Message}, newMsg.ResolvedData.NewData) + return mw.writeMessages(ctx, []*fftypes.Message{&newMsg.Message.Message}, newMsg.NewData) }) } diff --git a/internal/data/message_writer_test.go 
b/internal/data/message_writer_test.go index 0f8bbea2e4..59ffb007b7 100644 --- a/internal/data/message_writer_test.go +++ b/internal/data/message_writer_test.go @@ -91,9 +91,7 @@ func TestWriteNewMessageSyncFallback(t *testing.T) { err := mw.WriteNewMessage(customCtx, &NewMessage{ Message: msg1, - ResolvedData: Resolved{ - NewData: fftypes.DataArray{data1}, - }, + NewData: fftypes.DataArray{data1}, }) assert.NoError(t, err) diff --git a/internal/privatemessaging/message.go b/internal/privatemessaging/message.go index 6f79d1a115..e2526c1ce6 100644 --- a/internal/privatemessaging/message.go +++ b/internal/privatemessaging/message.go @@ -141,7 +141,7 @@ func (s *messageSender) resolve(ctx context.Context) error { } // The data manager is responsible for the heavy lifting of storing/validating all our in-line data elements - err := s.mgr.data.ResolveInlineDataPrivate(ctx, s.msg) + err := s.mgr.data.ResolveInlineData(ctx, s.msg) return err } diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index 96fee24bb1..586ce6b568 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -85,7 +85,7 @@ func TestSendConfirmMessageE2EOk(t *testing.T) { mim.On("CachedIdentityLookupByID", pm.ctx, rootOrg.ID).Return(rootOrg, nil) mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Return(nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) mdm.On("WriteNewMessage", pm.ctx, mock.Anything).Return(nil).Once() mdi := pm.database.(*databasemocks.Plugin) @@ -138,7 +138,7 @@ func TestSendUnpinnedMessageE2EOk(t *testing.T) { groupID := fftypes.NewRandB32() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Return(nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) mdm.On("WriteNewMessage", pm.ctx, mock.Anything).Return(nil).Once() mdi := pm.database.(*databasemocks.Plugin) @@ 
-235,7 +235,7 @@ func TestResolveAndSendBadInlineData(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) message := &messageSender{ mgr: pm, @@ -277,7 +277,7 @@ func TestSendUnpinnedMessageTooLarge(t *testing.T) { dataID := fftypes.NewUUID() groupID := fftypes.NewRandB32() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Run(func(args mock.Arguments) { + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Run(func(args mock.Arguments) { newMsg := args[1].(*data.NewMessage) newMsg.Message.Data = fftypes.DataRefs{ {ID: dataID, Hash: fftypes.NewRandB32(), ValueSize: 100001}, @@ -352,7 +352,7 @@ func TestMessagePrepare(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Return(nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) message := pm.NewMessage("ns1", &fftypes.MessageInOut{ Message: fftypes.Message{ @@ -425,7 +425,7 @@ func TestSendUnpinnedMessageInsertFail(t *testing.T) { groupID := fftypes.NewRandB32() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Return(nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) mdm.On("WriteNewMessage", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")).Once() mdi := pm.database.(*databasemocks.Plugin) @@ -606,7 +606,7 @@ func TestRequestReplySuccess(t *testing.T) { Return(nil, nil) mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, mock.Anything).Return(nil) + mdm.On("ResolveInlineData", pm.ctx, 
mock.Anything).Return(nil) mdm.On("WriteNewMessage", pm.ctx, mock.Anything).Return(nil).Once() groupID := fftypes.NewRandB32() diff --git a/internal/restclient/ffresty.go b/internal/restclient/ffresty.go index 36f93c80b2..1c03a9c3d9 100644 --- a/internal/restclient/ffresty.go +++ b/internal/restclient/ffresty.go @@ -31,6 +31,7 @@ import ( "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/sirupsen/logrus" ) type retryCtxKey struct{} @@ -52,7 +53,12 @@ func OnAfterResponse(c *resty.Client, resp *resty.Response) { rctx := resp.Request.Context() rc := rctx.Value(retryCtxKey{}).(*retryCtx) elapsed := float64(time.Since(rc.start)) / float64(time.Millisecond) - log.L(rctx).Infof("<== %s %s [%d] (%.2fms)", resp.Request.Method, resp.Request.URL, resp.StatusCode(), elapsed) + level := logrus.DebugLevel + status := resp.StatusCode() + if status >= 300 { + level = logrus.ErrorLevel + } + log.L(rctx).Logf(level, "<== %s %s [%d] (%.2fms)", resp.Request.Method, resp.Request.URL, status, elapsed) } // New creates a new Resty client, using static configuration (from the config file) @@ -117,7 +123,7 @@ func New(ctx context.Context, staticConfig config.Prefix) *resty.Client { rctx = log.WithLogger(rctx, l) req.SetContext(rctx) } - log.L(rctx).Infof("==> %s %s%s", req.Method, url, req.URL) + log.L(rctx).Debugf("==> %s %s%s", req.Method, url, req.URL) return nil }) diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index f2e35335d6..1a8594774e 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -32,11 +32,6 @@ func (_m *Manager) CheckDatatype(ctx context.Context, ns string, datatype *fftyp return r0 } -// Close provides a mock function with given fields: -func (_m *Manager) Close() { - _m.Called() -} - // CopyBlobPStoDX provides a mock function with given fields: ctx, _a1 func (_m *Manager) CopyBlobPStoDX(ctx context.Context, _a1 
*fftypes.Data) (*fftypes.Blob, error) { ret := _m.Called(ctx, _a1) @@ -198,22 +193,8 @@ func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.Bat return r0, r1 } -// ResolveInlineDataBroadcast provides a mock function with given fields: ctx, msg -func (_m *Manager) ResolveInlineDataBroadcast(ctx context.Context, msg *data.NewMessage) error { - ret := _m.Called(ctx, msg) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *data.NewMessage) error); ok { - r0 = rf(ctx, msg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ResolveInlineDataPrivate provides a mock function with given fields: ctx, msg -func (_m *Manager) ResolveInlineDataPrivate(ctx context.Context, msg *data.NewMessage) error { +// ResolveInlineData provides a mock function with given fields: ctx, msg +func (_m *Manager) ResolveInlineData(ctx context.Context, msg *data.NewMessage) error { ret := _m.Called(ctx, msg) var r0 error From 4a20fb6b95e1b27e2028f67b45ce89db2b950414 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 12:27:53 -0500 Subject: [PATCH 10/13] Do not expect blob references to be filled in prior to batch dispatch Signed-off-by: Peter Broadhurst --- internal/batch/batch_manager.go | 10 +++------- internal/batch/batch_manager_test.go | 11 +++++------ 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 1b110603ae..8b134c873d 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -181,14 +181,10 @@ func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fft return processor, nil } -func (bm *batchManager) assembleMessageData(batchType fftypes.BatchType, msg *fftypes.Message) (retData fftypes.DataArray, err error) { - var cro []data.CacheReadOption - if batchType == fftypes.BatchTypeBroadcast { - cro = append(cro, data.CRORequirePublicBlobRefs) - } +func (bm *batchManager) 
assembleMessageData(msg *fftypes.Message) (retData fftypes.DataArray, err error) { var foundAll = false err = bm.retry.Do(bm.ctx, fmt.Sprintf("assemble message %s data", msg.Header.ID), func(attempt int) (retry bool, err error) { - retData, foundAll, err = bm.data.GetMessageDataCached(bm.ctx, msg, cro...) + retData, foundAll, err = bm.data.GetMessageDataCached(bm.ctx, msg) // continual retry for persistence error (distinct from not-found) return true, err }) @@ -240,7 +236,7 @@ func (bm *batchManager) messageSequencer() { continue } - data, err := bm.assembleMessageData(processor.conf.DispatcherOptions.BatchType, msg) + data, err := bm.assembleMessageData(msg) if err != nil { l.Errorf("Failed to retrieve message data for %s: %s", msg.Header.ID, err) continue diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index 90eed813d1..27bbc328e1 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -23,7 +23,6 @@ import ( "time" "github.com/hyperledger/firefly/internal/config" - "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" @@ -352,7 +351,7 @@ func TestMessageSequencerMissingMessageData(t *testing.T) { }). 
Once() mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything, data.CRORequirePublicBlobRefs).Return(fftypes.DataArray{}, false, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{}, false, nil) bm.(*batchManager).messageSequencer() @@ -541,7 +540,7 @@ func TestAssembleMessageDataNilData(t *testing.T) { bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) bm.Close() mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, nil) - _, err := bm.(*batchManager).assembleMessageData(fftypes.BatchTypePrivate, &fftypes.Message{ + _, err := bm.(*batchManager).assembleMessageData(&fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), }, @@ -558,7 +557,7 @@ func TestGetMessageDataFail(t *testing.T) { bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, fmt.Errorf("pop")) bm.Close() - _, _ = bm.(*batchManager).assembleMessageData(fftypes.BatchTypePrivate, &fftypes.Message{ + _, _ = bm.(*batchManager).assembleMessageData(&fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), }, @@ -575,9 +574,9 @@ func TestGetMessageNotFound(t *testing.T) { mni := &sysmessagingmocks.LocalNodeInfo{} txHelper := txcommon.NewTransactionHelper(mdi, mdm) bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) - mdm.On("GetMessageDataCached", mock.Anything, mock.Anything, data.CRORequirePublicBlobRefs).Return(nil, false, nil) + mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, nil) bm.Close() - _, err := bm.(*batchManager).assembleMessageData(fftypes.BatchTypeBroadcast, &fftypes.Message{ + _, err := bm.(*batchManager).assembleMessageData(&fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), }, From 
cb5aa60acfb1241579ada835ba18b22d7c1c92d8 Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 16:45:44 -0500 Subject: [PATCH 11/13] Use multiple value insert for pins, and correct enablement for PSQL Signed-off-by: Peter Broadhurst --- internal/database/postgres/postgres.go | 1 + internal/database/sqlcommon/pin_sql.go | 74 ++++++++++++++++----- internal/database/sqlcommon/pin_sql_test.go | 55 +++++++++++++++ internal/database/sqlcommon/sqlcommon.go | 28 ++++++-- internal/events/batch_pin_complete.go | 17 ++++- internal/events/batch_pin_complete_test.go | 4 +- mocks/databasemocks/plugin.go | 14 ++++ pkg/database/plugin.go | 3 + test/e2e/tokens_test.go | 1 + 9 files changed, 172 insertions(+), 25 deletions(-) diff --git a/internal/database/postgres/postgres.go b/internal/database/postgres/postgres.go index 115f4c5795..6cb99e5e1f 100644 --- a/internal/database/postgres/postgres.go +++ b/internal/database/postgres/postgres.go @@ -57,6 +57,7 @@ func (psql *Postgres) Features() sqlcommon.SQLFeatures { features.ExclusiveTableLockSQL = func(table string) string { return fmt.Sprintf(`LOCK TABLE "%s" IN EXCLUSIVE MODE;`, table) } + features.MultiRowInsert = true return features } diff --git a/internal/database/sqlcommon/pin_sql.go b/internal/database/sqlcommon/pin_sql.go index 8cd2008b4d..7e9a7da92a 100644 --- a/internal/database/sqlcommon/pin_sql.go +++ b/internal/database/sqlcommon/pin_sql.go @@ -74,22 +74,7 @@ func (s *SQLCommon) UpsertPin(ctx context.Context, pin *fftypes.Pin) (err error) log.L(ctx).Debugf("Existing pin returned at sequence %d", pin.Sequence) } else { pinRows.Close() - if pin.Sequence, err = s.insertTx(ctx, tx, - sq.Insert("pins"). - Columns(pinColumns...). 
- Values( - pin.Masked, - pin.Hash, - pin.Batch, - pin.Index, - pin.Signer, - pin.Dispatched, - pin.Created, - ), - func() { - s.callbacks.OrderedCollectionEvent(database.CollectionPins, fftypes.ChangeEventTypeCreated, pin.Sequence) - }, - ); err != nil { + if err = s.attemptPinInsert(ctx, tx, pin); err != nil { return err } @@ -98,6 +83,63 @@ func (s *SQLCommon) UpsertPin(ctx context.Context, pin *fftypes.Pin) (err error) return s.commitTx(ctx, tx, autoCommit) } +func (s *SQLCommon) attemptPinInsert(ctx context.Context, tx *txWrapper, pin *fftypes.Pin) (err error) { + pin.Sequence, err = s.insertTx(ctx, tx, + s.setPinInsertValues(sq.Insert("pins").Columns(pinColumns...), pin), + func() { + log.L(ctx).Debugf("Triggering creation event for pin %d", pin.Sequence) + s.callbacks.OrderedCollectionEvent(database.CollectionPins, fftypes.ChangeEventTypeCreated, pin.Sequence) + }, + ) + return err +} + +func (s *SQLCommon) setPinInsertValues(query sq.InsertBuilder, pin *fftypes.Pin) sq.InsertBuilder { + return query.Values( + pin.Masked, + pin.Hash, + pin.Batch, + pin.Index, + pin.Signer, + pin.Dispatched, + pin.Created, + ) +} + +func (s *SQLCommon) InsertPins(ctx context.Context, pins []*fftypes.Pin) error { + ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) + if err != nil { + return err + } + defer s.rollbackTx(ctx, tx, autoCommit) + + if s.features.MultiRowInsert { + query := sq.Insert("pins").Columns(pinColumns...) 
+ for _, pin := range pins { + query = s.setPinInsertValues(query, pin) + } + sequences := make([]int64, len(pins)) + err := s.insertTxRows(ctx, tx, query, func() { + for i, pin := range pins { + pin.Sequence = sequences[i] + s.callbacks.OrderedCollectionEvent(database.CollectionPins, fftypes.ChangeEventTypeCreated, pin.Sequence) + } + }, sequences, true /* we want the caller to be able to retry with individual upserts */) + if err != nil { + return err + } + } else { + // Fall back to individual inserts grouped in a TX + for _, pin := range pins { + if err := s.attemptPinInsert(ctx, tx, pin); err != nil { + return err + } + } + } + + return s.commitTx(ctx, tx, autoCommit) +} + func (s *SQLCommon) pinResult(ctx context.Context, row *sql.Rows) (*fftypes.Pin, error) { pin := fftypes.Pin{} err := row.Scan( diff --git a/internal/database/sqlcommon/pin_sql_test.go b/internal/database/sqlcommon/pin_sql_test.go index b2510247fa..2c98c5b1b3 100644 --- a/internal/database/sqlcommon/pin_sql_test.go +++ b/internal/database/sqlcommon/pin_sql_test.go @@ -141,6 +141,61 @@ func TestUpsertPinFailCommit(t *testing.T) { assert.NoError(t, mock.ExpectationsWereMet()) } +func TestInsertPinsBeginFail(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.InsertPins(context.Background(), []*fftypes.Pin{}) + assert.Regexp(t, "FF10114", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertPinsMultiRowOK(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + + pin1 := &fftypes.Pin{Hash: fftypes.NewRandB32()} + pin2 := &fftypes.Pin{Hash: fftypes.NewRandB32()} + s.callbacks.On("OrderedCollectionEvent", database.CollectionPins, fftypes.ChangeEventTypeCreated, int64(1001)) + s.callbacks.On("OrderedCollectionEvent", database.CollectionPins, fftypes.ChangeEventTypeCreated, int64(1002)) + + mock.ExpectBegin() + 
mock.ExpectQuery("INSERT.*").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}). + AddRow(int64(1001)). + AddRow(int64(1002)), + ) + mock.ExpectCommit() + err := s.InsertPins(context.Background(), []*fftypes.Pin{pin1, pin2}) + assert.NoError(t, err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertPinsMultiRowFail(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + pin1 := &fftypes.Pin{Hash: fftypes.NewRandB32()} + mock.ExpectBegin() + mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop")) + err := s.InsertPins(context.Background(), []*fftypes.Pin{pin1}) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertPinsSingleRowFail(t *testing.T) { + s, mock := newMockProvider().init() + pin1 := &fftypes.Pin{Hash: fftypes.NewRandB32()} + mock.ExpectBegin() + mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop")) + err := s.InsertPins(context.Background(), []*fftypes.Pin{pin1}) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + func TestGetPinQueryFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) diff --git a/internal/database/sqlcommon/sqlcommon.go b/internal/database/sqlcommon/sqlcommon.go index 8e3cec5152..f725787bd9 100644 --- a/internal/database/sqlcommon/sqlcommon.go +++ b/internal/database/sqlcommon/sqlcommon.go @@ -20,6 +20,7 @@ import ( "context" "database/sql" "fmt" + "strings" sq "github.com/Masterminds/squirrel" "github.com/golang-migrate/migrate/v4" @@ -51,6 +52,21 @@ type txWrapper struct { tableLocks []string } +func shortenSQL(sqlString string) string { + buff := strings.Builder{} + spaceCount := 0 + for _, c := range sqlString { + if c == ' ' { + spaceCount++ + if spaceCount >= 3 { + break 
+ } + } + buff.WriteRune(c) + } + return buff.String() +} + func (s *SQLCommon) Init(ctx context.Context, provider Provider, prefix config.Prefix, callbacks database.Callbacks, capabilities *database.Capabilities) (err error) { s.capabilities = capabilities s.callbacks = callbacks @@ -176,7 +192,7 @@ func (s *SQLCommon) queryTx(ctx context.Context, tx *txWrapper, q sq.SelectBuild if err != nil { return nil, tx, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> query: %s`, sqlQuery) + l.Debugf(`SQL-> query: %s`, shortenSQL(sqlQuery)) l.Tracef(`SQL-> query args: %+v`, args) var rows *sql.Rows if tx != nil { @@ -212,7 +228,7 @@ func (s *SQLCommon) countQuery(ctx context.Context, tx *txWrapper, tableName str if err != nil { return count, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> count query: %s`, sqlQuery) + l.Debugf(`SQL-> count query: %s`, shortenSQL(sqlQuery)) l.Tracef(`SQL-> count query args: %+v`, args) var rows *sql.Rows if tx != nil { @@ -265,7 +281,7 @@ func (s *SQLCommon) insertTxRows(ctx context.Context, tx *txWrapper, q sq.Insert if err != nil { return i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> insert: %s`, sqlQuery) + l.Debugf(`SQL-> insert: %s`, shortenSQL(sqlQuery)) l.Tracef(`SQL-> insert args: %+v`, args) if useQuery { result, err := tx.sqlTX.QueryContext(ctx, sqlQuery, args...) @@ -312,7 +328,7 @@ func (s *SQLCommon) deleteTx(ctx context.Context, tx *txWrapper, q sq.DeleteBuil if err != nil { return i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> delete: %s`, sqlQuery) + l.Debugf(`SQL-> delete: %s`, shortenSQL(sqlQuery)) l.Tracef(`SQL-> delete args: %+v`, args) res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) 
if err != nil { @@ -337,7 +353,7 @@ func (s *SQLCommon) updateTx(ctx context.Context, tx *txWrapper, q sq.UpdateBuil if err != nil { return -1, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> update: %s`, sqlQuery) + l.Debugf(`SQL-> update: %s`, shortenSQL(sqlQuery)) l.Tracef(`SQL-> update args: %+v`, args) res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) if err != nil { @@ -375,7 +391,7 @@ func (s *SQLCommon) lockTableExclusiveTx(ctx context.Context, tx *txWrapper, tab if s.features.ExclusiveTableLockSQL != nil && !tx.tableIsLocked(table) { sqlQuery := s.features.ExclusiveTableLockSQL(table) - l.Debugf(`SQL-> lock: %s`, sqlQuery) + l.Debugf(`SQL-> lock: %s`, shortenSQL(sqlQuery)) _, err := tx.sqlTX.ExecContext(ctx, sqlQuery) if err != nil { l.Errorf(`SQL lock failed: %s sql=[ %s ]`, err, sqlQuery) diff --git a/internal/events/batch_pin_complete.go b/internal/events/batch_pin_complete.go index b85eb3af86..19e8144e4d 100644 --- a/internal/events/batch_pin_complete.go +++ b/internal/events/batch_pin_complete.go @@ -77,15 +77,28 @@ func (em *eventManager) persistBatchTransaction(ctx context.Context, batchPin *b } func (em *eventManager) persistContexts(ctx context.Context, batchPin *blockchain.BatchPin, signingKey *fftypes.VerifierRef, private bool) error { + pins := make([]*fftypes.Pin, len(batchPin.Contexts)) for idx, hash := range batchPin.Contexts { - if err := em.database.UpsertPin(ctx, &fftypes.Pin{ + pins[idx] = &fftypes.Pin{ Masked: private, Hash: hash, Batch: batchPin.BatchID, Index: int64(idx), Signer: signingKey.Value, // We don't store the type as we can infer that from the blockchain Created: fftypes.Now(), - }); err != nil { + } + } + + // First attempt a single batch insert + err := em.database.InsertPins(ctx, pins) + if err == nil { + return nil + } + log.L(ctx).Warnf("Batch insert of pins failed - assuming replay and performing upserts: %s", err) + + // Fall back to an upsert + for _, pin := range pins { + if err := 
em.database.UpsertPin(ctx, pin); err != nil { return err } } diff --git a/internal/events/batch_pin_complete_test.go b/internal/events/batch_pin_complete_test.go index ec3a8a687e..15934090bc 100644 --- a/internal/events/batch_pin_complete_test.go +++ b/internal/events/batch_pin_complete_test.go @@ -140,7 +140,7 @@ func TestBatchPinCompleteOkBroadcast(t *testing.T) { mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(e *fftypes.Event) bool { return e.Type == fftypes.EventTypeBlockchainEventReceived })).Return(nil).Times(2) - mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(nil).Once() + mdi.On("InsertPins", mock.Anything, mock.Anything).Return(nil).Once() mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil).Once() mbi := &blockchainmocks.Plugin{} @@ -198,6 +198,7 @@ func TestBatchPinCompleteOkPrivate(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdi.On("RunAsGroup", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertPins", mock.Anything, mock.Anything).Return(fmt.Errorf("These pins have been seen before")) // simulate replay fallback mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(nil) mbi := &blockchainmocks.Plugin{} @@ -764,6 +765,7 @@ func TestPersistContextsFail(t *testing.T) { defer cancel() mdi := em.database.(*databasemocks.Plugin) + mdi.On("InsertPins", mock.Anything, mock.Anything).Return(fmt.Errorf("duplicate pins")) mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) err := em.persistContexts(em.ctx, &blockchain.BatchPin{ diff --git a/mocks/databasemocks/plugin.go b/mocks/databasemocks/plugin.go index 0bfbcd2189..e15b3dc4e7 100644 --- a/mocks/databasemocks/plugin.go +++ b/mocks/databasemocks/plugin.go @@ -2291,6 +2291,20 @@ func (_m *Plugin) InsertOperation(ctx context.Context, operation *fftypes.Operat return r0 } +// InsertPins provides a mock function with given fields: ctx, pins +func (_m *Plugin) InsertPins(ctx context.Context, pins []*fftypes.Pin) error { + ret := 
_m.Called(ctx, pins) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*fftypes.Pin) error); ok { + r0 = rf(ctx, pins) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // InsertTransaction provides a mock function with given fields: ctx, data func (_m *Plugin) InsertTransaction(ctx context.Context, data *fftypes.Transaction) error { ret := _m.Called(ctx, data) diff --git a/pkg/database/plugin.go b/pkg/database/plugin.go index 427b224247..c19d3ef63d 100644 --- a/pkg/database/plugin.go +++ b/pkg/database/plugin.go @@ -185,6 +185,9 @@ type iOffsetCollection interface { } type iPinCollection interface { + // InsertPins - Inserts a list of pins - fails if they already exist, so caller can fall back to UpsertPin individually + InsertPins(ctx context.Context, pins []*fftypes.Pin) (err error) + // UpsertPin - Will insert a pin at the end of the sequence, unless the batch+hash+index sequence already exists UpsertPin(ctx context.Context, parked *fftypes.Pin) (err error) diff --git a/test/e2e/tokens_test.go b/test/e2e/tokens_test.go index 3e42b7db7a..4b5be2ffd9 100644 --- a/test/e2e/tokens_test.go +++ b/test/e2e/tokens_test.go @@ -75,6 +75,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { Operator: suite.testState.org2key.Value, Approved: true, }, + Pool: poolName, } approvalOut := TokenApproval(suite.T(), suite.testState.client1, approval, false) From 2e244da8c619fe0b0c61d02c807affd39bd120cd Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 17:07:36 -0500 Subject: [PATCH 12/13] Retry longer delay now we have confidence of applying multi-row Signed-off-by: Peter Broadhurst --- internal/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/config/config.go b/internal/config/config.go index f435753d0c..1c0bb9b40c 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -351,7 +351,7 @@ func Reset() { viper.SetDefault(string(MessageCacheSize), "50Mb") 
viper.SetDefault(string(MessageCacheTTL), "5m") viper.SetDefault(string(MessageWriterBatchMaxInserts), 200) - viper.SetDefault(string(MessageWriterBatchTimeout), "25ms") + viper.SetDefault(string(MessageWriterBatchTimeout), "50ms") viper.SetDefault(string(MessageWriterCount), 5) viper.SetDefault(string(NamespacesDefault), "default") viper.SetDefault(string(NamespacesPredefined), fftypes.JSONObjectArray{{"name": "default", "description": "Default predefined namespace"}}) From d25da79cf14b792e41a631d425e28a8acd68920d Mon Sep 17 00:00:00 2001 From: Peter Broadhurst Date: Fri, 11 Mar 2022 17:32:45 -0500 Subject: [PATCH 13/13] Throughput optimize for events to avoid DB polling overhead Signed-off-by: Peter Broadhurst --- internal/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/config/config.go b/internal/config/config.go index 1c0bb9b40c..f6b7bb9a0a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -333,7 +333,7 @@ func Reset() { viper.SetDefault(string(EventAggregatorOpCorrelationRetries), 3) viper.SetDefault(string(EventDBEventsBufferSize), 100) viper.SetDefault(string(EventDispatcherBufferLength), 5) - viper.SetDefault(string(EventDispatcherBatchTimeout), "0") + viper.SetDefault(string(EventDispatcherBatchTimeout), "250ms") viper.SetDefault(string(EventDispatcherPollTimeout), "30s") viper.SetDefault(string(EventTransportsEnabled), []string{"websockets", "webhooks"}) viper.SetDefault(string(EventTransportsDefault), "websockets")