From e6acccbb40ff7f45f22104edf6e477139fea0415 Mon Sep 17 00:00:00 2001 From: Nicko Guyer Date: Fri, 4 Mar 2022 09:20:40 -0500 Subject: [PATCH 1/6] Add blockchain metrics Signed-off-by: Nicko Guyer --- docs/swagger/swagger.yaml | 12 ++++ internal/blockchain/ethereum/ethereum.go | 21 ++++++- internal/blockchain/ethereum/ethereum_test.go | 36 ++++++----- internal/blockchain/fabric/fabric.go | 6 +- internal/blockchain/fabric/fabric_test.go | 21 +++---- internal/events/blockchain_event.go | 5 ++ internal/events/tokens_transferred.go | 4 ++ internal/metrics/contracts.go | 59 +++++++++++++++++++ internal/metrics/metrics.go | 15 +++++ internal/metrics/prometheus.go | 2 + internal/orchestrator/orchestrator.go | 9 +-- internal/orchestrator/orchestrator_test.go | 38 ++++++------ internal/tokens/fftokens/fftokens.go | 12 ++++ manifest.json | 6 +- mocks/blockchainmocks/plugin.go | 12 ++-- mocks/metricsmocks/manager.go | 15 +++++ pkg/blockchain/plugin.go | 9 ++- pkg/fftypes/blockchainevent.go | 2 + 18 files changed, 224 insertions(+), 60 deletions(-) create mode 100644 internal/metrics/contracts.go diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index 9f112ada0c..1f7599ac31 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -1285,6 +1285,8 @@ paths: additionalProperties: {} type: object listener: {} + location: + type: string name: type: string namespace: @@ -1297,6 +1299,8 @@ paths: sequence: format: int64 type: integer + signature: + type: string source: type: string timestamp: {} @@ -1346,6 +1350,8 @@ paths: additionalProperties: {} type: object listener: {} + location: + type: string name: type: string namespace: @@ -1358,6 +1364,8 @@ paths: sequence: format: int64 type: integer + signature: + type: string source: type: string timestamp: {} @@ -8723,6 +8731,8 @@ paths: additionalProperties: {} type: object listener: {} + location: + type: string name: type: string namespace: @@ -8735,6 +8745,8 @@ paths: sequence: format: int64 type: integer + signature: + type: string source: type: string timestamp: {} diff --git a/internal/blockchain/ethereum/ethereum.go b/internal/blockchain/ethereum/ethereum.go index 748ac525c1..2dd07e74cf 100644 --- a/internal/blockchain/ethereum/ethereum.go +++ b/internal/blockchain/ethereum/ethereum.go @@ -29,6 +29,7 @@ import ( "github.com/hyperledger/firefly/internal/config/wsconfig" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/internal/metrics" "github.com/hyperledger/firefly/internal/restclient" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/fftypes" @@ -62,6 +63,7 @@ type Ethereum struct { wsconn wsclient.WSClient closed chan struct{} addressResolver *addressResolver + metrics metrics.Manager } type eventStreamWebsocket struct { @@ -138,7 +140,6 @@ type FFIGenerationInput struct { ABI []ABIElementMarshaling `json:"abi,omitempty"` } -// var batchPinEvent = "BatchPin" var addressVerify = regexp.MustCompile("^[0-9a-f]{40}$") func (e *Ethereum) Name() string { @@ -149,13 +150,13 @@ func (e *Ethereum) VerifierType() fftypes.VerifierType { return fftypes.VerifierTypeEthAddress } -func (e *Ethereum) Init(ctx context.Context, prefix config.Prefix, callbacks blockchain.Callbacks) (err error) { - +func (e *Ethereum) Init(ctx context.Context, prefix config.Prefix, callbacks blockchain.Callbacks, metrics metrics.Manager) (err error) { ethconnectConf := prefix.SubPrefix(EthconnectConfigKey) addressResolverConf := 
prefix.SubPrefix(AddressResolverConfigKey) e.ctx = log.WithLogField(ctx, "proto", "ethereum") e.callbacks = callbacks + e.metrics = metrics if addressResolverConf.GetString(AddressResolverURLTemplate) != "" { if e.addressResolver, err = newAddressResolver(ctx, addressResolverConf); err != nil { @@ -341,6 +342,8 @@ func (e *Ethereum) handleBatchPinEvent(ctx context.Context, msgJSON fftypes.JSON Output: dataJSON, Info: msgJSON, Timestamp: timestamp, + Location: e.buildEventLocationString(msgJSON), + Signature: msgJSON.GetString("signature"), }, } @@ -378,6 +381,8 @@ func (e *Ethereum) handleContractEvent(ctx context.Context, msgJSON fftypes.JSON Output: dataJSON, Info: msgJSON, Timestamp: timestamp, + Location: e.buildEventLocationString(msgJSON), + Signature: msgJSON.GetString("signature"), }, } @@ -409,6 +414,10 @@ func (e *Ethereum) handleReceipt(ctx context.Context, reply fftypes.JSONObject) return e.callbacks.BlockchainOpUpdate(operationID, updateType, txHash, message, reply) } +func (e *Ethereum) buildEventLocationString(msgJSON fftypes.JSONObject) string { + return fmt.Sprintf("address=%s", msgJSON.GetString("address")) +} + func (e *Ethereum) handleMessageBatch(ctx context.Context, messages []interface{}) error { l := log.L(ctx) @@ -511,6 +520,9 @@ func (e *Ethereum) NormalizeSigningKey(ctx context.Context, key string) (string, } func (e *Ethereum) invokeContractMethod(ctx context.Context, address, signingKey string, abi ABIElementMarshaling, requestID string, input []interface{}) (*resty.Response, error) { + if e.metrics.IsMetricsEnabled() { + e.metrics.BlockchainTransaction(address, abi.Name) + } body := EthconnectMessageRequest{ Headers: EthconnectMessageHeaders{ Type: "SendTransaction", @@ -528,6 +540,9 @@ func (e *Ethereum) invokeContractMethod(ctx context.Context, address, signingKey } func (e *Ethereum) queryContractMethod(ctx context.Context, address string, abi ABIElementMarshaling, input []interface{}) (*resty.Response, error) { + if e.metrics.IsMetricsEnabled() { + e.metrics.BlockchainQuery(address, abi.Name) + } body := EthconnectMessageRequest{ Headers: EthconnectMessageHeaders{ Type: "Query", diff --git a/internal/blockchain/ethereum/ethereum_test.go b/internal/blockchain/ethereum/ethereum_test.go index 8d81a0a389..795232c5ac 100644 --- a/internal/blockchain/ethereum/ethereum_test.go +++ b/internal/blockchain/ethereum/ethereum_test.go @@ -29,6 +29,7 @@ import ( "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/restclient" "github.com/hyperledger/firefly/mocks/blockchainmocks" + "github.com/hyperledger/firefly/mocks/metricsmocks" "github.com/hyperledger/firefly/mocks/wsmocks" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/fftypes" @@ -74,6 +75,10 @@ func newTestEthereum() (*Ethereum, func()) { ctx, cancel := context.WithCancel(context.Background()) em := &blockchainmocks.Callbacks{} wsm := &wsmocks.WSClient{} + mm := &metricsmocks.Manager{} + mm.On("IsMetricsEnabled").Return(true) + mm.On("BlockchainTransaction", mock.Anything, mock.Anything).Return(nil) + mm.On("BlockchainQuery", mock.Anything, mock.Anything).Return(nil) e := &Ethereum{ ctx: ctx, client: resty.New().SetBaseURL("http://localhost:12345"), @@ -83,6 +88,7 @@ func newTestEthereum() (*Ethereum, func()) { prefixLong: defaultPrefixLong, callbacks: em, wsconn: wsm, + metrics: mm, } return e, func() { cancel() @@ -97,7 +103,7 @@ func TestInitMissingURL(t *testing.T) { e, cancel := newTestEthereum() defer cancel() resetConf() - err := 
e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10138.*url", err) } @@ -106,7 +112,7 @@ func TestInitBadAddressResolver(t *testing.T) { defer cancel() resetConf() utAddressResolverConf.Set(AddressResolverURLTemplate, "{{unclosed}") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10337.*urlTemplate", err) } @@ -117,7 +123,7 @@ func TestInitMissingInstance(t *testing.T) { utEthconnectConf.Set(restclient.HTTPConfigURL, "http://localhost:12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10138.*instance", err) } @@ -128,7 +134,7 @@ func TestInitMissingTopic(t *testing.T) { utEthconnectConf.Set(restclient.HTTPConfigURL, "http://localhost:12345") utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10138.*topic", err) } @@ -169,7 +175,7 @@ func TestInitAllNewStreamsAndWSEvent(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.NoError(t, err) assert.Equal(t, "ethereum", e.Name()) @@ -207,7 +213,7 @@ func TestWSInitFail(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10162", err) } @@ -249,7 +255,7 @@ func TestInitAllExistingStreams(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Equal(t, 3, httpmock.GetTotalCallCount()) assert.Equal(t, "es12345", e.initInfo.stream.ID) @@ -298,7 +304,7 @@ func TestInitOldInstancePathContracts(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/contracts/firefly") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.NoError(t, err) assert.Equal(t, e.instancePath, "0x12345") } @@ -332,7 +338,7 @@ func TestInitOldInstancePathInstances(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.NoError(t, err) assert.Equal(t, e.instancePath, "0x12345") } @@ -369,7 +375,7 @@ func TestInitOldInstancePathError(t *testing.T) { 
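// Every Init call in these tests now passes a metrics.Manager as the fourth
// argument, matching the new blockchain.Plugin.Init signature. A minimal
// sketch of the equivalent production wiring, assuming ctx, prefix and
// callbacks are already in scope (the orchestrator change later in this
// patch uses the same constructor):
//
//	var e Ethereum
//	mm := metrics.NewMetricsManager(ctx)
//	if err := e.Init(ctx, prefix, callbacks, mm); err != nil {
//		return err
//	}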
utEthconnectConf.Set(EthconnectConfigInstancePath, "/contracts/firefly") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10111", err) assert.Regexp(t, "pop", err) } @@ -393,7 +399,7 @@ func TestStreamQueryError(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10111", err) assert.Regexp(t, "pop", err) @@ -421,7 +427,7 @@ func TestStreamCreateError(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10111", err) assert.Regexp(t, "pop", err) @@ -449,7 +455,7 @@ func TestStreamUpdateError(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10111", err) assert.Regexp(t, "pop", err) @@ -479,7 +485,7 @@ func TestSubQueryError(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10111", err) assert.Regexp(t, "pop", err) @@ -511,7 +517,7 @@ func TestSubQueryCreateError(t *testing.T) { utEthconnectConf.Set(EthconnectConfigInstancePath, "/instances/0x12345") utEthconnectConf.Set(EthconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10111", err) assert.Regexp(t, "pop", err) diff --git a/internal/blockchain/fabric/fabric.go b/internal/blockchain/fabric/fabric.go index e7ea955a28..a78c55b4af 100644 --- a/internal/blockchain/fabric/fabric.go +++ b/internal/blockchain/fabric/fabric.go @@ -31,6 +31,7 @@ import ( "github.com/hyperledger/firefly/internal/config/wsconfig" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/internal/metrics" "github.com/hyperledger/firefly/internal/restclient" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/fftypes" @@ -60,6 +61,7 @@ type Fabric struct { idCache map[string]*fabIdentity wsconn wsclient.WSClient closed chan struct{} + metrics metrics.Manager } type eventStreamWebsocket struct { @@ -153,13 +155,13 @@ func (f *Fabric) VerifierType() fftypes.VerifierType { return fftypes.VerifierTypeMSPIdentity } -func (f *Fabric) Init(ctx context.Context, prefix config.Prefix, callbacks blockchain.Callbacks) (err error) { - +func (f *Fabric) Init(ctx context.Context, prefix config.Prefix, callbacks blockchain.Callbacks, metrics metrics.Manager) (err error) { fabconnectConf 
:= prefix.SubPrefix(FabconnectConfigKey) f.ctx = log.WithLogField(ctx, "proto", "fabric") f.callbacks = callbacks f.idCache = make(map[string]*fabIdentity) + f.metrics = metrics if fabconnectConf.GetString(restclient.HTTPConfigURL) == "" { return i18n.NewError(ctx, i18n.MsgMissingPluginConfig, "url", "blockchain.fabconnect") diff --git a/internal/blockchain/fabric/fabric_test.go b/internal/blockchain/fabric/fabric_test.go index 4c42d2ba8a..5d9e0de2bc 100644 --- a/internal/blockchain/fabric/fabric_test.go +++ b/internal/blockchain/fabric/fabric_test.go @@ -30,6 +30,7 @@ import ( "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/restclient" "github.com/hyperledger/firefly/mocks/blockchainmocks" + "github.com/hyperledger/firefly/mocks/metricsmocks" "github.com/hyperledger/firefly/mocks/wsmocks" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/fftypes" @@ -103,7 +104,7 @@ func TestInitMissingURL(t *testing.T) { e, cancel := newTestFabric() defer cancel() resetConf() - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10138.*url", err) } @@ -114,7 +115,7 @@ func TestInitMissingChaincode(t *testing.T) { utFabconnectConf.Set(restclient.HTTPConfigURL, "http://localhost:12345") utFabconnectConf.Set(FabconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10138.*chaincode", err) } @@ -126,7 +127,7 @@ func TestInitMissingTopic(t *testing.T) { utFabconnectConf.Set(FabconnectConfigChaincode, "Firefly") utFabconnectConf.Set(FabconnectConfigSigner, "signer001") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10138.*topic", err) } @@ -169,7 +170,7 @@ func TestInitAllNewStreamsAndWSEvent(t *testing.T) { utFabconnectConf.Set(FabconnectConfigSigner, "signer001") utFabconnectConf.Set(FabconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.NoError(t, err) assert.Equal(t, "fabric", e.Name()) @@ -208,7 +209,7 @@ func TestWSInitFail(t *testing.T) { utFabconnectConf.Set(FabconnectConfigSigner, "signer001") utFabconnectConf.Set(FabconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10162", err) } @@ -249,7 +250,7 @@ func TestInitAllExistingStreams(t *testing.T) { utFabconnectConf.Set(FabconnectConfigSigner, "signer001") utFabconnectConf.Set(FabconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Equal(t, 2, httpmock.GetTotalCallCount()) assert.Equal(t, "es12345", e.initInfo.stream.ID) @@ -279,7 +280,7 @@ func TestStreamQueryError(t *testing.T) { utFabconnectConf.Set(FabconnectConfigSigner, "signer001") utFabconnectConf.Set(FabconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, 
&metricsmocks.Manager{}) assert.Regexp(t, "FF10284", err) assert.Regexp(t, "pop", err) @@ -308,7 +309,7 @@ func TestStreamCreateError(t *testing.T) { utFabconnectConf.Set(FabconnectConfigSigner, "signer001") utFabconnectConf.Set(FabconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10284", err) assert.Regexp(t, "pop", err) @@ -339,7 +340,7 @@ func TestSubQueryError(t *testing.T) { utFabconnectConf.Set(FabconnectConfigSigner, "signer001") utFabconnectConf.Set(FabconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10284", err) assert.Regexp(t, "pop", err) @@ -372,7 +373,7 @@ func TestSubQueryCreateError(t *testing.T) { utFabconnectConf.Set(FabconnectConfigSigner, "signer001") utFabconnectConf.Set(FabconnectConfigTopic, "topic1") - err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}) + err := e.Init(e.ctx, utConfPrefix, &blockchainmocks.Callbacks{}, &metricsmocks.Manager{}) assert.Regexp(t, "FF10284", err) assert.Regexp(t, "pop", err) diff --git a/internal/events/blockchain_event.go b/internal/events/blockchain_event.go index b5eca28b63..147c338e28 100644 --- a/internal/events/blockchain_event.go +++ b/internal/events/blockchain_event.go @@ -35,6 +35,8 @@ func buildBlockchainEvent(ns string, subID *fftypes.UUID, event *blockchain.Even Output: event.Output, Info: event.Info, Timestamp: event.Timestamp, + Location: event.Location, + Signature: event.Signature, } if tx != nil { ev.TX = *tx @@ -43,6 +45,9 @@ func buildBlockchainEvent(ns string, subID *fftypes.UUID, event *blockchain.Even } func (em *eventManager) persistBlockchainEvent(ctx context.Context, chainEvent *fftypes.BlockchainEvent) error { + if em.metrics.IsMetricsEnabled() && chainEvent.Location != "" && chainEvent.Signature != "" { + em.metrics.BlockchainEvent(chainEvent.Location, chainEvent.Signature) + } if err := em.database.InsertBlockchainEvent(ctx, chainEvent); err != nil { return err } diff --git a/internal/events/tokens_transferred.go b/internal/events/tokens_transferred.go index 453364c05a..6a07db61b6 100644 --- a/internal/events/tokens_transferred.go +++ b/internal/events/tokens_transferred.go @@ -143,6 +143,10 @@ func (em *eventManager) TokensTransferred(ti tokens.Plugin, transfer *tokens.Tok } } + if em.metrics.IsMetricsEnabled() && transfer.Event.Location != "" && transfer.Event.Signature != "" { + em.metrics.BlockchainEvent(transfer.Event.Location, transfer.Event.Signature) + } + event := fftypes.NewEvent(fftypes.EventTypeTransferConfirmed, transfer.Namespace, transfer.LocalID, transfer.TX.ID) return em.database.InsertEvent(ctx, event) }) diff --git a/internal/metrics/contracts.go b/internal/metrics/contracts.go new file mode 100644 index 0000000000..baf3b9e604 --- /dev/null +++ b/internal/metrics/contracts.go @@ -0,0 +1,59 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +var BlockchainTransactionsCounter *prometheus.CounterVec +var BlockchainQueriesCounter *prometheus.CounterVec +var BlockchainEventsCounter *prometheus.CounterVec + +// BlockchainTransactionsCounterName is the prometheus metric for tracking the total number of blockchain transactions +var BlockchainTransactionsCounterName = "ff_blockchain_transactions_total" + +// BlockchainQueriesCounterName is the prometheus metric for tracking the total number of blockchain queries +var BlockchainQueriesCounterName = "ff_blockchain_queries_total" + +// BlockchainEventsCounterName is the prometheus metric for tracking the total number of blockchain events +var BlockchainEventsCounterName = "ff_blockchain_events_total" + +var LocationLabelName = "location" +var MethodNameLabelName = "methodName" +var SignatureLabelName = "signature" + +func InitBlockchainMetrics() { + BlockchainTransactionsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: BlockchainTransactionsCounterName, + Help: "Number of blockchain transactions", + }, []string{LocationLabelName, MethodNameLabelName}) + BlockchainQueriesCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: BlockchainQueriesCounterName, + Help: "Number of blockchain queries", + }, []string{LocationLabelName, MethodNameLabelName}) + BlockchainEventsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: BlockchainEventsCounterName, + Help: "Number of blockchain events", + }, []string{LocationLabelName, SignatureLabelName}) +} + +func RegisterBlockchainMetrics() { + registry.MustRegister(BlockchainTransactionsCounter) + registry.MustRegister(BlockchainQueriesCounter) + registry.MustRegister(BlockchainEventsCounter) +} diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go index 906519a662..0888c91edf 100644 --- a/internal/metrics/metrics.go +++ b/internal/metrics/metrics.go @@ -33,6 +33,9 @@ type Manager interface { MessageConfirmed(msg *fftypes.Message, eventType fftypes.FFEnum) TransferSubmitted(transfer *fftypes.TokenTransfer) TransferConfirmed(transfer *fftypes.TokenTransfer) + BlockchainTransaction(location, methodName string) + BlockchainQuery(location, methodName string) + BlockchainEvent(location, signature string) AddTime(id string) GetTime(id string) time.Time DeleteTime(id string) @@ -129,6 +132,18 @@ func (mm *metricsManager) TransferConfirmed(transfer *fftypes.TokenTransfer) { } } +func (mm *metricsManager) BlockchainTransaction(location, methodName string) { + BlockchainTransactionsCounter.WithLabelValues(location, methodName).Inc() +} + +func (mm *metricsManager) BlockchainQuery(location, methodName string) { + BlockchainQueriesCounter.WithLabelValues(location, methodName).Inc() +} + +func (mm *metricsManager) BlockchainEvent(location, signature string) { + BlockchainEventsCounter.WithLabelValues(location, signature).Inc() +} + func (mm *metricsManager) AddTime(id string) { mutex.Lock() mm.timeMap[id] = time.Now() diff --git a/internal/metrics/prometheus.go b/internal/metrics/prometheus.go index 
1c497fa13c..5bb7047e1c 100644 --- a/internal/metrics/prometheus.go +++ b/internal/metrics/prometheus.go @@ -80,6 +80,7 @@ func initMetricsCollectors() { InitTokenTransferMetrics() InitTokenBurnMetrics() InitBatchPinMetrics() + InitBlockchainMetrics() } func registerMetricsCollectors() { @@ -92,4 +93,5 @@ func registerMetricsCollectors() { RegisterTokenMintMetrics() RegisterTokenTransferMetrics() RegisterTokenBurnMetrics() + RegisterBlockchainMetrics() } diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index aabe12c060..14e82855c6 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -341,6 +341,10 @@ func (or *orchestrator) initDataExchange(ctx context.Context) (err error) { func (or *orchestrator) initPlugins(ctx context.Context) (err error) { + if or.metrics == nil { + or.metrics = metrics.NewMetricsManager(ctx) + } + if err = or.initDatabaseCheckPreinit(ctx); err != nil { return err } else if or.preInitMode { @@ -363,7 +367,7 @@ func (or *orchestrator) initPlugins(ctx context.Context) (err error) { return err } } - if err = or.blockchain.Init(ctx, blockchainConfig.SubPrefix(or.blockchain.Name()), &or.bc); err != nil { + if err = or.blockchain.Init(ctx, blockchainConfig.SubPrefix(or.blockchain.Name()), &or.bc, or.metrics); err != nil { return err } @@ -433,9 +437,6 @@ func (or *orchestrator) initPlugins(ctx context.Context) (err error) { } func (or *orchestrator) initComponents(ctx context.Context) (err error) { - if or.metrics == nil { - or.metrics = metrics.NewMetricsManager(ctx) - } if or.data == nil { or.data, err = data.NewDataManager(ctx, or.database, or.sharedstorage, or.dataexchange) diff --git a/internal/orchestrator/orchestrator_test.go b/internal/orchestrator/orchestrator_test.go index e2a517667e..7f09835f88 100644 --- a/internal/orchestrator/orchestrator_test.go +++ b/internal/orchestrator/orchestrator_test.go @@ -197,7 +197,7 @@ func TestBlockchainInitFail(t *testing.T) { or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) ctx, cancelCtx := context.WithCancel(context.Background()) err := or.Init(ctx, cancelCtx) assert.EqualError(t, err, "pop") @@ -208,7 +208,7 @@ func TestBlockchainInitGetConfigRecordsFail(t *testing.T) { or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) ctx, cancelCtx := context.WithCancel(context.Background()) err := or.Init(ctx, cancelCtx) assert.EqualError(t, err, "pop") @@ -224,7 +224,7 @@ func TestBlockchainInitMergeConfigRecordsFail(t *testing.T) { }, nil, nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + 
or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) ctx, cancelCtx := context.WithCancel(context.Background()) err := or.Init(ctx, cancelCtx) @@ -237,7 +237,7 @@ func TestBadSharedStoragePlugin(t *testing.T) { or.sharedstorage = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) ctx, cancelCtx := context.WithCancel(context.Background()) err := or.Init(ctx, cancelCtx) @@ -250,7 +250,7 @@ func TestBadSharedStoragePluginOldConfig(t *testing.T) { or.sharedstorage = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) ctx, cancelCtx := context.WithCancel(context.Background()) err := or.Init(ctx, cancelCtx) @@ -261,7 +261,7 @@ func TestBadSharedStorageInitFail(t *testing.T) { or := newTestOrchestrator() or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) ctx, cancelCtx := context.WithCancel(context.Background()) @@ -275,7 +275,7 @@ func TestBadDataExchangePlugin(t *testing.T) { or.dataexchange = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) ctx, cancelCtx := context.WithCancel(context.Background()) @@ -287,7 +287,7 @@ func TestBadDataExchangeInitFail(t *testing.T) { or := newTestOrchestrator() or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("GetIdentities", mock.Anything, mock.Anything).Return([]*fftypes.Identity{}, nil, nil) @@ -305,7 +305,7 @@ func TestDataExchangePluginOldName(t *testing.T) { 
or.dataexchange = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("GetIdentities", mock.Anything, mock.Anything).Return([]*fftypes.Identity{}, nil, nil) @@ -324,7 +324,7 @@ func TestBadTokensPlugin(t *testing.T) { or.tokens = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("GetIdentities", mock.Anything, mock.Anything).Return([]*fftypes.Identity{}, nil, nil) @@ -345,7 +345,7 @@ func TestBadTokensPluginNoConnector(t *testing.T) { or.tokens = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mbi.On("VerifyIdentitySyntax", mock.Anything, mock.Anything, mock.Anything).Return("", nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -367,7 +367,7 @@ func TestBadTokensPluginNoName(t *testing.T) { or.tokens = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("GetIdentities", mock.Anything, mock.Anything).Return([]*fftypes.Identity{}, nil, nil) @@ -388,7 +388,7 @@ func TestBadTokensPluginInvalidName(t *testing.T) { or.tokens = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("GetIdentities", mock.Anything, mock.Anything).Return([]*fftypes.Identity{}, nil, nil) @@ -409,7 +409,7 @@ func TestBadTokensPluginNoType(t *testing.T) { or.tokens = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, 
mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mbi.On("VerifyIdentitySyntax", mock.Anything, mock.Anything, mock.Anything).Return("", nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -433,7 +433,7 @@ func TestGoodTokensPlugin(t *testing.T) { or.tokens = nil or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("GetIdentities", mock.Anything, mock.Anything).Return([]*fftypes.Identity{}, nil, nil) @@ -497,7 +497,11 @@ func TestInitDataComponentFail(t *testing.T) { func TestInitMetricsComponent(t *testing.T) { or := newTestOrchestrator() or.metrics = nil - or.initComponents(context.Background()) + or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.initPlugins(context.Background()) } func TestInitIdentityComponentFail(t *testing.T) { @@ -632,7 +636,7 @@ func TestInitOK(t *testing.T) { or.mdi.On("GetConfigRecords", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.ConfigRecord{}, nil, nil) or.mdi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mii.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) - or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) + or.mbi.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mps.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) or.mdi.On("GetIdentities", mock.Anything, mock.Anything).Return([]*fftypes.Identity{}, nil, nil) or.mdx.On("Init", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) diff --git a/internal/tokens/fftokens/fftokens.go b/internal/tokens/fftokens/fftokens.go index 3f6337483a..4d637bfb7a 100644 --- a/internal/tokens/fftokens/fftokens.go +++ b/internal/tokens/fftokens/fftokens.go @@ -193,6 +193,8 @@ func (ft *FFTokens) handleTokenPoolCreate(ctx context.Context, data fftypes.JSON rawOutput := data.GetObject("rawOutput") // optional tx := data.GetObject("transaction") txHash := tx.GetString("transactionHash") // optional + location := data.GetString("location") + signature := data.GetString("signature") timestampStr := data.GetString("timestamp") timestamp, err := fftypes.ParseTimeString(timestampStr) @@ -229,6 +231,8 @@ func (ft *FFTokens) handleTokenPoolCreate(ctx context.Context, data fftypes.JSON Output: rawOutput, Info: tx, Timestamp: timestamp, + Location: location, + Signature: signature, }, } @@ -248,6 +252,8 @@ func (ft *FFTokens) handleTokenTransfer(ctx 
context.Context, t fftypes.TokenTran rawOutput := data.GetObject("rawOutput") // optional tx := data.GetObject("transaction") txHash := tx.GetString("transactionHash") // optional + location := data.GetString("location") + signature := data.GetString("signature") timestampStr := data.GetString("timestamp") timestamp, err := fftypes.ParseTimeString(timestampStr) @@ -318,6 +324,8 @@ func (ft *FFTokens) handleTokenTransfer(ctx context.Context, t fftypes.TokenTran Output: rawOutput, Info: tx, Timestamp: timestamp, + Location: location, + Signature: signature, }, } @@ -334,6 +342,8 @@ func (ft *FFTokens) handleTokenApproval(ctx context.Context, data fftypes.JSONOb rawOutput := data.GetObject("rawOutput") // optional tx := data.GetObject("transaction") txHash := tx.GetString("transactionHash") // optional + location := data.GetString("location") + signature := data.GetString("signature") timestampStr := data.GetString("timestamp") timestamp, err := fftypes.ParseTimeString(timestampStr) @@ -380,6 +390,8 @@ func (ft *FFTokens) handleTokenApproval(ctx context.Context, data fftypes.JSONOb Output: rawOutput, Info: tx, Timestamp: timestamp, + Location: location, + Signature: signature, }, } diff --git a/manifest.json b/manifest.json index 37235b0194..41f37a0438 100644 --- a/manifest.json +++ b/manifest.json @@ -15,9 +15,9 @@ "sha": "8625bb1685738365359174ae3bafecece934e6cc6a60c15e2a364324565c7bf7" }, "tokens-erc1155": { - "image": "ghcr.io/hyperledger/firefly-tokens-erc1155", - "tag": "v0.10.5", - "sha": "2cf3711b1a5e3864bfc9c313e270073edb19148303de37e3f8afc5d6b95c390f" + "image": "tokens", + "tag": "latest", + "local": true }, "tokens-erc20-erc721": { "image": "ghcr.io/hyperledger/firefly-tokens-erc20-erc721", diff --git a/mocks/blockchainmocks/plugin.go b/mocks/blockchainmocks/plugin.go index 6321894d0b..d5a07e0a7c 100644 --- a/mocks/blockchainmocks/plugin.go +++ b/mocks/blockchainmocks/plugin.go @@ -10,6 +10,8 @@ import ( fftypes "github.com/hyperledger/firefly/pkg/fftypes" + metrics "github.com/hyperledger/firefly/internal/metrics" + mock "github.com/stretchr/testify/mock" ) @@ -108,13 +110,13 @@ func (_m *Plugin) GetFFIParamValidator(ctx context.Context) (fftypes.FFIParamVal return r0, r1 } -// Init provides a mock function with given fields: ctx, prefix, callbacks -func (_m *Plugin) Init(ctx context.Context, prefix config.Prefix, callbacks blockchain.Callbacks) error { - ret := _m.Called(ctx, prefix, callbacks) +// Init provides a mock function with given fields: ctx, prefix, callbacks, _a3 +func (_m *Plugin) Init(ctx context.Context, prefix config.Prefix, callbacks blockchain.Callbacks, _a3 metrics.Manager) error { + ret := _m.Called(ctx, prefix, callbacks, _a3) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, config.Prefix, blockchain.Callbacks) error); ok { - r0 = rf(ctx, prefix, callbacks) + if rf, ok := ret.Get(0).(func(context.Context, config.Prefix, blockchain.Callbacks, metrics.Manager) error); ok { + r0 = rf(ctx, prefix, callbacks, _a3) } else { r0 = ret.Error(0) } diff --git a/mocks/metricsmocks/manager.go b/mocks/metricsmocks/manager.go index 4a6fcb2ad5..36e88081f0 100644 --- a/mocks/metricsmocks/manager.go +++ b/mocks/metricsmocks/manager.go @@ -20,6 +20,21 @@ func (_m *Manager) AddTime(id string) { _m.Called(id) } +// BlockchainEvent provides a mock function with given fields: location, signature +func (_m *Manager) BlockchainEvent(location string, signature string) { + _m.Called(location, signature) +} + +// BlockchainQuery provides a mock function with given fields: 
location, methodName +func (_m *Manager) BlockchainQuery(location string, methodName string) { + _m.Called(location, methodName) +} + +// BlockchainTransaction provides a mock function with given fields: location, methodName +func (_m *Manager) BlockchainTransaction(location string, methodName string) { + _m.Called(location, methodName) +} + // CountBatchPin provides a mock function with given fields: func (_m *Manager) CountBatchPin() { _m.Called() diff --git a/pkg/blockchain/plugin.go b/pkg/blockchain/plugin.go index 07ececf419..96fdddbd67 100644 --- a/pkg/blockchain/plugin.go +++ b/pkg/blockchain/plugin.go @@ -20,6 +20,7 @@ import ( "context" "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/metrics" "github.com/hyperledger/firefly/pkg/fftypes" ) @@ -32,7 +33,7 @@ type Plugin interface { // Init initializes the plugin, with configuration // Returns the supported featureset of the interface - Init(ctx context.Context, prefix config.Prefix, callbacks Callbacks) error + Init(ctx context.Context, prefix config.Prefix, callbacks Callbacks, metrics metrics.Manager) error // Blockchain interface must not deliver any events until start is called Start() error @@ -170,6 +171,12 @@ type Event struct { // We capture the blockchain TXID as in the case // of a FireFly transaction we want to reflect that blockchain TX back onto the FireFly TX object BlockchainTXID string + + // Location is the blockchain location of the contract that emitted the event + Location string + + // Signature is the event signature, including the event name and output types + Signature string } type EventWithSubscription struct { diff --git a/pkg/fftypes/blockchainevent.go b/pkg/fftypes/blockchainevent.go index b9c4bd5100..3b83edcfbc 100644 --- a/pkg/fftypes/blockchainevent.go +++ b/pkg/fftypes/blockchainevent.go @@ -28,4 +28,6 @@ type BlockchainEvent struct { Info JSONObject `json:"info,omitempty"` Timestamp *FFTime `json:"timestamp,omitempty"` TX TransactionRef `json:"tx"` + Location string `json:"location,omitempty"` + Signature string `json:"signature,omitempty"` } From 2fb249c078758634d46044784aed54c7850794d6 Mon Sep 17 00:00:00 2001 From: Nicko Guyer Date: Fri, 4 Mar 2022 09:24:23 -0500 Subject: [PATCH 2/6] Exclude signature and location from OpenAPI spec Signed-off-by: Nicko Guyer --- docs/swagger/swagger.yaml | 12 ------------ pkg/fftypes/blockchainevent.go | 4 ++-- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index 1f7599ac31..9f112ada0c 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -1285,8 +1285,6 @@ paths: additionalProperties: {} type: object listener: {} - location: - type: string name: type: string namespace: @@ -1299,8 +1297,6 @@ paths: sequence: format: int64 type: integer - signature: - type: string source: type: string timestamp: {} @@ -1350,8 +1346,6 @@ paths: additionalProperties: {} type: object listener: {} - location: - type: string name: type: string namespace: @@ -1364,8 +1358,6 @@ paths: sequence: format: int64 type: integer - signature: - type: string source: type: string timestamp: {} @@ -8731,8 +8723,6 @@ paths: additionalProperties: {} type: object listener: {} - location: - type: string name: type: string namespace: @@ -8745,8 +8735,6 @@ paths: sequence: format: int64 type: integer - signature: - type: string source: type: string timestamp: {} diff --git a/pkg/fftypes/blockchainevent.go b/pkg/fftypes/blockchainevent.go index 3b83edcfbc..fc6b443752 100644 
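To make the two new Event fields concrete: the Ethereum connector above derives Location from the emitting contract address and takes Signature straight from the ethconnect payload, and the event manager only records the metric when both are set. A condensed sketch of that flow, with names taken from the diffs in this patch and an illustrative payload:

	ev := blockchain.Event{
		Name:      "Changed",
		Location:  fmt.Sprintf("address=%s", msgJSON.GetString("address")), // buildEventLocationString
		Signature: msgJSON.GetString("signature"),
	}
	// later, when the event is persisted:
	if em.metrics.IsMetricsEnabled() && ev.Location != "" && ev.Signature != "" {
		em.metrics.BlockchainEvent(ev.Location, ev.Signature) // ff_blockchain_events_total
	}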
--- a/pkg/fftypes/blockchainevent.go +++ b/pkg/fftypes/blockchainevent.go @@ -28,6 +28,6 @@ type BlockchainEvent struct { Info JSONObject `json:"info,omitempty"` Timestamp *FFTime `json:"timestamp,omitempty"` TX TransactionRef `json:"tx"` - Location string `json:"location,omitempty"` - Signature string `json:"signature,omitempty"` + Location string `json:"-"` + Signature string `json:"-"` } From 62037fd9344c72ff61b5b9e50767d76ed480c58f Mon Sep 17 00:00:00 2001 From: Nicko Guyer Date: Fri, 4 Mar 2022 12:47:09 -0500 Subject: [PATCH 3/6] Fix code coverage and update manifest.json Signed-off-by: Nicko Guyer --- internal/metrics/metrics_test.go | 35 ++++++++++++++++++++++++++++++++ manifest.json | 22 ++++++++++---------- 2 files changed, 46 insertions(+), 11 deletions(-) diff --git a/internal/metrics/metrics_test.go b/internal/metrics/metrics_test.go index 57ab703dca..9e15458191 100644 --- a/internal/metrics/metrics_test.go +++ b/internal/metrics/metrics_test.go @@ -23,6 +23,8 @@ import ( "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" ) @@ -174,6 +176,39 @@ func TestTransferConfirmedMintBurn(t *testing.T) { assert.Equal(t, len(mm.timeMap), 0) } +func TestBlockchainTransaction(t *testing.T) { + mm, cancel := newTestMetricsManager(t) + defer cancel() + mm.timeMap[tokenLocalID.String()] = time.Now() + mm.BlockchainTransaction("location", "methodName") + m, err := BlockchainTransactionsCounter.GetMetricWith(prometheus.Labels{LocationLabelName: "location", MethodNameLabelName: "methodName"}) + assert.NoError(t, err) + v := testutil.ToFloat64(m) + assert.Equal(t, float64(1), v) +} + +func TestBlockchainQuery(t *testing.T) { + mm, cancel := newTestMetricsManager(t) + defer cancel() + mm.timeMap[tokenLocalID.String()] = time.Now() + mm.BlockchainQuery("location", "methodName") + m, err := BlockchainQueriesCounter.GetMetricWith(prometheus.Labels{LocationLabelName: "location", MethodNameLabelName: "methodName"}) + assert.NoError(t, err) + v := testutil.ToFloat64(m) + assert.Equal(t, float64(1), v) +} + +func TestBlockchainEvents(t *testing.T) { + mm, cancel := newTestMetricsManager(t) + defer cancel() + mm.timeMap[tokenLocalID.String()] = time.Now() + mm.BlockchainEvent("location", "signature") + m, err := BlockchainEventsCounter.GetMetricWith(prometheus.Labels{LocationLabelName: "location", SignatureLabelName: "signature"}) + assert.NoError(t, err) + v := testutil.ToFloat64(m) + assert.Equal(t, float64(1), v) +} + func TestIsMetricsEnabledTrue(t *testing.T) { mm, cancel := newTestMetricsManager(t) defer cancel() diff --git a/manifest.json b/manifest.json index 41f37a0438..294e7dd26b 100644 --- a/manifest.json +++ b/manifest.json @@ -1,28 +1,28 @@ { "ethconnect": { "image": "ghcr.io/hyperledger/firefly-ethconnect", - "tag": "v3.1.4", - "sha": "c18b15187f8fbcab066e873b13e3d2c23ce24043a73249d08438f9aa2dd5f7f1" + "tag": "v3.1.3-20220301-21", + "sha": "62d84aeb9119303f635a02e2e43e7cf5baa1af8f0eb586da8a33a5d8d8ddf55c" }, "fabconnect": { "image": "ghcr.io/hyperledger/firefly-fabconnect", - "tag": "v0.9.11", - "sha": "0744f9cb01ea1de4e141d83c29aab9aadf1ee8bc4481b7c8025c712da7f79932" + "tag": "v0.9.10-20220221-16", + "sha": "0cc6125718cb8e467d448311036c1b4384aa1843c9d0f16e0dc3ac4fac736a8e" }, "dataexchange-https": { "image": "ghcr.io/hyperledger/firefly-dataexchange-https", - "tag": "v0.10.4", - "sha": 
"8625bb1685738365359174ae3bafecece934e6cc6a60c15e2a364324565c7bf7" + "tag": "v0.10.3-20220209-6", + "sha": "a94776c7f89c27548149e080627fe3c55ad528835ecea0131b1c1ae96981398e" }, "tokens-erc1155": { - "image": "tokens", - "tag": "latest", - "local": true + "image": "ghcr.io/hyperledger/firefly-tokens-erc1155", + "tag": "v0.10.5-20220304-19", + "sha": "2e58cdd26f89d864c7a73c35293ffd4a9b6e74e7aa5b6b5ee37d305f9d6ef389" }, "tokens-erc20-erc721": { "image": "ghcr.io/hyperledger/firefly-tokens-erc20-erc721", - "tag": "v0.1.6", - "sha": "142dcd72355a73c2ba69f3c1eeab8a7ab5c304a7e01ee19f14a8a95d20b4262d" + "tag": "v0.1.6-20220304-17", + "sha": "0d830b676c13f1c578b068be8e9512f1b2dbd6d8fb76d5d64cf34febe5d5042b" }, "build": { "firefly-builder": { From 38a5400a56a7336a0072efd35a87f0b9c49d81ce Mon Sep 17 00:00:00 2001 From: Nicko Guyer Date: Mon, 21 Mar 2022 10:01:49 -0400 Subject: [PATCH 4/6] Refactor blockchain metrics code Signed-off-by: Nicko Guyer --- internal/events/batch_pin_complete.go | 1 + internal/events/blockchain_event.go | 12 ++++++----- internal/events/blockchain_event_test.go | 26 ++++++++++++++++++++++++ internal/events/token_pool_created.go | 1 + internal/events/tokens_approved.go | 2 +- internal/events/tokens_transferred.go | 7 ++----- pkg/fftypes/blockchainevent.go | 2 -- 7 files changed, 38 insertions(+), 13 deletions(-) diff --git a/internal/events/batch_pin_complete.go b/internal/events/batch_pin_complete.go index b85eb3af86..8505c3a317 100644 --- a/internal/events/batch_pin_complete.go +++ b/internal/events/batch_pin_complete.go @@ -122,6 +122,7 @@ func (em *eventManager) handleBroadcastPinComplete(batchPin *blockchain.BatchPin if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return err } + em.emitBlockchainEventMetric(batchPin.Event) if err := em.persistBatchTransaction(ctx, batchPin); err != nil { return err } diff --git a/internal/events/blockchain_event.go b/internal/events/blockchain_event.go index 147c338e28..f55e6d4894 100644 --- a/internal/events/blockchain_event.go +++ b/internal/events/blockchain_event.go @@ -35,8 +35,6 @@ func buildBlockchainEvent(ns string, subID *fftypes.UUID, event *blockchain.Even Output: event.Output, Info: event.Info, Timestamp: event.Timestamp, - Location: event.Location, - Signature: event.Signature, } if tx != nil { ev.TX = *tx @@ -45,9 +43,6 @@ func buildBlockchainEvent(ns string, subID *fftypes.UUID, event *blockchain.Even } func (em *eventManager) persistBlockchainEvent(ctx context.Context, chainEvent *fftypes.BlockchainEvent) error { - if em.metrics.IsMetricsEnabled() && chainEvent.Location != "" && chainEvent.Signature != "" { - em.metrics.BlockchainEvent(chainEvent.Location, chainEvent.Signature) - } if err := em.database.InsertBlockchainEvent(ctx, chainEvent); err != nil { return err } @@ -58,6 +53,12 @@ func (em *eventManager) persistBlockchainEvent(ctx context.Context, chainEvent * return nil } +func (em *eventManager) emitBlockchainEventMetric(event blockchain.Event) { + if em.metrics.IsMetricsEnabled() && event.Location != "" && event.Signature != "" { + em.metrics.BlockchainEvent(event.Location, event.Signature) + } +} + func (em *eventManager) BlockchainEvent(event *blockchain.EventWithSubscription) error { return em.retry.Do(em.ctx, "persist contract event", func(attempt int) (bool, error) { err := em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { @@ -75,6 +76,7 @@ func (em *eventManager) BlockchainEvent(event *blockchain.EventWithSubscription) if err := em.persistBlockchainEvent(ctx, chainEvent); 
err != nil { return err } + em.emitBlockchainEventMetric(event.Event) return nil }) return err != nil, err diff --git a/internal/events/blockchain_event_test.go b/internal/events/blockchain_event_test.go index f92a5b760b..8f8cf7d3ed 100644 --- a/internal/events/blockchain_event_test.go +++ b/internal/events/blockchain_event_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/metricsmocks" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" @@ -95,3 +96,28 @@ func TestContractEventUnknownSubscription(t *testing.T) { mdi.AssertExpectations(t) } + +func TestBlockchainEventMetric(t *testing.T) { + em, cancel := newTestEventManager(t) + defer cancel() + mm := &metricsmocks.Manager{} + em.metrics = mm + mm.On("IsMetricsEnabled").Return(true) + mm.On("BlockchainEvent", mock.Anything, mock.Anything).Return() + + event := blockchain.Event{ + BlockchainTXID: "0xabcd1234", + Name: "Changed", + Output: fftypes.JSONObject{ + "value": "1", + }, + Info: fftypes.JSONObject{ + "blockNumber": "10", + }, + Location: "0x12345", + Signature: "John Hancock", + } + + em.emitBlockchainEventMetric(event) + mm.AssertExpectations(t) +} diff --git a/internal/events/token_pool_created.go b/internal/events/token_pool_created.go index 5a04baf905..2c2714d584 100644 --- a/internal/events/token_pool_created.go +++ b/internal/events/token_pool_created.go @@ -45,6 +45,7 @@ func (em *eventManager) confirmPool(ctx context.Context, pool *fftypes.TokenPool if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return err } + em.emitBlockchainEventMetric(*ev) if op, err := em.findTXOperation(ctx, pool.TX.ID, fftypes.OpTypeTokenActivatePool); err != nil { return err } else if op == nil { diff --git a/internal/events/tokens_approved.go b/internal/events/tokens_approved.go index 931d7ccbd0..71a7fac079 100644 --- a/internal/events/tokens_approved.go +++ b/internal/events/tokens_approved.go @@ -86,7 +86,7 @@ func (em *eventManager) persistTokenApproval(ctx context.Context, approval *toke if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return false, err } - + em.emitBlockchainEventMetric(approval.Event) if err := em.database.UpsertTokenApproval(ctx, &approval.TokenApproval); err != nil { log.L(ctx).Errorf("Failed to record token approval '%s': %s", approval.ProtocolID, err) return false, err diff --git a/internal/events/tokens_transferred.go b/internal/events/tokens_transferred.go index 6a07db61b6..dd61093ca5 100644 --- a/internal/events/tokens_transferred.go +++ b/internal/events/tokens_transferred.go @@ -100,7 +100,7 @@ func (em *eventManager) persistTokenTransfer(ctx context.Context, transfer *toke if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return false, err } - + em.emitBlockchainEventMetric(transfer.Event) if err := em.database.UpsertTokenTransfer(ctx, &transfer.TokenTransfer); err != nil { log.L(ctx).Errorf("Failed to record token transfer '%s': %s", transfer.ProtocolID, err) return false, err @@ -142,10 +142,7 @@ func (em *eventManager) TokensTransferred(ti tokens.Plugin, transfer *tokens.Tok } } } - - if em.metrics.IsMetricsEnabled() && transfer.Event.Location != "" && transfer.Event.Signature != "" { - em.metrics.BlockchainEvent(transfer.Event.Location, transfer.Event.Signature) - } + em.emitBlockchainEventMetric(transfer.Event) event := fftypes.NewEvent(fftypes.EventTypeTransferConfirmed, transfer.Namespace, 
transfer.LocalID, transfer.TX.ID) return em.database.InsertEvent(ctx, event) diff --git a/pkg/fftypes/blockchainevent.go b/pkg/fftypes/blockchainevent.go index fc6b443752..b9c4bd5100 100644 --- a/pkg/fftypes/blockchainevent.go +++ b/pkg/fftypes/blockchainevent.go @@ -28,6 +28,4 @@ type BlockchainEvent struct { Info JSONObject `json:"info,omitempty"` Timestamp *FFTime `json:"timestamp,omitempty"` TX TransactionRef `json:"tx"` - Location string `json:"-"` - Signature string `json:"-"` } From b4fde1483659843a5ffdd8b56c7f16c4958ac35d Mon Sep 17 00:00:00 2001 From: Nicko Guyer Date: Mon, 21 Mar 2022 11:02:34 -0400 Subject: [PATCH 5/6] Merge branch 'main' into blockchain-metrics Signed-off-by: Nicko Guyer --- .github/workflows/go.yml | 8 + .github/workflows/integration.yml | 1 + .vscode/settings.json | 3 +- Makefile | 5 +- cmd/firefly.go | 5 +- .../000008_create_operations_table.up.sql | 1 + .../000069_add_operation_retry.down.sql | 3 + .../000069_add_operation_retry.up.sql | 3 + .../000070_add_subscriptions_filters.down.sql | 7 + .../000070_add_subscriptions_filters.up.sql | 8 + ...00071_rename_batch_payload_column.down.sql | 3 + .../000071_rename_batch_payload_column.up.sql | 3 + .../000072_add_tokenpool_info.down.sql | 3 + .../postgres/000072_add_tokenpool_info.up.sql | 3 + ...073_add_contractlisteners_options.down.sql | 3 + ...00073_add_contractlisteners_options.up.sql | 3 + ...0074_add_event_and_listener_topic.down.sql | 8 + ...000074_add_event_and_listener_topic.up.sql | 14 + .../000075_add_pins_batch_hash.down.sql | 5 + .../000075_add_pins_batch_hash.up.sql | 5 + .../000008_create_operations_table.up.sql | 1 + .../000063_create_identities_table.down.sql | 8 +- .../000063_create_identities_table.up.sql | 10 +- .../000069_add_operation_retry.down.sql | 1 + .../sqlite/000069_add_operation_retry.up.sql | 1 + .../000070_add_subscriptions_filters.down.sql | 5 + .../000070_add_subscriptions_filters.up.sql | 6 + ...00071_rename_batch_payload_column.down.sql | 1 + .../000071_rename_batch_payload_column.up.sql | 1 + .../sqlite/000072_add_tokenpool_info.down.sql | 1 + .../sqlite/000072_add_tokenpool_info.up.sql | 1 + ...073_add_contractlisteners_options.down.sql | 1 + ...00073_add_contractlisteners_options.up.sql | 1 + ...0074_add_event_and_listener_topic.down.sql | 5 + ...000074_add_event_and_listener_topic.up.sql | 7 + .../000075_add_pins_batch_hash.down.sql | 1 + .../sqlite/000075_add_pins_batch_hash.up.sql | 1 + docs/Gemfile.lock | 29 +- docs/contributors/contributors.md | 9 +- docs/gettingstarted/events.md | 17 +- docs/swagger/swagger.yaml | 744 ++++++----- go.mod | 2 +- go.sum | 4 +- internal/apiserver/route_get_batch_by_id.go | 4 +- .../apiserver/route_get_batch_by_id_test.go | 2 +- internal/apiserver/route_get_batches.go | 4 +- internal/apiserver/route_get_batches_test.go | 2 +- internal/apiserver/route_get_data.go | 2 +- internal/apiserver/route_get_data_test.go | 2 +- internal/apiserver/route_get_events.go | 8 +- internal/apiserver/route_get_events_test.go | 24 + internal/apiserver/route_get_msg_data.go | 2 +- internal/apiserver/route_get_msg_data_test.go | 2 +- internal/apiserver/route_get_net_node.go | 6 +- internal/apiserver/route_get_net_node_test.go | 2 +- internal/apiserver/route_get_net_org.go | 6 +- internal/apiserver/route_get_net_org_test.go | 2 +- ...et_msg_ops.go => route_get_status_pins.go} | 21 +- ..._test.go => route_get_status_pins_test.go} | 8 +- internal/apiserver/route_post_op_retry.go | 52 + .../apiserver/route_post_op_retry_test.go | 62 + 
internal/apiserver/route_post_token_pool.go | 2 +- internal/apiserver/routes.go | 123 +- internal/assets/manager.go | 59 +- internal/assets/manager_test.go | 45 +- internal/assets/operations.go | 176 +++ internal/assets/operations_test.go | 526 ++++++++ internal/assets/token_approval.go | 8 +- internal/assets/token_approval_test.go | 104 +- internal/assets/token_pool.go | 26 +- internal/assets/token_pool_test.go | 213 +++- internal/assets/token_transfer.go | 35 +- internal/assets/token_transfer_test.go | 325 ++--- internal/batch/batch_manager.go | 143 ++- internal/batch/batch_manager_test.go | 337 ++--- internal/batch/batch_processor.go | 192 ++- internal/batch/batch_processor_test.go | 261 +++- internal/batchpin/batchpin.go | 45 +- internal/batchpin/batchpin_test.go | 119 +- internal/batchpin/operations.go | 103 ++ internal/batchpin/operations_test.go | 148 +++ internal/blockchain/ethereum/ethereum.go | 14 +- internal/blockchain/ethereum/ethereum_test.go | 20 +- internal/blockchain/ethereum/eventstream.go | 18 +- internal/blockchain/fabric/eventstream.go | 13 +- internal/blockchain/fabric/fabric.go | 10 +- internal/blockchain/fabric/fabric_test.go | 25 +- internal/broadcast/datatype_test.go | 35 +- internal/broadcast/definition.go | 47 +- internal/broadcast/definition_test.go | 23 - internal/broadcast/manager.go | 122 +- internal/broadcast/manager_test.go | 352 +++--- internal/broadcast/message.go | 88 +- internal/broadcast/message_test.go | 206 +-- internal/broadcast/namespace_test.go | 5 +- internal/broadcast/operations.go | 187 +++ internal/broadcast/operations_test.go | 516 ++++++++ internal/broadcast/tokenpool_test.go | 11 +- internal/config/config.go | 63 +- internal/contracts/manager.go | 75 +- internal/contracts/manager_test.go | 88 +- internal/contracts/operations.go | 79 ++ internal/contracts/operations_test.go | 94 ++ internal/data/blobstore.go | 41 +- internal/data/blobstore_test.go | 214 +--- internal/data/data_manager.go | 333 +++-- internal/data/data_manager_test.go | 682 ++++++++-- internal/data/message_writer.go | 203 +++ internal/data/message_writer_test.go | 142 +++ internal/database/postgres/postgres.go | 1 + internal/database/sqlcommon/batch_sql.go | 32 +- internal/database/sqlcommon/batch_sql_test.go | 86 +- internal/database/sqlcommon/chart_sql.go | 190 ++- internal/database/sqlcommon/chart_sql_test.go | 132 +- internal/database/sqlcommon/config.go | 13 +- .../sqlcommon/contractlisteners_sql.go | 8 + .../sqlcommon/contractlisteners_sql_test.go | 32 +- internal/database/sqlcommon/data_sql.go | 115 +- internal/database/sqlcommon/data_sql_test.go | 59 +- internal/database/sqlcommon/event_sql.go | 68 +- internal/database/sqlcommon/event_sql_test.go | 59 + internal/database/sqlcommon/message_sql.go | 137 +- .../database/sqlcommon/message_sql_test.go | 119 ++ internal/database/sqlcommon/operation_sql.go | 13 +- .../database/sqlcommon/operation_sql_test.go | 14 +- internal/database/sqlcommon/pin_sql.go | 82 +- internal/database/sqlcommon/pin_sql_test.go | 56 + internal/database/sqlcommon/provider.go | 2 + .../database/sqlcommon/provider_mock_test.go | 6 +- internal/database/sqlcommon/sqlcommon.go | 86 +- internal/database/sqlcommon/sqlcommon_test.go | 23 + .../database/sqlcommon/subscription_sql.go | 29 +- .../sqlcommon/subscription_sql_test.go | 14 +- internal/database/sqlcommon/tokenpool_sql.go | 4 + .../database/sqlcommon/tokenpool_sql_test.go | 3 + .../database/sqlcommon/tokentransfer_sql.go | 1 + internal/dataexchange/ffdx/ffdx.go | 2 +- 
internal/dataexchange/ffdx/ffdx_test.go | 2 +- internal/definitions/definition_handler.go | 23 +- .../definition_handler_contracts.go | 8 +- .../definition_handler_contracts_test.go | 14 +- .../definition_handler_datatype.go | 4 +- .../definition_handler_datatype_test.go | 16 +- .../definition_handler_identity_claim.go | 6 +- .../definition_handler_identity_claim_test.go | 50 +- .../definition_handler_identity_update.go | 4 +- ...definition_handler_identity_update_test.go | 14 +- ...efinition_handler_identity_verification.go | 4 +- ...tion_handler_identity_verification_test.go | 28 +- .../definition_handler_namespace.go | 4 +- .../definition_handler_namespace_test.go | 20 +- .../definition_handler_network_node.go | 2 +- .../definition_handler_network_node_test.go | 8 +- .../definition_handler_network_org.go | 2 +- .../definition_handler_network_org_test.go | 4 +- .../definitions/definition_handler_test.go | 15 +- .../definition_handler_tokenpool.go | 8 +- .../definition_handler_tokenpool_test.go | 16 +- internal/events/aggregator.go | 335 +++-- internal/events/aggregator_batch_state.go | 64 +- .../events/aggregator_batch_state_test.go | 29 +- internal/events/aggregator_test.go | 1111 ++++++++++------- internal/events/batch_pin_complete.go | 109 +- internal/events/batch_pin_complete_test.go | 581 ++++----- internal/events/blockchain_event.go | 53 +- internal/events/blockchain_event_test.go | 51 +- internal/events/dx_callbacks.go | 111 +- internal/events/dx_callbacks_test.go | 183 ++- internal/events/event_dispatcher.go | 125 +- internal/events/event_dispatcher_test.go | 590 ++++++--- internal/events/event_manager.go | 93 +- internal/events/event_manager_test.go | 12 +- internal/events/event_poller.go | 83 +- internal/events/event_poller_test.go | 49 +- internal/events/operation_update.go | 30 +- internal/events/operation_update_test.go | 1 + internal/events/persist_batch.go | 285 +++-- internal/events/persist_batch_test.go | 228 +++- internal/events/ss_callbacks.go | 71 ++ internal/events/ss_callbacks_test.go | 145 +++ internal/events/subscription_manager.go | 145 ++- internal/events/subscription_manager_test.go | 223 +++- internal/events/system/events.go | 4 +- internal/events/system/events_test.go | 18 +- internal/events/token_pool_created.go | 48 +- internal/events/token_pool_created_test.go | 209 +++- internal/events/tokens_approved.go | 6 +- internal/events/tokens_transferred.go | 8 +- internal/events/webhooks/webhooks.go | 6 +- internal/events/webhooks/webhooks_test.go | 210 ++-- .../events/websockets/websocket_connection.go | 16 +- internal/events/websockets/websockets.go | 4 +- internal/events/websockets/websockets_test.go | 20 +- internal/i18n/en_translations.go | 8 + internal/networkmap/data_query.go | 46 +- internal/networkmap/data_query_test.go | 60 +- internal/networkmap/manager.go | 4 +- internal/operations/cache.go | 84 ++ internal/operations/cache_test.go | 129 ++ internal/operations/manager.go | 160 +++ internal/operations/manager_test.go | 383 ++++++ internal/orchestrator/bound_callbacks.go | 14 +- internal/orchestrator/bound_callbacks_test.go | 16 +- internal/orchestrator/data_query.go | 36 +- internal/orchestrator/data_query_test.go | 135 +- internal/orchestrator/orchestrator.go | 69 +- internal/orchestrator/orchestrator_test.go | 46 + internal/orchestrator/txn_status.go | 46 +- internal/orchestrator/txn_status_test.go | 88 +- internal/privatemessaging/groupmanager.go | 6 +- .../privatemessaging/groupmanager_test.go | 24 +- internal/privatemessaging/message.go | 76 +- 
internal/privatemessaging/message_test.go | 274 ++-- internal/privatemessaging/operations.go | 158 +++ internal/privatemessaging/operations_test.go | 534 ++++++++ internal/privatemessaging/privatemessaging.go | 76 +- .../privatemessaging/privatemessaging_test.go | 281 +++-- internal/privatemessaging/recipients.go | 38 +- internal/privatemessaging/recipients_test.go | 10 +- internal/restclient/ffresty.go | 10 +- internal/shareddownload/download_manager.go | 244 ++++ .../shareddownload/download_manager_test.go | 278 +++++ internal/shareddownload/download_worker.go | 74 ++ internal/shareddownload/operations.go | 191 +++ internal/shareddownload/operations_test.go | 136 ++ internal/sharedstorage/ipfs/ipfs.go | 4 +- internal/sharedstorage/ipfs/ipfs_test.go | 10 +- internal/syncasync/sync_async_bridge.go | 10 +- internal/syncasync/sync_async_bridge_test.go | 418 ++++--- internal/tokens/fftokens/fftokens.go | 56 +- internal/tokens/fftokens/fftokens_test.go | 53 +- internal/txcommon/event_enrich.go | 51 + internal/txcommon/event_enrich_test.go | 202 +++ internal/txcommon/token_inputs.go | 81 +- internal/txcommon/token_inputs_test.go | 76 +- internal/txcommon/txcommon.go | 76 +- internal/txcommon/txcommon_test.go | 86 +- manifest.json | 22 +- mocks/assetmocks/manager.go | 84 +- mocks/batchpinmocks/submitter.go | 71 +- mocks/blockchainmocks/plugin.go | 8 +- mocks/broadcastmocks/manager.go | 67 + mocks/contractmocks/manager.go | 67 + mocks/databasemocks/plugin.go | 124 +- mocks/dataexchangemocks/callbacks.go | 28 +- mocks/datamocks/manager.go | 202 ++- mocks/definitionsmocks/definition_handlers.go | 6 +- mocks/eventmocks/event_manager.go | 67 +- mocks/eventsmocks/plugin.go | 4 +- mocks/eventsmocks/plugin_all.go | 4 +- mocks/networkmapmocks/manager.go | 20 +- mocks/operationmocks/manager.go | 103 ++ mocks/orchestratormocks/orchestrator.go | 114 +- mocks/privatemessagingmocks/manager.go | 67 + mocks/shareddownloadmocks/callbacks.go | 50 + mocks/shareddownloadmocks/manager.go | 62 + mocks/sharedstoragemocks/plugin.go | 50 +- mocks/tokenmocks/plugin.go | 14 +- mocks/txcommonmocks/helper.go | 56 +- pkg/blockchain/plugin.go | 8 +- pkg/database/plugin.go | 54 +- pkg/dataexchange/plugin.go | 4 +- pkg/events/plugin.go | 4 +- pkg/fftypes/batch.go | 176 ++- pkg/fftypes/batch_test.go | 59 +- pkg/fftypes/bytetypes.go | 7 + pkg/fftypes/charthistogram.go | 18 +- pkg/fftypes/constants.go | 7 + pkg/fftypes/contract_listener.go | 42 +- pkg/fftypes/contract_listener_test.go | 34 + pkg/fftypes/contracts.go | 6 +- pkg/fftypes/contracts_test.go | 2 +- pkg/fftypes/data.go | 57 + pkg/fftypes/data_test.go | 47 + pkg/fftypes/datatype.go | 8 +- pkg/fftypes/datatype_test.go | 2 +- pkg/fftypes/event.go | 45 +- pkg/fftypes/event_test.go | 3 +- pkg/fftypes/ffi.go | 2 +- pkg/fftypes/ffi_test.go | 2 +- .../{manifest.go => id_and_sequence.go} | 15 +- pkg/fftypes/identity.go | 6 +- pkg/fftypes/jsonany.go | 7 + pkg/fftypes/jsonany_test.go | 16 + pkg/fftypes/manifest_test.go | 39 - pkg/fftypes/message.go | 59 +- pkg/fftypes/message_test.go | 19 +- pkg/fftypes/namespace.go | 22 +- pkg/fftypes/namespace_test.go | 2 +- pkg/fftypes/offset.go | 8 +- pkg/fftypes/operation.go | 45 +- pkg/fftypes/operation_test.go | 4 +- pkg/fftypes/pin.go | 5 +- pkg/fftypes/stringarray.go | 4 +- pkg/fftypes/stringarray_test.go | 10 +- pkg/fftypes/subscription.go | 72 +- pkg/fftypes/subscription_test.go | 40 +- pkg/fftypes/tokenpool.go | 13 +- pkg/fftypes/tokenpool_test.go | 2 +- pkg/fftypes/tokentransfer.go | 6 +- pkg/fftypes/transaction.go | 20 +- 
pkg/fftypes/transaction_test.go | 27 + pkg/fftypes/transport_wrapper.go | 4 +- pkg/fftypes/transport_wrapper_test.go | 10 +- pkg/fftypes/verifier.go | 6 +- pkg/fftypes/websocket_actions.go | 10 +- pkg/sharedstorage/plugin.go | 8 +- pkg/tokens/plugin.go | 13 +- test/data/erc20/ERC20WithData.json | 489 ++++++++ test/data/erc721/ERC721WithData.json | 572 +++++++++ test/data/simplestorage/simple_storage.json | 55 + .../data/simplestorage/simplestorage.abi.json | 47 - test/data/simplestorage/simplestorage.bin | Bin 446 -> 0 bytes test/e2e/e2e_test.go | 29 +- test/e2e/ethereum_contract_test.go | 50 +- test/e2e/restclient_test.go | 18 +- test/e2e/run.sh | 26 + test/e2e/stack.go | 1 + test/e2e/tokens_test.go | 70 +- 319 files changed, 17443 insertions(+), 5892 deletions(-) create mode 100644 db/migrations/postgres/000069_add_operation_retry.down.sql create mode 100644 db/migrations/postgres/000069_add_operation_retry.up.sql create mode 100644 db/migrations/postgres/000070_add_subscriptions_filters.down.sql create mode 100644 db/migrations/postgres/000070_add_subscriptions_filters.up.sql create mode 100644 db/migrations/postgres/000071_rename_batch_payload_column.down.sql create mode 100644 db/migrations/postgres/000071_rename_batch_payload_column.up.sql create mode 100644 db/migrations/postgres/000072_add_tokenpool_info.down.sql create mode 100644 db/migrations/postgres/000072_add_tokenpool_info.up.sql create mode 100644 db/migrations/postgres/000073_add_contractlisteners_options.down.sql create mode 100644 db/migrations/postgres/000073_add_contractlisteners_options.up.sql create mode 100644 db/migrations/postgres/000074_add_event_and_listener_topic.down.sql create mode 100644 db/migrations/postgres/000074_add_event_and_listener_topic.up.sql create mode 100644 db/migrations/postgres/000075_add_pins_batch_hash.down.sql create mode 100644 db/migrations/postgres/000075_add_pins_batch_hash.up.sql create mode 100644 db/migrations/sqlite/000069_add_operation_retry.down.sql create mode 100644 db/migrations/sqlite/000069_add_operation_retry.up.sql create mode 100644 db/migrations/sqlite/000070_add_subscriptions_filters.down.sql create mode 100644 db/migrations/sqlite/000070_add_subscriptions_filters.up.sql create mode 100644 db/migrations/sqlite/000071_rename_batch_payload_column.down.sql create mode 100644 db/migrations/sqlite/000071_rename_batch_payload_column.up.sql create mode 100644 db/migrations/sqlite/000072_add_tokenpool_info.down.sql create mode 100644 db/migrations/sqlite/000072_add_tokenpool_info.up.sql create mode 100644 db/migrations/sqlite/000073_add_contractlisteners_options.down.sql create mode 100644 db/migrations/sqlite/000073_add_contractlisteners_options.up.sql create mode 100644 db/migrations/sqlite/000074_add_event_and_listener_topic.down.sql create mode 100644 db/migrations/sqlite/000074_add_event_and_listener_topic.up.sql create mode 100644 db/migrations/sqlite/000075_add_pins_batch_hash.down.sql create mode 100644 db/migrations/sqlite/000075_add_pins_batch_hash.up.sql rename internal/apiserver/{route_get_msg_ops.go => route_get_status_pins.go} (65%) rename internal/apiserver/{route_get_msg_ops_test.go => route_get_status_pins_test.go} (78%) create mode 100644 internal/apiserver/route_post_op_retry.go create mode 100644 internal/apiserver/route_post_op_retry_test.go create mode 100644 internal/assets/operations.go create mode 100644 internal/assets/operations_test.go create mode 100644 internal/batchpin/operations.go create mode 100644 internal/batchpin/operations_test.go create 
mode 100644 internal/broadcast/operations.go create mode 100644 internal/broadcast/operations_test.go create mode 100644 internal/contracts/operations.go create mode 100644 internal/contracts/operations_test.go create mode 100644 internal/data/message_writer.go create mode 100644 internal/data/message_writer_test.go create mode 100644 internal/events/ss_callbacks.go create mode 100644 internal/events/ss_callbacks_test.go create mode 100644 internal/operations/cache.go create mode 100644 internal/operations/cache_test.go create mode 100644 internal/operations/manager.go create mode 100644 internal/operations/manager_test.go create mode 100644 internal/privatemessaging/operations.go create mode 100644 internal/privatemessaging/operations_test.go create mode 100644 internal/shareddownload/download_manager.go create mode 100644 internal/shareddownload/download_manager_test.go create mode 100644 internal/shareddownload/download_worker.go create mode 100644 internal/shareddownload/operations.go create mode 100644 internal/shareddownload/operations_test.go create mode 100644 internal/txcommon/event_enrich.go create mode 100644 internal/txcommon/event_enrich_test.go create mode 100644 mocks/operationmocks/manager.go create mode 100644 mocks/shareddownloadmocks/callbacks.go create mode 100644 mocks/shareddownloadmocks/manager.go rename pkg/fftypes/{manifest.go => id_and_sequence.go} (70%) delete mode 100644 pkg/fftypes/manifest_test.go create mode 100644 pkg/fftypes/transaction_test.go create mode 100644 test/data/erc20/ERC20WithData.json create mode 100644 test/data/erc721/ERC721WithData.json create mode 100644 test/data/simplestorage/simple_storage.json delete mode 100644 test/data/simplestorage/simplestorage.abi.json delete mode 100644 test/data/simplestorage/simplestorage.bin diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 779cd5bc60..9956812bf8 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -32,12 +32,19 @@ jobs: matrix: test-suite: [TestEthereumE2ESuite, TestFabricE2ESuite] blockchain-provider: [geth, fabric] + token-provider: [none, erc1155, erc20_erc721] database-type: [sqlite3] exclude: - blockchain-provider: geth test-suite: TestFabricE2ESuite - blockchain-provider: fabric test-suite: TestEthereumE2ESuite + - blockchain-provider: fabric + token-provider: erc1155 + - blockchain-provider: fabric + token-provider: erc20_erc721 + - blockchain-provider: geth + token-provider: none fail-fast: false steps: - uses: actions/checkout@v2 @@ -53,6 +60,7 @@ jobs: env: TEST_SUITE: ${{ matrix.test-suite }} BLOCKCHAIN_PROVIDER: ${{ matrix.blockchain-provider }} + TOKENS_PROVIDER: ${{ matrix.token-provider }} DATABASE_TYPE: ${{ matrix.database-type }} run: ./test/e2e/run.sh diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index cba5bdf539..a2ab48eecd 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -45,6 +45,7 @@ jobs: BLOCKCHAIN_PROVIDER: ${{ matrix.blockchain-provider }} DATABASE_TYPE: ${{ matrix.database-type }} BUILD_FIREFLY: false + RESTART: true run: ./test/e2e/run.sh - name: Archive container logs diff --git a/.vscode/settings.json b/.vscode/settings.json index a5d380fdf7..bd8aa59b03 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -5,5 +5,6 @@ "go.lintTool": "golangci-lint", "cSpell.words": [ "fftypes" - ] + ], + "go.testTimeout": "10s" } diff --git a/Makefile b/Makefile index a95f768897..5f609bf9b0 100644 --- a/Makefile +++ b/Makefile @@ -58,6 +58,8 @@ $(eval 
$(call makemock, internal/data, Manager, datamocks $(eval $(call makemock, internal/batch, Manager, batchmocks)) $(eval $(call makemock, internal/broadcast, Manager, broadcastmocks)) $(eval $(call makemock, internal/privatemessaging, Manager, privatemessagingmocks)) +$(eval $(call makemock, internal/shareddownload, Manager, shareddownloadmocks)) +$(eval $(call makemock, internal/shareddownload, Callbacks, shareddownloadmocks)) $(eval $(call makemock, internal/definitions, DefinitionHandlers, definitionsmocks)) $(eval $(call makemock, internal/events, EventManager, eventmocks)) $(eval $(call makemock, internal/networkmap, Manager, networkmapmocks)) @@ -68,6 +70,7 @@ $(eval $(call makemock, internal/orchestrator, Orchestrator, orchestra $(eval $(call makemock, internal/apiserver, Server, apiservermocks)) $(eval $(call makemock, internal/apiserver, IServer, apiservermocks)) $(eval $(call makemock, internal/metrics, Manager, metricsmocks)) +$(eval $(call makemock, internal/operations, Manager, operationmocks)) firefly-nocgo: ${GOFILES} CGO_ENABLED=0 $(VGO) build -o ${BINARY_NAME}-nocgo -ldflags "-X main.buildDate=`date -u +\"%Y-%m-%dT%H:%M:%SZ\"` -X main.buildVersion=$(BUILD_VERSION)" -tags=prod -tags=prod -v @@ -91,4 +94,4 @@ swagger: manifest: ./manifestgen.sh docker: - ./docker_build.sh $(DOCKER_ARGS) \ No newline at end of file + ./docker_build.sh $(DOCKER_ARGS) diff --git a/cmd/firefly.go b/cmd/firefly.go index 59391a31e6..0f5ec9baa1 100644 --- a/cmd/firefly.go +++ b/cmd/firefly.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -95,7 +95,8 @@ func run() error { // Setup logging after reading config (even if failed), to output header correctly ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = log.WithLogger(ctx, logrus.WithField("pid", os.Getpid())) + ctx = log.WithLogger(ctx, logrus.WithField("pid", fmt.Sprintf("%d", os.Getpid()))) + ctx = log.WithLogger(ctx, logrus.WithField("prefix", config.GetString(config.NodeName))) config.SetupLogging(ctx) log.L(ctx).Infof("Project Firefly") diff --git a/db/migrations/postgres/000008_create_operations_table.up.sql b/db/migrations/postgres/000008_create_operations_table.up.sql index 14011cd450..570191f4b5 100644 --- a/db/migrations/postgres/000008_create_operations_table.up.sql +++ b/db/migrations/postgres/000008_create_operations_table.up.sql @@ -19,5 +19,6 @@ CREATE UNIQUE INDEX operations_id ON operations(id); CREATE INDEX operations_created ON operations(created); CREATE INDEX operations_backend ON operations(backend_id); CREATE INDEX operations_tx ON operations(tx_id); +CREATE INDEX operations_type_status ON operations(optype,opstatus); COMMIT; diff --git a/db/migrations/postgres/000069_add_operation_retry.down.sql b/db/migrations/postgres/000069_add_operation_retry.down.sql new file mode 100644 index 0000000000..e315703994 --- /dev/null +++ b/db/migrations/postgres/000069_add_operation_retry.down.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE operations DROP COLUMN retry_id; +COMMIT; diff --git a/db/migrations/postgres/000069_add_operation_retry.up.sql b/db/migrations/postgres/000069_add_operation_retry.up.sql new file mode 100644 index 0000000000..8c3db8d2a3 --- /dev/null +++ b/db/migrations/postgres/000069_add_operation_retry.up.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE operations ADD COLUMN retry_id UUID; +COMMIT; diff --git a/db/migrations/postgres/000070_add_subscriptions_filters.down.sql 
b/db/migrations/postgres/000070_add_subscriptions_filters.down.sql new file mode 100644 index 0000000000..06dc7ec67f --- /dev/null +++ b/db/migrations/postgres/000070_add_subscriptions_filters.down.sql @@ -0,0 +1,7 @@ +BEGIN; +ALTER TABLE subscriptions DROP COLUMN filters; +ALTER TABLE subscriptions ADD COLUMN filter_events text; +ALTER TABLE subscriptions ADD COLUMN filter_topics text; +ALTER TABLE subscriptions ADD COLUMN filter_tag text; +ALTER TABLE subscriptions ADD COLUMN filter_group text; +COMMIT; \ No newline at end of file diff --git a/db/migrations/postgres/000070_add_subscriptions_filters.up.sql b/db/migrations/postgres/000070_add_subscriptions_filters.up.sql new file mode 100644 index 0000000000..4b0e7fb721 --- /dev/null +++ b/db/migrations/postgres/000070_add_subscriptions_filters.up.sql @@ -0,0 +1,8 @@ +BEGIN; +ALTER TABLE subscriptions ADD COLUMN filters TEXT; +UPDATE subscriptions SET filters='{"events":"' || filter_events || '","message":{"topics":"' || filter_topics || '","tag":"' || filter_tag || '","group":"' || filter_group || '"}}'; +ALTER TABLE subscriptions DROP COLUMN filter_events; +ALTER TABLE subscriptions DROP COLUMN filter_topics; +ALTER TABLE subscriptions DROP COLUMN filter_tag; +ALTER TABLE subscriptions DROP COLUMN filter_group; +COMMIT; \ No newline at end of file diff --git a/db/migrations/postgres/000071_rename_batch_payload_column.down.sql b/db/migrations/postgres/000071_rename_batch_payload_column.down.sql new file mode 100644 index 0000000000..8259011d2a --- /dev/null +++ b/db/migrations/postgres/000071_rename_batch_payload_column.down.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE batches RENAME COLUMN manifest TO payload; +COMMIT; \ No newline at end of file diff --git a/db/migrations/postgres/000071_rename_batch_payload_column.up.sql b/db/migrations/postgres/000071_rename_batch_payload_column.up.sql new file mode 100644 index 0000000000..8f9f9cefab --- /dev/null +++ b/db/migrations/postgres/000071_rename_batch_payload_column.up.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE batches RENAME COLUMN payload TO manifest; +COMMIT; diff --git a/db/migrations/postgres/000072_add_tokenpool_info.down.sql b/db/migrations/postgres/000072_add_tokenpool_info.down.sql new file mode 100644 index 0000000000..f810b6bcb4 --- /dev/null +++ b/db/migrations/postgres/000072_add_tokenpool_info.down.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE tokenpool DROP COLUMN info; +COMMIT; diff --git a/db/migrations/postgres/000072_add_tokenpool_info.up.sql b/db/migrations/postgres/000072_add_tokenpool_info.up.sql new file mode 100644 index 0000000000..e4c3396da1 --- /dev/null +++ b/db/migrations/postgres/000072_add_tokenpool_info.up.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE tokenpool ADD COLUMN info TEXT; +COMMIT; diff --git a/db/migrations/postgres/000073_add_contractlisteners_options.down.sql b/db/migrations/postgres/000073_add_contractlisteners_options.down.sql new file mode 100644 index 0000000000..5ff0be55ef --- /dev/null +++ b/db/migrations/postgres/000073_add_contractlisteners_options.down.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE contractlisteners DROP COLUMN options; +COMMIT; \ No newline at end of file diff --git a/db/migrations/postgres/000073_add_contractlisteners_options.up.sql b/db/migrations/postgres/000073_add_contractlisteners_options.up.sql new file mode 100644 index 0000000000..5dd774d0e9 --- /dev/null +++ b/db/migrations/postgres/000073_add_contractlisteners_options.up.sql @@ -0,0 +1,3 @@ +BEGIN; +ALTER TABLE contractlisteners ADD COLUMN options TEXT; +COMMIT; \ No newline at end of file
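Note on the 000070 pair above: the up migration collapses the four legacy filter_events/filter_topics/filter_tag/filter_group columns into a single JSON filters column, nesting the message-scoped fields under a message object, the same shape the subscription docs and swagger changes later in this patch expose (which additionally layer in blockchainevent and transaction filters). A minimal Go sketch of that JSON shape, with struct and field names inferred from the UPDATE template rather than taken from the real fftypes definitions:

package main

import "encoding/json"

// Approximate Go shape of the consolidated "filters" JSON written by the
// 000070 up migration. Field names come from the JSON template in the UPDATE
// statement; the real fftypes subscription filter carries additional fields.
type messageFilter struct {
	Topics string `json:"topics,omitempty"`
	Tag    string `json:"tag,omitempty"`
	Group  string `json:"group,omitempty"`
}

type subscriptionFilter struct {
	Events  string        `json:"events,omitempty"`
	Message messageFilter `json:"message"`
}

func main() {
	// Mirrors: '{"events":"' || filter_events || '","message":{"topics":"' || ...
	f := subscriptionFilter{
		Events:  ".*",
		Message: messageFilter{Topics: ".*", Tag: ".*", Group: ".*"},
	}
	b, _ := json.Marshal(f)
	println(string(b))
}

Running this prints {"events":".*","message":{"topics":".*","tag":".*","group":".*"}}, matching what the SQL concatenation writes for a row whose four legacy columns were all .*.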
diff --git a/db/migrations/postgres/000074_add_event_and_listener_topic.down.sql b/db/migrations/postgres/000074_add_event_and_listener_topic.down.sql new file mode 100644 index 0000000000..e90ee446ae --- /dev/null +++ b/db/migrations/postgres/000074_add_event_and_listener_topic.down.sql @@ -0,0 +1,8 @@ +BEGIN; + +DROP INDEX events_topic; + +ALTER TABLE events DROP COLUMN topic; +ALTER TABLE contractlisteners DROP COLUMN topic; + +COMMIT; diff --git a/db/migrations/postgres/000074_add_event_and_listener_topic.up.sql b/db/migrations/postgres/000074_add_event_and_listener_topic.up.sql new file mode 100644 index 0000000000..a18c8d9c7d --- /dev/null +++ b/db/migrations/postgres/000074_add_event_and_listener_topic.up.sql @@ -0,0 +1,14 @@ +BEGIN; + +ALTER TABLE events ADD COLUMN topic VARCHAR(64); +ALTER TABLE contractlisteners ADD COLUMN topic VARCHAR(64); + +UPDATE events SET topic = ''; +UPDATE contractlisteners SET topic = ''; + +ALTER TABLE events ALTER COLUMN topic SET NOT NULL; +ALTER TABLE contractlisteners ALTER COLUMN topic SET NOT NULL; + +CREATE INDEX events_topic ON events(topic); + +COMMIT; diff --git a/db/migrations/postgres/000075_add_pins_batch_hash.down.sql b/db/migrations/postgres/000075_add_pins_batch_hash.down.sql new file mode 100644 index 0000000000..753f0f04d1 --- /dev/null +++ b/db/migrations/postgres/000075_add_pins_batch_hash.down.sql @@ -0,0 +1,5 @@ +BEGIN; + +ALTER TABLE pins DROP COLUMN batch_hash; + +COMMIT; diff --git a/db/migrations/postgres/000075_add_pins_batch_hash.up.sql b/db/migrations/postgres/000075_add_pins_batch_hash.up.sql new file mode 100644 index 0000000000..1fd7b99777 --- /dev/null +++ b/db/migrations/postgres/000075_add_pins_batch_hash.up.sql @@ -0,0 +1,5 @@ +BEGIN; + +ALTER TABLE pins ADD COLUMN batch_hash VARCHAR(64); + +COMMIT; diff --git a/db/migrations/sqlite/000008_create_operations_table.up.sql b/db/migrations/sqlite/000008_create_operations_table.up.sql index ce67be59ce..d586d622af 100644 --- a/db/migrations/sqlite/000008_create_operations_table.up.sql +++ b/db/migrations/sqlite/000008_create_operations_table.up.sql @@ -18,3 +18,4 @@ CREATE UNIQUE INDEX operations_id ON operations(id); CREATE INDEX operations_created ON operations(created); CREATE INDEX operations_backend ON operations(backend_id); CREATE INDEX operations_tx ON operations(tx_id); +CREATE INDEX operations_type_status ON operations(optype,opstatus); diff --git a/db/migrations/sqlite/000063_create_identities_table.down.sql b/db/migrations/sqlite/000063_create_identities_table.down.sql index 1703138561..48ac40d9ac 100644 --- a/db/migrations/sqlite/000063_create_identities_table.down.sql +++ b/db/migrations/sqlite/000063_create_identities_table.down.sql @@ -51,8 +51,8 @@ INSERT INTO orgs ( i.created, v.value as identity FROM identities as i - LEFT JOIN verifiers v ON v.hash = REPLACE(hex(i.id),'-','') || REPLACE(hex(i.id),'-','') - LEFT JOIN verifiers pv ON pv.hash = REPLACE(hex(i.parent),'-','') || REPLACE(hex(i.parent),'-','') + LEFT JOIN verifiers v ON v.hash = REPLACE(i.id,'-','') || REPLACE(i.id,'-','') + LEFT JOIN verifiers pv ON pv.hash = REPLACE(i.parent,'-','') || REPLACE(i.parent,'-','') WHERE i.did LIKE 'did:firefly:org/%' AND v.hash IS NOT NULL; -- We only reconstitute nodes that were dropped during the original up migration.
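A subtlety in the identities migrations here: these sqlite UUID columns already hold hexadecimal text, so wrapping them in hex() re-encoded the ASCII bytes of that text (yielding a different, longer string) rather than the UUID's own digits; both sides of the migration now use the bare column. As the up migration's comments note, the UUID's 32 hex digits are simply written twice to fill the 32-byte hash column without any real hashing. A small Go illustration of that convention (the helper name is invented for this sketch):

package main

import (
	"fmt"
	"strings"
)

// syntheticVerifierHash mimics the migration's REPLACE(id,'-','') || REPLACE(id,'-','')
// trick: strip the dashes from the UUID's 32 hex digits, then concatenate the
// result with itself to fill the 64 hex characters (32 bytes) of the hash column.
func syntheticVerifierHash(uuid string) string {
	hexDigits := strings.ReplaceAll(uuid, "-", "") // 32 hex digits
	return hexDigits + hexDigits                   // doubled to 64 characters
}

func main() {
	// 36-character UUID in, 64-character pseudo-hash out; no hashing involved.
	fmt.Println(syntheticVerifierHash("0cbd4dfc-223f-4212-a0a0-5f0a01297f48"))
}

This is exactly the value both the up-migration INSERTs and the down-migration joins compute, which is why the two sides must agree on dropping hex().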
@@ -76,8 +76,8 @@ INSERT INTO nodes ( i.created, v.value as dx_peer FROM identities as i - LEFT JOIN verifiers v ON v.hash = REPLACE(hex(i.id),'-','') || REPLACE(hex(i.id),'-','') - LEFT JOIN verifiers pv ON pv.hash = REPLACE(hex(i.parent),'-','') || REPLACE(hex(i.parent),'-','') + LEFT JOIN verifiers v ON v.hash = REPLACE(i.id,'-','') || REPLACE(i.id,'-','') + LEFT JOIN verifiers pv ON pv.hash = REPLACE(i.parent,'-','') || REPLACE(i.parent,'-','') WHERE i.did LIKE 'did:firefly:node/%' AND v.hash IS NOT NULL; DROP INDEX identities_id; diff --git a/db/migrations/sqlite/000063_create_identities_table.up.sql b/db/migrations/sqlite/000063_create_identities_table.up.sql index 3ca8a8ce12..bac731cfbd 100644 --- a/db/migrations/sqlite/000063_create_identities_table.up.sql +++ b/db/migrations/sqlite/000063_create_identities_table.up.sql @@ -38,6 +38,7 @@ INSERT INTO identities ( did, parent, messages_claim, + itype, namespace, name, description, @@ -49,6 +50,7 @@ INSERT INTO identities ( 'did:firefly:org/' || o1.name, o2.id, o1.message_id, + 'org', 'ff_system', o1.name, o1.description, @@ -63,6 +65,7 @@ INSERT INTO identities ( did, parent, messages_claim, + itype, namespace, name, description, @@ -74,6 +77,7 @@ INSERT INTO identities ( 'did:firefly:node/' || n.name, o.id, n.message_id, + 'node', 'ff_system', n.name, n.description, @@ -91,7 +95,7 @@ INSERT INTO verifiers ( value, created ) SELECT - REPLACE(hex(o.id), '-', '') || REPLACE(hex(o.id), '-', ''), -- to avoid the need for hashing in the migration, use the convenient fact the UUID is known hex - have to write it twice to fill the 32B -- + REPLACE(o.id, '-', '') || REPLACE(o.id, '-', ''), -- to avoid the need for hashing in the migration, use the convenient fact the UUID is known hex - have to write it twice to fill the 32B -- 'ff_system', o.id, 'ethereum_address', @@ -107,7 +111,7 @@ INSERT INTO verifiers ( value, created ) SELECT - REPLACE(hex(o.id), '-', '') || REPLACE(hex(o.id), '-', ''), -- to avoid the need for hashing in the migration, use the convenient fact the UUID is known hex - have to write it twice to fill the 32B -- + REPLACE(o.id, '-', '') || REPLACE(o.id, '-', ''), -- to avoid the need for hashing in the migration, use the convenient fact the UUID is known hex - have to write it twice to fill the 32B -- 'ff_system', o.id, 'fabric_msp_id', @@ -123,7 +127,7 @@ INSERT INTO verifiers ( value, created ) SELECT - REPLACE(hex(n.id), '-', '') || REPLACE(hex(n.id), '-', ''), -- to avoid the need for hashing in the migration, use the convenient fact the UUID is known hex - have to write it twice to fill the 32B -- + REPLACE(n.id, '-', '') || REPLACE(n.id, '-', ''), -- to avoid the need for hashing in the migration, use the convenient fact the UUID is known hex - have to write it twice to fill the 32B -- 'ff_system', n.id, 'dx_peer_id', diff --git a/db/migrations/sqlite/000069_add_operation_retry.down.sql b/db/migrations/sqlite/000069_add_operation_retry.down.sql new file mode 100644 index 0000000000..0415eac239 --- /dev/null +++ b/db/migrations/sqlite/000069_add_operation_retry.down.sql @@ -0,0 +1 @@ +ALTER TABLE operations DROP COLUMN retry_id; diff --git a/db/migrations/sqlite/000069_add_operation_retry.up.sql b/db/migrations/sqlite/000069_add_operation_retry.up.sql new file mode 100644 index 0000000000..16668cf665 --- /dev/null +++ b/db/migrations/sqlite/000069_add_operation_retry.up.sql @@ -0,0 +1 @@ +ALTER TABLE operations ADD COLUMN retry_id UUID; diff --git a/db/migrations/sqlite/000070_add_subscriptions_filters.down.sql 
b/db/migrations/sqlite/000070_add_subscriptions_filters.down.sql new file mode 100644 index 0000000000..4c1cb759f2 --- /dev/null +++ b/db/migrations/sqlite/000070_add_subscriptions_filters.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE subscriptions DROP COLUMN filters; +ALTER TABLE subscriptions ADD COLUMN filter_events text; +ALTER TABLE subscriptions ADD COLUMN filter_topics text; +ALTER TABLE subscriptions ADD COLUMN filter_tag text; +ALTER TABLE subscriptions ADD COLUMN filter_group text; \ No newline at end of file diff --git a/db/migrations/sqlite/000070_add_subscriptions_filters.up.sql b/db/migrations/sqlite/000070_add_subscriptions_filters.up.sql new file mode 100644 index 0000000000..ff88a62755 --- /dev/null +++ b/db/migrations/sqlite/000070_add_subscriptions_filters.up.sql @@ -0,0 +1,6 @@ +ALTER TABLE subscriptions ADD COLUMN filters TEXT; +UPDATE subscriptions SET filters='{"events":"' || filter_events || '","message":{"topics":"' || filter_topics || '","tag":"' || filter_tag || '","group":"' || filter_group || '"}}'; +ALTER TABLE subscriptions DROP COLUMN filter_events; +ALTER TABLE subscriptions DROP COLUMN filter_topics; +ALTER TABLE subscriptions DROP COLUMN filter_tag; +ALTER TABLE subscriptions DROP COLUMN filter_group; \ No newline at end of file diff --git a/db/migrations/sqlite/000071_rename_batch_payload_column.down.sql b/db/migrations/sqlite/000071_rename_batch_payload_column.down.sql new file mode 100644 index 0000000000..5fc109c3c9 --- /dev/null +++ b/db/migrations/sqlite/000071_rename_batch_payload_column.down.sql @@ -0,0 +1 @@ +ALTER TABLE batches RENAME COLUMN manifest TO payload; diff --git a/db/migrations/sqlite/000071_rename_batch_payload_column.up.sql b/db/migrations/sqlite/000071_rename_batch_payload_column.up.sql new file mode 100644 index 0000000000..ab488aac59 --- /dev/null +++ b/db/migrations/sqlite/000071_rename_batch_payload_column.up.sql @@ -0,0 +1 @@ +ALTER TABLE batches RENAME COLUMN payload TO manifest; diff --git a/db/migrations/sqlite/000072_add_tokenpool_info.down.sql b/db/migrations/sqlite/000072_add_tokenpool_info.down.sql new file mode 100644 index 0000000000..ce0590e778 --- /dev/null +++ b/db/migrations/sqlite/000072_add_tokenpool_info.down.sql @@ -0,0 +1 @@ +ALTER TABLE tokenpool DROP COLUMN info; diff --git a/db/migrations/sqlite/000072_add_tokenpool_info.up.sql b/db/migrations/sqlite/000072_add_tokenpool_info.up.sql new file mode 100644 index 0000000000..d40f2bcebf --- /dev/null +++ b/db/migrations/sqlite/000072_add_tokenpool_info.up.sql @@ -0,0 +1 @@ +ALTER TABLE tokenpool ADD COLUMN info TEXT; diff --git a/db/migrations/sqlite/000073_add_contractlisteners_options.down.sql b/db/migrations/sqlite/000073_add_contractlisteners_options.down.sql new file mode 100644 index 0000000000..b0d5e419db --- /dev/null +++ b/db/migrations/sqlite/000073_add_contractlisteners_options.down.sql @@ -0,0 +1 @@ +ALTER TABLE contractlisteners DROP COLUMN options; \ No newline at end of file diff --git a/db/migrations/sqlite/000073_add_contractlisteners_options.up.sql b/db/migrations/sqlite/000073_add_contractlisteners_options.up.sql new file mode 100644 index 0000000000..cf946ee6fe --- /dev/null +++ b/db/migrations/sqlite/000073_add_contractlisteners_options.up.sql @@ -0,0 +1 @@ +ALTER TABLE contractlisteners ADD COLUMN options TEXT; \ No newline at end of file diff --git a/db/migrations/sqlite/000074_add_event_and_listener_topic.down.sql b/db/migrations/sqlite/000074_add_event_and_listener_topic.down.sql new file mode 100644 index 0000000000..1cfee88d47 --- /dev/null 
+++ b/db/migrations/sqlite/000074_add_event_and_listener_topic.down.sql @@ -0,0 +1,5 @@ +DROP INDEX events_topic; + +ALTER TABLE events DROP COLUMN topic; +ALTER TABLE contractlisteners DROP COLUMN topic; + diff --git a/db/migrations/sqlite/000074_add_event_and_listener_topic.up.sql b/db/migrations/sqlite/000074_add_event_and_listener_topic.up.sql new file mode 100644 index 0000000000..6dfaaff364 --- /dev/null +++ b/db/migrations/sqlite/000074_add_event_and_listener_topic.up.sql @@ -0,0 +1,7 @@ +ALTER TABLE events ADD COLUMN topic VARCHAR(64); +ALTER TABLE contractlisteners ADD COLUMN topic VARCHAR(64); + +UPDATE events SET topic = ''; +UPDATE contractlisteners SET topic = ''; + +CREATE INDEX events_topic ON events(topic); diff --git a/db/migrations/sqlite/000075_add_pins_batch_hash.down.sql b/db/migrations/sqlite/000075_add_pins_batch_hash.down.sql new file mode 100644 index 0000000000..e2c3c275da --- /dev/null +++ b/db/migrations/sqlite/000075_add_pins_batch_hash.down.sql @@ -0,0 +1 @@ +ALTER TABLE pins DROP COLUMN batch_hash; diff --git a/db/migrations/sqlite/000075_add_pins_batch_hash.up.sql b/db/migrations/sqlite/000075_add_pins_batch_hash.up.sql new file mode 100644 index 0000000000..a2e1f82ef1 --- /dev/null +++ b/db/migrations/sqlite/000075_add_pins_batch_hash.up.sql @@ -0,0 +1 @@ +ALTER TABLE pins ADD COLUMN batch_hash VARCHAR(64); diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index aa1db96a0e..d4d15b6f32 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -1,7 +1,7 @@ GEM remote: https://rubygems.org/ specs: - activesupport (6.0.4.6) + activesupport (6.0.4.7) concurrent-ruby (~> 1.0, >= 1.0.2) i18n (>= 0.7, < 2) minitest (~> 5.1) @@ -14,8 +14,7 @@ GEM execjs coffee-script-source (1.11.1) colorator (1.1.0) - commonmarker (0.17.13) - ruby-enum (~> 0.5) + commonmarker (0.23.4) concurrent-ruby (1.1.9) dnsruby (1.61.9) simpleidn (~> 0.1) @@ -52,12 +51,12 @@ GEM ffi (1.15.5) forwardable-extended (2.6.0) gemoji (3.0.1) - github-pages (223) + github-pages (225) github-pages-health-check (= 1.17.9) jekyll (= 3.9.0) jekyll-avatar (= 0.7.0) jekyll-coffeescript (= 1.1.1) - jekyll-commonmark-ghpages (= 0.1.6) + jekyll-commonmark-ghpages (= 0.2.0) jekyll-default-layout (= 0.1.4) jekyll-feed (= 0.15.1) jekyll-gist (= 1.5.0) @@ -71,7 +70,7 @@ GEM jekyll-relative-links (= 0.6.1) jekyll-remote-theme (= 0.4.3) jekyll-sass-converter (= 1.5.2) - jekyll-seo-tag (= 2.7.1) + jekyll-seo-tag (= 2.8.0) jekyll-sitemap (= 1.4.0) jekyll-swiss (= 1.0.0) jekyll-theme-architect (= 0.2.0) @@ -127,12 +126,12 @@ GEM jekyll-coffeescript (1.1.1) coffee-script (~> 2.2) coffee-script-source (~> 1.11.1) - jekyll-commonmark (1.3.1) - commonmarker (~> 0.14) - jekyll (>= 3.7, < 5.0) - jekyll-commonmark-ghpages (0.1.6) - commonmarker (~> 0.17.6) - jekyll-commonmark (~> 1.2) + jekyll-commonmark (1.4.0) + commonmarker (~> 0.22) + jekyll-commonmark-ghpages (0.2.0) + commonmarker (~> 0.23.4) + jekyll (~> 3.9.0) + jekyll-commonmark (~> 1.4.0) rouge (>= 2.0, < 4.0) jekyll-default-layout (0.1.4) jekyll (~> 3.0) @@ -164,7 +163,7 @@ GEM rubyzip (>= 1.3.0, < 3.0) jekyll-sass-converter (1.5.2) sass (~> 3.4) - jekyll-seo-tag (2.7.1) + jekyll-seo-tag (2.8.0) jekyll (>= 3.8, < 5.0) jekyll-sitemap (1.4.0) jekyll (>= 3.7, < 5.0) @@ -248,8 +247,6 @@ GEM ffi (~> 1.0) rexml (3.2.5) rouge (3.26.0) - ruby-enum (0.9.0) - i18n ruby2_keywords (0.0.5) rubyzip (2.3.2) safe_yaml (1.0.5) @@ -272,7 +269,7 @@ GEM thread_safe (~> 0.1) unf (0.1.4) unf_ext - unf_ext (0.0.8) + unf_ext (0.0.8.1) unicode-display_width (1.8.0) zeitwerk (2.5.4) diff 
--git a/docs/contributors/contributors.md b/docs/contributors/contributors.md index 667190f077..965b7fa3e9 100644 --- a/docs/contributors/contributors.md +++ b/docs/contributors/contributors.md @@ -22,12 +22,9 @@ We welcome anyone to contribute to the FireFly project! If you're interested, th --- -## 🚀 Connect with us on Rocket Chat -You can chat with maintainers and other contributors on Rocket Chat in the `firefly` channel: -[https://chat.hyperledger.org/channel/firefly](https://chat.hyperledger.org/channel/firefly) - -If you don't have a Linux Foundation ID, you can sign up for a free account here: -[https://wiki.hyperledger.org/display/CA/Setting+up+an+LFID](https://wiki.hyperledger.org/display/CA/Setting+up+an+LFID) +## 🚀 Connect with us on Discord +You can chat with maintainers and other contributors on Discord in the `firefly` channel: +[https://discord.gg/hyperledger](https://discord.gg/hyperledger) ## 📅 Join our Community Calls Community calls are a place to talk to other contributors, maintainers, and other people interested in FireFly. Maintainers often discuss upcoming changes and proposed new features on these calls. These calls are a great way for the community to give feedback on new ideas, ask questions about FireFly, and hear how others are using FireFly to solve real world problems. diff --git a/docs/gettingstarted/events.md b/docs/gettingstarted/events.md index ada3b30634..6ec7eee078 100644 --- a/docs/gettingstarted/events.md +++ b/docs/gettingstarted/events.md @@ -149,11 +149,20 @@ in the event. "transport": "websockets", "name": "app1", "filter": { - "author": ".*", + "blockchainevent": { + "listener": ".*", + "name": ".*" + }, "events": ".*", - "group": ".*", - "tag": ".*", - "topics": ".*" + "message": { + "author": ".*", + "group": ".*", + "tag": ".*", + "topics": ".*" + }, + "transaction": { + "type": ".*" + } }, "options": { "firstEvent": "newest", diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index 9f112ada0c..218a54cbbd 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -903,121 +903,29 @@ paths: properties: author: type: string - blobs: - items: {} - type: array confirmed: {} created: {} hash: {} id: {} key: type: string + manifest: + type: string namespace: type: string node: {} - payload: - properties: - data: - items: - properties: - blob: - properties: - hash: {} - name: - type: string - public: - type: string - size: - format: int64 - type: integer - type: object - created: {} - datatype: - properties: - name: - type: string - version: - type: string - type: object - hash: {} - id: {} - namespace: - type: string - validator: - type: string - value: - type: string - type: object - type: array - messages: - items: - properties: - batch: {} - confirmed: {} - data: - items: - properties: - hash: {} - id: {} - type: object - type: array - hash: {} - header: - properties: - author: - type: string - cid: {} - created: {} - datahash: {} - group: {} - id: {} - key: - type: string - namespace: - type: string - tag: - type: string - topics: - items: - type: string - type: array - txtype: - type: string - type: - enum: - - definition - - broadcast - - private - - groupinit - - transfer_broadcast - - transfer_private - type: string - type: object - pins: - items: - type: string - type: array - state: - enum: - - staged - - ready - - sent - - pending - - confirmed - - rejected - type: string - type: object - type: array - tx: - properties: - id: {} - type: - type: string - type: object - type: object payloadRef: type: string + 
tx: + properties: + id: {} + type: + type: string + type: object type: + enum: + - broadcast + - private type: string type: object description: Success @@ -1056,121 +964,29 @@ paths: properties: author: type: string - blobs: - items: {} - type: array confirmed: {} created: {} hash: {} id: {} key: type: string + manifest: + type: string namespace: type: string node: {} - payload: - properties: - data: - items: - properties: - blob: - properties: - hash: {} - name: - type: string - public: - type: string - size: - format: int64 - type: integer - type: object - created: {} - datatype: - properties: - name: - type: string - version: - type: string - type: object - hash: {} - id: {} - namespace: - type: string - validator: - type: string - value: - type: string - type: object - type: array - messages: - items: - properties: - batch: {} - confirmed: {} - data: - items: - properties: - hash: {} - id: {} - type: object - type: array - hash: {} - header: - properties: - author: - type: string - cid: {} - created: {} - datahash: {} - group: {} - id: {} - key: - type: string - namespace: - type: string - tag: - type: string - topics: - items: - type: string - type: array - txtype: - type: string - type: - enum: - - definition - - broadcast - - private - - groupinit - - transfer_broadcast - - transfer_private - type: string - type: object - pins: - items: - type: string - type: array - state: - enum: - - staged - - ready - - sent - - pending - - confirmed - - rejected - type: string - type: object - type: array - tx: - properties: - id: {} - type: - type: string - type: object - type: object payloadRef: type: string + tx: + properties: + id: {} + type: + type: string + type: object type: + enum: + - broadcast + - private type: string type: object description: Success @@ -1420,6 +1236,15 @@ paths: count: type: string timestamp: {} + types: + items: + properties: + count: + type: string + type: + type: string + type: object + type: array type: object description: Success default: @@ -2450,8 +2275,15 @@ paths: type: string namespace: type: string + options: + properties: + firstEvent: + type: string + type: object protocolId: type: string + topic: + type: string type: object description: Success default: @@ -2512,8 +2344,15 @@ paths: type: string namespace: type: string + options: + properties: + firstEvent: + type: string + type: object protocolId: type: string + topic: + type: string type: object responses: "200": @@ -2553,8 +2392,15 @@ paths: type: string namespace: type: string + options: + properties: + firstEvent: + type: string + type: object protocolId: type: string + topic: + type: string type: object description: Success default: @@ -2649,8 +2495,15 @@ paths: type: string namespace: type: string + options: + properties: + firstEvent: + type: string + type: object protocolId: type: string + topic: + type: string type: object description: Success default: @@ -3693,6 +3546,12 @@ paths: schema: example: default type: string + - description: 'TODO: Description' + in: query + name: fetchreferences + schema: + example: "true" + type: string - description: Server-side request timeout (millseconds, or set a custom suffix like 10s) in: header @@ -3730,6 +3589,11 @@ paths: name: sequence schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: topic + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' in: query name: tx @@ -3789,6 +3653,8 @@ paths: sequence: format: int64 type: integer + topic: + type: string tx: {} type: enum: @@ -3852,6 +3718,8 @@ paths: sequence: format: int64 type: integer + topic: + type: string tx: {} type: enum: @@ -4965,6 +4833,11 @@ paths: name: sequence schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: topic + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query name: tx @@ -5024,6 +4897,8 @@ paths: sequence: format: int64 type: integer + topic: + type: string tx: {} type: enum: @@ -5047,71 +4922,6 @@ paths: description: Success default: description: "" - /namespaces/{ns}/messages/{msgid}/operations: - get: - description: 'TODO: Description' - operationId: getMsgOps - parameters: - - description: 'TODO: Description' - in: path - name: ns - required: true - schema: - example: default - type: string - - description: 'TODO: Description' - in: path - name: msgid - required: true - schema: - type: string - - description: Server-side request timeout (millseconds, or set a custom suffix - like 10s) - in: header - name: Request-Timeout - schema: - default: 120s - type: string - responses: - "200": - content: - application/json: - schema: - properties: - created: {} - error: - type: string - id: {} - input: - additionalProperties: {} - type: object - namespace: - type: string - output: - additionalProperties: {} - type: object - plugin: - type: string - status: - type: string - tx: {} - type: - enum: - - blockchain_batch_pin - - blockchain_invoke - - sharedstorage_batch_broadcast - - dataexchange_batch_send - - dataexchange_blob_send - - token_create_pool - - token_activate_pool - - token_transfer - - token_approval - type: string - updated: {} - type: object - description: Success - default: - description: "" /namespaces/{ns}/messages/{msgid}/transaction: get: description: 'TODO: Description' @@ -5781,6 +5591,11 @@ paths: name: plugin schema: type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: retry + schema: + type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' in: query name: status @@ -5855,16 +5670,20 @@ paths: type: object plugin: type: string + retry: {} status: type: string tx: {} type: enum: - - blockchain_batch_pin + - blockchain_pin_batch - blockchain_invoke - - sharedstorage_batch_broadcast - - dataexchange_batch_send - - dataexchange_blob_send + - sharedstorage_upload_batch + - sharedstorage_upload_blob + - sharedstorage_download_batch + - sharedstorage_download_blob + - dataexchange_send_batch + - dataexchange_send_blob - token_create_pool - token_activate_pool - token_transfer @@ -5920,16 +5739,94 @@ paths: type: object plugin: type: string + retry: {} + status: + type: string + tx: {} + type: + enum: + - blockchain_pin_batch + - blockchain_invoke + - sharedstorage_upload_batch + - sharedstorage_upload_blob + - sharedstorage_download_batch + - sharedstorage_download_blob + - dataexchange_send_batch + - dataexchange_send_blob + - token_create_pool + - token_activate_pool + - token_transfer + - token_approval + type: string + updated: {} + type: object + description: Success + default: + description: "" + /namespaces/{ns}/operations/{opid}/retry: + post: + description: 'TODO: Description' + operationId: postOpRetry + parameters: + - description: 'TODO: Description' + in: path + name: ns + required: true + schema: + example: default + type: string + - description: 'TODO: Description' + in: path + name: opid + required: true + schema: + type: string + - description: Server-side request timeout (millseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 120s + type: string + requestBody: + content: + application/json: + schema: + type: object + responses: + "202": + content: + application/json: + schema: + properties: + created: {} + error: + type: string + id: {} + input: + additionalProperties: {} + type: object + namespace: + type: string + output: + additionalProperties: {} + type: object + plugin: + type: string + retry: {} status: type: string tx: {} type: enum: - - blockchain_batch_pin + - blockchain_pin_batch - blockchain_invoke - - sharedstorage_batch_broadcast - - dataexchange_batch_send - - dataexchange_blob_send + - sharedstorage_upload_batch + - sharedstorage_upload_blob + - sharedstorage_download_batch + - sharedstorage_download_blob + - dataexchange_send_batch + - dataexchange_send_blob - token_create_pool - token_activate_pool - token_transfer @@ -5971,17 +5868,7 @@ paths: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' in: query - name: filter.group - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: filter.tag - schema: - type: string - - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' - in: query - name: filter.topics + name: filters schema: type: string - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! 
!@ !^' @@ -6056,14 +5943,37 @@ paths: properties: author: type: string + blockchainevent: + properties: + listener: + type: string + name: + type: string + type: object events: type: string group: type: string + message: + properties: + author: + type: string + group: + type: string + tag: + type: string + type: object tag: type: string + topic: + type: string topics: type: string + transaction: + properties: + type: + type: string + type: object type: object id: {} name: @@ -6115,14 +6025,37 @@ paths: properties: author: type: string + blockchainevent: + properties: + listener: + type: string + name: + type: string + type: object events: type: string group: type: string + message: + properties: + author: + type: string + group: + type: string + tag: + type: string + type: object tag: type: string + topic: + type: string topics: type: string + transaction: + properties: + type: + type: string + type: object type: object name: type: string @@ -6243,14 +6176,37 @@ paths: properties: author: type: string + blockchainevent: + properties: + listener: + type: string + name: + type: string + type: object events: type: string group: type: string + message: + properties: + author: + type: string + group: + type: string + tag: + type: string + type: object tag: type: string + topic: + type: string topics: type: string + transaction: + properties: + type: + type: string + type: object type: object id: {} name: @@ -6302,14 +6258,37 @@ paths: properties: author: type: string + blockchainevent: + properties: + listener: + type: string + name: + type: string + type: object events: type: string group: type: string + message: + properties: + author: + type: string + group: + type: string + tag: + type: string + type: object tag: type: string + topic: + type: string topics: type: string + transaction: + properties: + type: + type: string + type: object type: object name: type: string @@ -6430,14 +6409,37 @@ paths: properties: author: type: string + blockchainevent: + properties: + listener: + type: string + name: + type: string + type: object events: type: string group: type: string + message: + properties: + author: + type: string + group: + type: string + tag: + type: string + type: object tag: type: string + topic: + type: string topics: type: string + transaction: + properties: + type: + type: string + type: object type: object id: {} name: @@ -6527,14 +6529,37 @@ paths: properties: author: type: string + blockchainevent: + properties: + listener: + type: string + name: + type: string + type: object events: type: string group: type: string + message: + properties: + author: + type: string + group: + type: string + tag: + type: string + type: object tag: type: string + topic: + type: string topics: type: string + transaction: + properties: + type: + type: string + type: object type: object id: {} name: @@ -7740,6 +7765,9 @@ paths: type: string created: {} id: {} + info: + additionalProperties: {} + type: object key: type: string message: {} @@ -7832,6 +7860,9 @@ paths: type: string created: {} id: {} + info: + additionalProperties: {} + type: object key: type: string message: {} @@ -7876,6 +7907,9 @@ paths: type: string created: {} id: {} + info: + additionalProperties: {} + type: object key: type: string message: {} @@ -7948,6 +7982,9 @@ paths: type: string created: {} id: {} + info: + additionalProperties: {} + type: object key: type: string message: {} @@ -8795,16 +8832,20 @@ paths: type: object plugin: type: string + retry: {} status: type: string tx: {} type: enum: - - blockchain_batch_pin + - 
blockchain_pin_batch - blockchain_invoke - - sharedstorage_batch_broadcast - - dataexchange_batch_send - - dataexchange_blob_send + - sharedstorage_upload_batch + - sharedstorage_upload_blob + - sharedstorage_download_batch + - sharedstorage_download_blob + - dataexchange_send_batch + - dataexchange_send_blob - token_create_pool - token_activate_pool - token_transfer @@ -9176,14 +9217,14 @@ paths: description: Success default: description: "" - /network/nodes/{nid}: + /network/nodes/{nameOrId}: get: description: 'TODO: Description' operationId: getNetworkNode parameters: - description: 'TODO: Description' in: path - name: nid + name: nameOrId required: true schema: type: string @@ -9577,14 +9618,14 @@ paths: description: Success default: description: "" - /network/organizations/{oid}: + /network/organizations/{nameOrId}: get: description: 'TODO: Description' operationId: getNetworkOrg parameters: - description: 'TODO: Description' in: path - name: oid + name: nameOrId required: true schema: type: string @@ -9830,5 +9871,112 @@ paths: description: Success default: description: "" + /status/pins: + get: + description: 'TODO: Description' + operationId: getStatusPins + parameters: + - description: Server-side request timeout (millseconds, or set a custom suffix + like 10s) + in: header + name: Request-Timeout + schema: + default: 120s + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: batch + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: created + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: dispatched + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: hash + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: index + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: masked + schema: + type: string + - description: 'Data filter field. Prefixes supported: > >= < <= @ ^ ! !@ !^' + in: query + name: sequence + schema: + type: string + - description: Sort field. For multi-field sort use comma separated values (or + multiple query values) with '-' prefix for descending + in: query + name: sort + schema: + type: string + - description: Ascending sort order (overrides all fields in a multi-field sort) + in: query + name: ascending + schema: + type: string + - description: Descending sort order (overrides all fields in a multi-field + sort) + in: query + name: descending + schema: + type: string + - description: 'The number of records to skip (max: 1,000). 
Unsuitable for bulk + operations' + in: query + name: skip + schema: + type: string + - description: 'The maximum number of records to return (max: 1,000)' + in: query + name: limit + schema: + example: "25" + type: string + - description: Return a total count as well as items (adds extra database processing) + in: query + name: count + schema: + type: string + responses: + "200": + content: + application/json: + schema: + properties: + batch: {} + batchHash: {} + created: {} + dispatched: + type: boolean + hash: {} + index: + format: int64 + type: integer + masked: + type: boolean + sequence: + format: int64 + type: integer + signer: + type: string + type: object + description: Success + default: + description: "" servers: - url: http://localhost:5000 diff --git a/go.mod b/go.mod index 84ca77df8b..fc2df5cfcc 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/Masterminds/squirrel v1.5.2 github.com/aidarkhanov/nanoid v1.0.8 github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect - github.com/containerd/containerd v1.5.9 // indirect + github.com/containerd/containerd v1.5.10 // indirect github.com/docker/go-units v0.4.0 github.com/getkin/kin-openapi v0.87.0 github.com/ghodss/yaml v1.0.0 diff --git a/go.sum b/go.sum index 515d1239eb..20377f06dc 100644 --- a/go.sum +++ b/go.sum @@ -247,8 +247,8 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= -github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= +github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= diff --git a/internal/apiserver/route_get_batch_by_id.go b/internal/apiserver/route_get_batch_by_id.go index d07da849f8..65ebb91f38 100644 --- a/internal/apiserver/route_get_batch_by_id.go +++ b/internal/apiserver/route_get_batch_by_id.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -37,7 +37,7 @@ var getBatchByID = &oapispec.Route{ FilterFactory: nil, Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return &fftypes.Batch{} }, + JSONOutputValue: func() interface{} { return &fftypes.BatchPersisted{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { output, err = getOr(r.Ctx).GetBatchByID(r.Ctx, r.PP["ns"], r.PP["batchid"]) diff --git a/internal/apiserver/route_get_batch_by_id_test.go b/internal/apiserver/route_get_batch_by_id_test.go index 7ab63c56a0..9c8a15c159 100644 --- a/internal/apiserver/route_get_batch_by_id_test.go +++ b/internal/apiserver/route_get_batch_by_id_test.go @@ -32,7 +32,7 @@ func TestGetBatchByID(t *testing.T) { res := httptest.NewRecorder() o.On("GetBatchByID", mock.Anything, "mynamespace", "abcd12345"). - Return(&fftypes.Batch{}, nil) + Return(&fftypes.BatchPersisted{}, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/apiserver/route_get_batches.go b/internal/apiserver/route_get_batches.go index 9357f285e1..a9f199ff65 100644 --- a/internal/apiserver/route_get_batches.go +++ b/internal/apiserver/route_get_batches.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -37,7 +37,7 @@ var getBatches = &oapispec.Route{ FilterFactory: database.BatchQueryFactory, Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return []*fftypes.Batch{} }, + JSONOutputValue: func() interface{} { return []*fftypes.BatchPersisted{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { return filterResult(getOr(r.Ctx).GetBatches(r.Ctx, r.PP["ns"], r.Filter)) diff --git a/internal/apiserver/route_get_batches_test.go b/internal/apiserver/route_get_batches_test.go index c74a687a9f..e25b464fa2 100644 --- a/internal/apiserver/route_get_batches_test.go +++ b/internal/apiserver/route_get_batches_test.go @@ -32,7 +32,7 @@ func TestGetBatches(t *testing.T) { res := httptest.NewRecorder() o.On("GetBatches", mock.Anything, "mynamespace", mock.Anything). - Return([]*fftypes.Batch{}, nil, nil) + Return([]*fftypes.BatchPersisted{}, nil, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/apiserver/route_get_data.go b/internal/apiserver/route_get_data.go index 71810ab65f..d7e2cbbe7c 100644 --- a/internal/apiserver/route_get_data.go +++ b/internal/apiserver/route_get_data.go @@ -37,7 +37,7 @@ var getData = &oapispec.Route{ FilterFactory: database.DataQueryFactory, Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return []*fftypes.Data{} }, + JSONOutputValue: func() interface{} { return fftypes.DataArray{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { return filterResult(getOr(r.Ctx).GetData(r.Ctx, r.PP["ns"], r.Filter)) diff --git a/internal/apiserver/route_get_data_test.go b/internal/apiserver/route_get_data_test.go index 6a69d09370..fedc291245 100644 --- a/internal/apiserver/route_get_data_test.go +++ b/internal/apiserver/route_get_data_test.go @@ -32,7 +32,7 @@ func TestGetData(t *testing.T) { res := httptest.NewRecorder() o.On("GetData", mock.Anything, "mynamespace", mock.Anything). 
- Return([]*fftypes.Data{}, nil, nil) + Return(fftypes.DataArray{}, nil, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/apiserver/route_get_events.go b/internal/apiserver/route_get_events.go index 4aca371a67..76b669eb73 100644 --- a/internal/apiserver/route_get_events.go +++ b/internal/apiserver/route_get_events.go @@ -18,6 +18,7 @@ package apiserver import ( "net/http" + "strings" "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/i18n" @@ -33,13 +34,18 @@ var getEvents = &oapispec.Route{ PathParams: []*oapispec.PathParam{ {Name: "ns", ExampleFromConf: config.NamespacesDefault, Description: i18n.MsgTBD}, }, - QueryParams: nil, + QueryParams: []*oapispec.QueryParam{ + {Name: "fetchreferences", Example: "true", Description: i18n.MsgTBD, IsBool: true}, + }, FilterFactory: database.EventQueryFactory, Description: i18n.MsgTBD, JSONInputValue: nil, JSONOutputValue: func() interface{} { return []*fftypes.Event{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { + if strings.EqualFold(r.QP["fetchreferences"], "true") { + return filterResult(getOr(r.Ctx).GetEventsWithReferences(r.Ctx, r.PP["ns"], r.Filter)) + } return filterResult(getOr(r.Ctx).GetEvents(r.Ctx, r.PP["ns"], r.Filter)) }, } diff --git a/internal/apiserver/route_get_events_test.go b/internal/apiserver/route_get_events_test.go index 775c822646..864c77408a 100644 --- a/internal/apiserver/route_get_events_test.go +++ b/internal/apiserver/route_get_events_test.go @@ -17,9 +17,11 @@ package apiserver import ( + "encoding/json" "net/http/httptest" "testing" + "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -37,3 +39,25 @@ func TestGetEvents(t *testing.T) { assert.Equal(t, 200, res.Result().StatusCode) } + +func TestGetEventsWithReferences(t *testing.T) { + o, r := newTestAPIServer() + req := httptest.NewRequest("GET", "/api/v1/namespaces/mynamespace/events?fetchreferences", nil) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + var ten int64 = 10 + o.On("GetEventsWithReferences", mock.Anything, "mynamespace", mock.Anything). 
+ Return([]*fftypes.EnrichedEvent{}, &database.FilterResult{ + TotalCount: &ten, + }, nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 200, res.Result().StatusCode) + var resWithCount filterResultsWithCount + err := json.NewDecoder(res.Body).Decode(&resWithCount) + assert.NoError(t, err) + assert.NotNil(t, resWithCount.Items) + assert.Equal(t, int64(0), resWithCount.Count) + assert.Equal(t, int64(10), resWithCount.Total) +} diff --git a/internal/apiserver/route_get_msg_data.go b/internal/apiserver/route_get_msg_data.go index 16be29e808..b9390b6a7f 100644 --- a/internal/apiserver/route_get_msg_data.go +++ b/internal/apiserver/route_get_msg_data.go @@ -37,7 +37,7 @@ var getMsgData = &oapispec.Route{ FilterFactory: nil, // No filtering on this route - use namespaces/{ns}/data Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return []*fftypes.Data{} }, + JSONOutputValue: func() interface{} { return fftypes.DataArray{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { output, err = getOr(r.Ctx).GetMessageData(r.Ctx, r.PP["ns"], r.PP["msgid"]) diff --git a/internal/apiserver/route_get_msg_data_test.go b/internal/apiserver/route_get_msg_data_test.go index 6dc4879657..9cab08a055 100644 --- a/internal/apiserver/route_get_msg_data_test.go +++ b/internal/apiserver/route_get_msg_data_test.go @@ -32,7 +32,7 @@ func TestGetMessageData(t *testing.T) { res := httptest.NewRecorder() o.On("GetMessageData", mock.Anything, "mynamespace", "uuid1"). - Return([]*fftypes.Data{}, nil) + Return(fftypes.DataArray{}, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/apiserver/route_get_net_node.go b/internal/apiserver/route_get_net_node.go index 3554f35a5a..8a4edc72af 100644 --- a/internal/apiserver/route_get_net_node.go +++ b/internal/apiserver/route_get_net_node.go @@ -26,10 +26,10 @@ import ( var getNetworkNode = &oapispec.Route{ Name: "getNetworkNode", - Path: "network/nodes/{nid}", + Path: "network/nodes/{nameOrId}", Method: http.MethodGet, PathParams: []*oapispec.PathParam{ - {Name: "nid", Description: i18n.MsgTBD}, + {Name: "nameOrId", Description: i18n.MsgTBD}, }, QueryParams: nil, FilterFactory: nil, @@ -38,7 +38,7 @@ var getNetworkNode = &oapispec.Route{ JSONOutputValue: func() interface{} { return &fftypes.Identity{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { - output, err = getOr(r.Ctx).NetworkMap().GetNodeByID(r.Ctx, r.PP["nid"]) + output, err = getOr(r.Ctx).NetworkMap().GetNodeByNameOrID(r.Ctx, r.PP["nameOrId"]) return output, err }, } diff --git a/internal/apiserver/route_get_net_node_test.go b/internal/apiserver/route_get_net_node_test.go index 194e8cabc5..b988d9f5a4 100644 --- a/internal/apiserver/route_get_net_node_test.go +++ b/internal/apiserver/route_get_net_node_test.go @@ -34,7 +34,7 @@ func TestGetNode(t *testing.T) { req.Header.Set("Content-Type", "application/json; charset=utf-8") res := httptest.NewRecorder() - nmn.On("GetNodeByID", mock.Anything, "node12345"). + nmn.On("GetNodeByNameOrID", mock.Anything, "node12345"). 
Return(&fftypes.Identity{}, nil) r.ServeHTTP(res, req) diff --git a/internal/apiserver/route_get_net_org.go b/internal/apiserver/route_get_net_org.go index 2d8d1b68d3..77cea51356 100644 --- a/internal/apiserver/route_get_net_org.go +++ b/internal/apiserver/route_get_net_org.go @@ -26,10 +26,10 @@ import ( var getNetworkOrg = &oapispec.Route{ Name: "getNetworkOrg", - Path: "network/organizations/{oid}", + Path: "network/organizations/{nameOrId}", Method: http.MethodGet, PathParams: []*oapispec.PathParam{ - {Name: "oid", Description: i18n.MsgTBD}, + {Name: "nameOrId", Description: i18n.MsgTBD}, }, QueryParams: nil, FilterFactory: nil, @@ -38,7 +38,7 @@ var getNetworkOrg = &oapispec.Route{ JSONOutputValue: func() interface{} { return &fftypes.Identity{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { - output, err = getOr(r.Ctx).NetworkMap().GetOrganizationByID(r.Ctx, r.PP["oid"]) + output, err = getOr(r.Ctx).NetworkMap().GetOrganizationByNameOrID(r.Ctx, r.PP["nameOrId"]) return output, err }, } diff --git a/internal/apiserver/route_get_net_org_test.go b/internal/apiserver/route_get_net_org_test.go index 9dcbebd9ad..6e71be06a6 100644 --- a/internal/apiserver/route_get_net_org_test.go +++ b/internal/apiserver/route_get_net_org_test.go @@ -34,7 +34,7 @@ func TestGetOrg(t *testing.T) { req.Header.Set("Content-Type", "application/json; charset=utf-8") res := httptest.NewRecorder() - nmn.On("GetOrganizationByID", mock.Anything, "org12345"). + nmn.On("GetOrganizationByNameOrID", mock.Anything, "org12345"). Return(&fftypes.Identity{}, nil) r.ServeHTTP(res, req) diff --git a/internal/apiserver/route_get_msg_ops.go b/internal/apiserver/route_get_status_pins.go similarity index 65% rename from internal/apiserver/route_get_msg_ops.go rename to internal/apiserver/route_get_status_pins.go index 6e0a2098be..8a7fb88f58 100644 --- a/internal/apiserver/route_get_msg_ops.go +++ b/internal/apiserver/route_get_status_pins.go @@ -19,27 +19,24 @@ package apiserver import ( "net/http" - "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/oapispec" + "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" ) -var getMsgOps = &oapispec.Route{ - Name: "getMsgOps", - Path: "namespaces/{ns}/messages/{msgid}/operations", - Method: http.MethodGet, - PathParams: []*oapispec.PathParam{ - {Name: "ns", ExampleFromConf: config.NamespacesDefault, Description: i18n.MsgTBD}, - {Name: "msgid", Description: i18n.MsgTBD}, - }, +var getStatusPins = &oapispec.Route{ + Name: "getStatusPins", + Path: "status/pins", + Method: http.MethodGet, + PathParams: nil, QueryParams: nil, - FilterFactory: nil, + FilterFactory: database.PinQueryFactory, Description: i18n.MsgTBD, JSONInputValue: nil, - JSONOutputValue: func() interface{} { return []*fftypes.Operation{} }, + JSONOutputValue: func() interface{} { return []fftypes.Pin{} }, JSONOutputCodes: []int{http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { - return filterResult(getOr(r.Ctx).GetMessageOperations(r.Ctx, r.PP["ns"], r.PP["msgid"])) + return filterResult(getOr(r.Ctx).GetPins(r.Ctx, r.Filter)) }, } diff --git a/internal/apiserver/route_get_msg_ops_test.go b/internal/apiserver/route_get_status_pins_test.go similarity index 78% rename from internal/apiserver/route_get_msg_ops_test.go rename to internal/apiserver/route_get_status_pins_test.go index 
7efb10da55..c87f722150 100644 --- a/internal/apiserver/route_get_msg_ops_test.go +++ b/internal/apiserver/route_get_status_pins_test.go @@ -25,14 +25,14 @@ import ( "github.com/stretchr/testify/mock" ) -func TestGetMessageOperations(t *testing.T) { +func TestGetStatusPins(t *testing.T) { o, r := newTestAPIServer() - req := httptest.NewRequest("GET", "/api/v1/namespaces/mynamespace/messages/uuid1/operations", nil) + req := httptest.NewRequest("GET", "/api/v1/status/pins", nil) req.Header.Set("Content-Type", "application/json; charset=utf-8") res := httptest.NewRecorder() - o.On("GetMessageOperations", mock.Anything, "mynamespace", "uuid1", mock.Anything). - Return([]*fftypes.Operation{}, nil, nil) + o.On("GetPins", mock.Anything, mock.Anything). + Return([]*fftypes.Pin{}, nil, nil) r.ServeHTTP(res, req) assert.Equal(t, 200, res.Result().StatusCode) diff --git a/internal/apiserver/route_post_op_retry.go b/internal/apiserver/route_post_op_retry.go new file mode 100644 index 0000000000..18e778a43d --- /dev/null +++ b/internal/apiserver/route_post_op_retry.go @@ -0,0 +1,52 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "context" + "net/http" + + "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/internal/oapispec" + "github.com/hyperledger/firefly/pkg/fftypes" +) + +var postOpRetry = &oapispec.Route{ + Name: "postOpRetry", + Path: "namespaces/{ns}/operations/{opid}/retry", + Method: http.MethodPost, + PathParams: []*oapispec.PathParam{ + {Name: "ns", ExampleFromConf: config.NamespacesDefault, Description: i18n.MsgTBD}, + {Name: "opid", Description: i18n.MsgTBD}, + }, + QueryParams: []*oapispec.QueryParam{}, + FilterFactory: nil, + Description: i18n.MsgTBD, + JSONInputValue: func() interface{} { return &fftypes.EmptyInput{} }, + JSONInputMask: nil, + JSONInputSchema: func(ctx context.Context) string { return emptyObjectSchema }, + JSONOutputValue: func() interface{} { return &fftypes.Operation{} }, + JSONOutputCodes: []int{http.StatusAccepted}, + JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { + opid, err := fftypes.ParseUUID(r.Ctx, r.PP["opid"]) + if err != nil { + return nil, err + } + return getOr(r.Ctx).Operations().RetryOperation(r.Ctx, r.PP["ns"], opid) + }, +} diff --git a/internal/apiserver/route_post_op_retry_test.go b/internal/apiserver/route_post_op_retry_test.go new file mode 100644 index 0000000000..2094a3c8ff --- /dev/null +++ b/internal/apiserver/route_post_op_retry_test.go @@ -0,0 +1,62 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apiserver + +import ( + "bytes" + "encoding/json" + "net/http/httptest" + "testing" + + "github.com/hyperledger/firefly/mocks/operationmocks" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestPostOpRetry(t *testing.T) { + o, r := newTestAPIServer() + mom := &operationmocks.Manager{} + o.On("Operations").Return(mom) + input := fftypes.EmptyInput{} + var buf bytes.Buffer + json.NewEncoder(&buf).Encode(&input) + opID := fftypes.NewUUID() + req := httptest.NewRequest("POST", "/api/v1/namespaces/ns1/operations/"+opID.String()+"/retry", &buf) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + mom.On("RetryOperation", mock.Anything, "ns1", opID). + Return(&fftypes.Operation{}, nil) + r.ServeHTTP(res, req) + + assert.Equal(t, 202, res.Result().StatusCode) +} + +func TestPostOpRetryBadID(t *testing.T) { + _, r := newTestAPIServer() + input := fftypes.EmptyInput{} + var buf bytes.Buffer + json.NewEncoder(&buf).Encode(&input) + req := httptest.NewRequest("POST", "/api/v1/namespaces/ns1/operations/bad/retry", &buf) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + res := httptest.NewRecorder() + + r.ServeHTTP(res, req) + + assert.Equal(t, 400, res.Result().StatusCode) +} diff --git a/internal/apiserver/route_post_token_pool.go b/internal/apiserver/route_post_token_pool.go index 0a45e1b5b6..669dd5f519 100644 --- a/internal/apiserver/route_post_token_pool.go +++ b/internal/apiserver/route_post_token_pool.go @@ -39,7 +39,7 @@ var postTokenPool = &oapispec.Route{ FilterFactory: nil, Description: i18n.MsgTBD, JSONInputValue: func() interface{} { return &fftypes.TokenPool{} }, - JSONInputMask: []string{"ID", "Namespace", "Standard", "ProtocolID", "TX", "Message", "State", "Created"}, + JSONInputMask: []string{"ID", "Namespace", "Standard", "ProtocolID", "TX", "Message", "State", "Created", "Info"}, JSONOutputValue: func() interface{} { return &fftypes.TokenPool{} }, JSONOutputCodes: []int{http.StatusAccepted, http.StatusOK}, JSONHandler: func(r *oapispec.APIRequest) (output interface{}, err error) { diff --git a/internal/apiserver/routes.go b/internal/apiserver/routes.go index 1080d838e6..560c4b13a9 100644 --- a/internal/apiserver/routes.go +++ b/internal/apiserver/routes.go @@ -21,36 +21,30 @@ import "github.com/hyperledger/firefly/internal/oapispec" const emptyObjectSchema = `{"type": "object"}` var routes = []*oapispec.Route{ - postNewDatatype, - postNewNamespace, - postNewMessageBroadcast, - postNewMessagePrivate, - postNewMessageRequestReply, - postNodesSelf, - postNewOrganization, - postNewOrganizationSelf, - postNewIdentity, - patchUpdateIdentity, - - postData, - postNewSubscription, - - putSubscription, - + deleteContractListener, deleteSubscription, - getBatchByID, getBatches, + getBlockchainEventByID, + getBlockchainEvents, + getChartHistogram, + getContractAPIByName, + getContractAPIs, + getContractInterface, + getContractInterfaceNameVersion, + getContractInterfaces, + getContractListenerByNameOrID, + 
getContractListeners, getData, getDataBlob, getDataByID, + getDataMsgs, getDatatypeByName, getDatatypes, - getDataMsgs, getEventByID, getEvents, - getGroups, getGroupByHash, + getGroups, getIdentities, getIdentityByID, getIdentityDID, @@ -58,68 +52,65 @@ var routes = []*oapispec.Route{ getMsgByID, getMsgData, getMsgEvents, - getMsgOps, - getMsgTxn, getMsgs, - getNetworkOrg, - getNetworkOrgs, - getNetworkNode, - getNetworkNodes, + getMsgTxn, getNamespace, getNamespaces, + getNetworkNode, + getNetworkNodes, + getNetworkOrg, + getNetworkOrgs, getOpByID, getOps, getStatus, getStatusBatchManager, + getStatusPins, getSubscriptionByID, getSubscriptions, + getTokenAccountPools, + getTokenAccounts, + getTokenApprovals, + getTokenBalances, + getTokenConnectors, + getTokenPoolByNameOrID, + getTokenPools, + getTokenTransferByID, + getTokenTransfers, + getTxnBlockchainEvents, getTxnByID, getTxnOps, - getTxnBlockchainEvents, - getTxnStatus, getTxns, - getVerifiers, + getTxnStatus, getVerifierByID, - - getChartHistogram, - - postTokenPool, - getTokenPools, - getTokenPoolByNameOrID, - getTokenBalances, - getTokenApprovals, - getTokenAccounts, - getTokenAccountPools, - getTokenTransfers, - getTokenTransferByID, - postTokenMint, - postTokenBurn, - postTokenTransfer, - postTokenApproval, - getTokenConnectors, - - postContractInvoke, - postContractQuery, - - postNewContractInterface, - getContractInterface, - getContractInterfaces, - getContractInterfaceNameVersion, + getVerifiers, + patchUpdateIdentity, + postContractAPIInvoke, + postContractAPIQuery, + postContractInterfaceGenerate, postContractInterfaceInvoke, postContractInterfaceQuery, - postContractInterfaceGenerate, - + postContractInvoke, + postContractQuery, + postData, postNewContractAPI, - getContractAPIByName, - getContractAPIs, - putContractAPI, - postContractAPIInvoke, - postContractAPIQuery, - + postNewContractInterface, postNewContractListener, - getContractListenerByNameOrID, - getContractListeners, - deleteContractListener, - getBlockchainEvents, - getBlockchainEventByID, + postNewDatatype, + postNewIdentity, + postNewMessageBroadcast, + postNewMessagePrivate, + postNewMessageRequestReply, + postNewNamespace, + postNewOrganization, + postNewOrganizationSelf, + postNewSubscription, + postNodesSelf, + postOpRetry, + postTokenApproval, + postTokenBurn, + postTokenMint, + postTokenPool, + postTokenTransfer, + putContractAPI, + putSubscription, } diff --git a/internal/assets/manager.go b/internal/assets/manager.go index e9be352892..c7cb36cf6a 100644 --- a/internal/assets/manager.go +++ b/internal/assets/manager.go @@ -25,8 +25,8 @@ import ( "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/identity" "github.com/hyperledger/firefly/internal/metrics" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/privatemessaging" - "github.com/hyperledger/firefly/internal/retry" "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/internal/sysmessaging" "github.com/hyperledger/firefly/internal/txcommon" @@ -36,8 +36,10 @@ import ( ) type Manager interface { + fftypes.Named + CreateTokenPool(ctx context.Context, ns string, pool *fftypes.TokenPool, waitConfirm bool) (*fftypes.TokenPool, error) - ActivateTokenPool(ctx context.Context, pool *fftypes.TokenPool, event *fftypes.BlockchainEvent) error + ActivateTokenPool(ctx context.Context, pool *fftypes.TokenPool, blockchainInfo fftypes.JSONObject) error GetTokenPools(ctx context.Context, ns string, 
filter database.AndFilter) ([]*fftypes.TokenPool, *database.FilterResult, error) GetTokenPool(ctx context.Context, ns, connector, poolName string) (*fftypes.TokenPool, error) GetTokenPoolByNameOrID(ctx context.Context, ns string, poolNameOrID string) (*fftypes.TokenPool, error) @@ -60,8 +62,9 @@ type Manager interface { TokenApproval(ctx context.Context, ns string, approval *fftypes.TokenApprovalInput, waitConfirm bool) (*fftypes.TokenApproval, error) GetTokenApprovals(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.TokenApproval, *database.FilterResult, error) - Start() error - WaitStop() + // From operations.OperationHandler + PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) + RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) } type assetManager struct { @@ -74,36 +77,42 @@ type assetManager struct { broadcast broadcast.Manager messaging privatemessaging.Manager tokens map[string]tokens.Plugin - retry retry.Retry metrics metrics.Manager + operations operations.Manager keyNormalization int } -func NewAssetManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, sa syncasync.Bridge, bm broadcast.Manager, pm privatemessaging.Manager, ti map[string]tokens.Plugin, mm metrics.Manager) (Manager, error) { - if di == nil || im == nil || sa == nil || bm == nil || pm == nil || ti == nil { +func NewAssetManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, sa syncasync.Bridge, bm broadcast.Manager, pm privatemessaging.Manager, ti map[string]tokens.Plugin, mm metrics.Manager, om operations.Manager, txHelper txcommon.Helper) (Manager, error) { + if di == nil || im == nil || sa == nil || bm == nil || pm == nil || ti == nil || mm == nil || om == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } am := &assetManager{ - ctx: ctx, - database: di, - txHelper: txcommon.NewTransactionHelper(di), - identity: im, - data: dm, - syncasync: sa, - broadcast: bm, - messaging: pm, - tokens: ti, - retry: retry.Retry{ - InitialDelay: config.GetDuration(config.AssetManagerRetryInitialDelay), - MaximumDelay: config.GetDuration(config.AssetManagerRetryMaxDelay), - Factor: config.GetFloat64(config.AssetManagerRetryFactor), - }, + ctx: ctx, + database: di, + txHelper: txHelper, + identity: im, + data: dm, + syncasync: sa, + broadcast: bm, + messaging: pm, + tokens: ti, keyNormalization: identity.ParseKeyNormalizationConfig(config.GetString(config.AssetManagerKeyNormalization)), metrics: mm, + operations: om, } + om.RegisterHandler(ctx, am, []fftypes.OpType{ + fftypes.OpTypeTokenCreatePool, + fftypes.OpTypeTokenActivatePool, + fftypes.OpTypeTokenTransfer, + fftypes.OpTypeTokenApproval, + }) return am, nil } +func (am *assetManager) Name() string { + return "AssetManager" +} + func (am *assetManager) selectTokenPlugin(ctx context.Context, name string) (tokens.Plugin, error) { for pluginName, plugin := range am.tokens { if pluginName == name { @@ -147,14 +156,6 @@ func (am *assetManager) GetTokenConnectors(ctx context.Context, ns string) ([]*f return connectors, nil } -func (am *assetManager) Start() error { - return nil -} - -func (am *assetManager) WaitStop() { - // No go routines -} - func (am *assetManager) getTokenConnectorName(ctx context.Context, ns string) (string, error) { tokenConnectors, err := am.GetTokenConnectors(ctx, ns) if err != nil { diff --git a/internal/assets/manager_test.go 
b/internal/assets/manager_test.go index a510e35071..0bdb8751ec 100644 --- a/internal/assets/manager_test.go +++ b/internal/assets/manager_test.go @@ -19,11 +19,13 @@ import ( "testing" "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/broadcastmocks" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/metricsmocks" + "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/mocks/privatemessagingmocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/mocks/tokenmocks" @@ -36,30 +38,14 @@ import ( ) func newTestAssets(t *testing.T) (*assetManager, func()) { - config.Reset() - mdi := &databasemocks.Plugin{} - mim := &identitymanagermocks.Manager{} - mdm := &datamocks.Manager{} - msa := &syncasyncmocks.Bridge{} - mbm := &broadcastmocks.Manager{} - mpm := &privatemessagingmocks.Manager{} - mti := &tokenmocks.Plugin{} - mm := &metricsmocks.Manager{} - mti.On("Name").Return("ut_tokens").Maybe() - mm.On("IsMetricsEnabled").Return(false) - ctx, cancel := context.WithCancel(context.Background()) - a, err := NewAssetManager(ctx, mdi, mim, mdm, msa, mbm, mpm, map[string]tokens.Plugin{"magic-tokens": mti}, mm) - rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() - rag.RunFn = func(a mock.Arguments) { - rag.ReturnArguments = mock.Arguments{a[1].(func(context.Context) error)(a[0].(context.Context))} - } - assert.NoError(t, err) - am := a.(*assetManager) - am.txHelper = &txcommonmocks.Helper{} - return am, cancel + return newTestAssetsCommon(t, false) } func newTestAssetsWithMetrics(t *testing.T) (*assetManager, func()) { + return newTestAssetsCommon(t, true) +} + +func newTestAssetsCommon(t *testing.T, metrics bool) (*assetManager, func()) { config.Reset() mdi := &databasemocks.Plugin{} mim := &identitymanagermocks.Manager{} @@ -69,11 +55,14 @@ func newTestAssetsWithMetrics(t *testing.T) (*assetManager, func()) { mpm := &privatemessagingmocks.Manager{} mti := &tokenmocks.Plugin{} mm := &metricsmocks.Manager{} - mti.On("Name").Return("ut_tokens").Maybe() - mm.On("IsMetricsEnabled").Return(true) + mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + mm.On("IsMetricsEnabled").Return(metrics) mm.On("TransferSubmitted", mock.Anything) + mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) + mti.On("Name").Return("ut").Maybe() ctx, cancel := context.WithCancel(context.Background()) - a, err := NewAssetManager(ctx, mdi, mim, mdm, msa, mbm, mpm, map[string]tokens.Plugin{"magic-tokens": mti}, mm) + a, err := NewAssetManager(ctx, mdi, mim, mdm, msa, mbm, mpm, map[string]tokens.Plugin{"magic-tokens": mti}, mm, mom, txHelper) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() rag.RunFn = func(a mock.Arguments) { rag.ReturnArguments = mock.Arguments{a[1].(func(context.Context) error)(a[0].(context.Context))} @@ -85,16 +74,14 @@ func newTestAssetsWithMetrics(t *testing.T) (*assetManager, func()) { } func TestInitFail(t *testing.T) { - _, err := NewAssetManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil) + _, err := NewAssetManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } -func TestStartStop(t *testing.T) { +func TestName(t *testing.T) { am, cancel := 
newTestAssets(t) defer cancel() - - am.Start() - am.WaitStop() + assert.Equal(t, "AssetManager", am.Name()) } func TestGetTokenBalances(t *testing.T) { diff --git a/internal/assets/operations.go b/internal/assets/operations.go new file mode 100644 index 0000000000..757f5e0b87 --- /dev/null +++ b/internal/assets/operations.go @@ -0,0 +1,176 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package assets + +import ( + "context" + "fmt" + + "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/internal/txcommon" + "github.com/hyperledger/firefly/pkg/fftypes" +) + +type createPoolData struct { + Pool *fftypes.TokenPool `json:"pool"` +} + +type activatePoolData struct { + Pool *fftypes.TokenPool `json:"pool"` + BlockchainInfo fftypes.JSONObject `json:"blockchainInfo"` +} + +type transferData struct { + Pool *fftypes.TokenPool `json:"pool"` + Transfer *fftypes.TokenTransfer `json:"transfer"` +} + +type approvalData struct { + Pool *fftypes.TokenPool `json:"pool"` + Approval *fftypes.TokenApproval `json:"approval"` +} + +func (am *assetManager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + switch op.Type { + case fftypes.OpTypeTokenCreatePool: + pool, err := txcommon.RetrieveTokenPoolCreateInputs(ctx, op) + if err != nil { + return nil, err + } + return opCreatePool(op, pool), nil + + case fftypes.OpTypeTokenActivatePool: + poolID, blockchainInfo, err := txcommon.RetrieveTokenPoolActivateInputs(ctx, op) + if err != nil { + return nil, err + } + pool, err := am.database.GetTokenPoolByID(ctx, poolID) + if err != nil { + return nil, err + } else if pool == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + return opActivatePool(op, pool, blockchainInfo), nil + + case fftypes.OpTypeTokenTransfer: + transfer, err := txcommon.RetrieveTokenTransferInputs(ctx, op) + if err != nil { + return nil, err + } + pool, err := am.database.GetTokenPoolByID(ctx, transfer.Pool) + if err != nil { + return nil, err + } else if pool == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + return opTransfer(op, pool, transfer), nil + + case fftypes.OpTypeTokenApproval: + approval, err := txcommon.RetrieveTokenApprovalInputs(ctx, op) + if err != nil { + return nil, err + } + pool, err := am.database.GetTokenPoolByID(ctx, approval.Pool) + if err != nil { + return nil, err + } else if pool == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + return opApproval(op, pool, approval), nil + + default: + return nil, i18n.NewError(ctx, i18n.MsgOperationNotSupported, op.Type) + } +} + +func (am *assetManager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { + switch data := op.Data.(type) { + case createPoolData: + plugin, err := am.selectTokenPlugin(ctx, data.Pool.Connector) + if err != nil { + return nil, false, err + } + complete, err = 
plugin.CreateTokenPool(ctx, op.ID, data.Pool) + return nil, complete, err + + case activatePoolData: + plugin, err := am.selectTokenPlugin(ctx, data.Pool.Connector) + if err != nil { + return nil, false, err + } + complete, err = plugin.ActivateTokenPool(ctx, op.ID, data.Pool, data.BlockchainInfo) + return nil, complete, err + + case transferData: + plugin, err := am.selectTokenPlugin(ctx, data.Pool.Connector) + if err != nil { + return nil, false, err + } + switch data.Transfer.Type { + case fftypes.TokenTransferTypeMint: + return nil, false, plugin.MintTokens(ctx, op.ID, data.Pool.ProtocolID, data.Transfer) + case fftypes.TokenTransferTypeTransfer: + return nil, false, plugin.TransferTokens(ctx, op.ID, data.Pool.ProtocolID, data.Transfer) + case fftypes.TokenTransferTypeBurn: + return nil, false, plugin.BurnTokens(ctx, op.ID, data.Pool.ProtocolID, data.Transfer) + default: + panic(fmt.Sprintf("unknown transfer type: %v", data.Transfer.Type)) + } + + case approvalData: + plugin, err := am.selectTokenPlugin(ctx, data.Pool.Connector) + if err != nil { + return nil, false, err + } + return nil, false, plugin.TokensApproval(ctx, op.ID, data.Pool.ProtocolID, data.Approval) + + default: + return nil, false, i18n.NewError(ctx, i18n.MsgOperationDataIncorrect, op.Data) + } +} + +func opCreatePool(op *fftypes.Operation, pool *fftypes.TokenPool) *fftypes.PreparedOperation { + return &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + Data: createPoolData{Pool: pool}, + } +} + +func opActivatePool(op *fftypes.Operation, pool *fftypes.TokenPool, blockchainInfo fftypes.JSONObject) *fftypes.PreparedOperation { + return &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + Data: activatePoolData{Pool: pool, BlockchainInfo: blockchainInfo}, + } +} + +func opTransfer(op *fftypes.Operation, pool *fftypes.TokenPool, transfer *fftypes.TokenTransfer) *fftypes.PreparedOperation { + return &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + Data: transferData{Pool: pool, Transfer: transfer}, + } +} + +func opApproval(op *fftypes.Operation, pool *fftypes.TokenPool, approval *fftypes.TokenApproval) *fftypes.PreparedOperation { + return &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + Data: approvalData{Pool: pool, Approval: approval}, + } +} diff --git a/internal/assets/operations_test.go b/internal/assets/operations_test.go new file mode 100644 index 0000000000..d2f09bbcfb --- /dev/null +++ b/internal/assets/operations_test.go @@ -0,0 +1,526 @@ +// Copyright © 2021 Kaleido, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
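A note on the prepare/run split that the new operations.go establishes: PrepareOperation rehydrates a persisted Operation row (its type plus JSON inputs) into a typed PreparedOperation, and RunOperation dispatches on the concrete Data payload to the matching token-plugin call. That split is what makes the generic /namespaces/{ns}/operations/{opid}/retry route added earlier in this patch possible, because a retry needs nothing beyond the stored row. Below is a minimal standalone sketch of the contract, not FireFly code: the Operation and PreparedOperation stand-ins are trimmed to the fields the sketch uses, and retryOperation is a hypothetical caller.

package opsketch

import (
	"context"
	"fmt"
)

// Trimmed stand-ins for the fftypes structures used by the real interface.
type Operation struct {
	ID    string
	Type  string
	Input map[string]interface{} // persisted JSON inputs
}

type PreparedOperation struct {
	ID   string
	Type string
	Data interface{} // typed payload rebuilt by PrepareOperation
}

// OperationHandler mirrors the two-phase contract a manager registers
// for its operation types via RegisterHandler.
type OperationHandler interface {
	Name() string
	PrepareOperation(ctx context.Context, op *Operation) (*PreparedOperation, error)
	RunOperation(ctx context.Context, op *PreparedOperation) (outputs map[string]interface{}, complete bool, err error)
}

// retryOperation shows why the split matters: the caller holds only the
// stored Operation, and Prepare re-derives the in-memory payload that Run
// then submits to the plugin.
func retryOperation(ctx context.Context, h OperationHandler, op *Operation) error {
	prepared, err := h.PrepareOperation(ctx, op)
	if err != nil {
		return err
	}
	_, complete, err := h.RunOperation(ctx, prepared)
	if err != nil {
		return err
	}
	fmt.Printf("operation %s resubmitted via %s (complete=%v)\n", op.ID, h.Name(), complete)
	return nil
}

The tests that follow exercise exactly this round trip for each of the four token operation types.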
+ +package assets + +import ( + "context" + "fmt" + "testing" + + "github.com/hyperledger/firefly/internal/txcommon" + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/tokenmocks" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" +) + +func TestPrepareAndRunCreatePool(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenCreatePool, + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + ProtocolID: "F1", + } + err := txcommon.AddTokenPoolCreateInputs(op, pool) + assert.NoError(t, err) + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mti.On("CreateTokenPool", context.Background(), op.ID, pool).Return(false, nil) + + po, err := am.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, pool, po.Data.(createPoolData).Pool) + + _, complete, err := am.RunOperation(context.Background(), po) + + assert.False(t, complete) + assert.NoError(t, err) + + mti.AssertExpectations(t) +} + +func TestPrepareAndRunActivatePool(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenActivatePool, + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + ID: fftypes.NewUUID(), + ProtocolID: "F1", + } + info := fftypes.JSONObject{ + "some": "info", + } + txcommon.AddTokenPoolActivateInputs(op, pool.ID, info) + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mdi := am.database.(*databasemocks.Plugin) + mti.On("ActivateTokenPool", context.Background(), op.ID, pool, info).Return(true, nil) + mdi.On("GetTokenPoolByID", context.Background(), pool.ID).Return(pool, nil) + + po, err := am.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, pool, po.Data.(activatePoolData).Pool) + assert.Equal(t, info, po.Data.(activatePoolData).BlockchainInfo) + + _, complete, err := am.RunOperation(context.Background(), po) + + assert.True(t, complete) + assert.NoError(t, err) + + mti.AssertExpectations(t) + mdi.AssertExpectations(t) +} + +func TestPrepareAndRunTransfer(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenTransfer, + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + ProtocolID: "F1", + } + transfer := &fftypes.TokenTransfer{ + LocalID: fftypes.NewUUID(), + Pool: pool.ID, + Type: fftypes.TokenTransferTypeTransfer, + } + txcommon.AddTokenTransferInputs(op, transfer) + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mdi := am.database.(*databasemocks.Plugin) + mti.On("TransferTokens", context.Background(), op.ID, "F1", transfer).Return(nil) + mdi.On("GetTokenPoolByID", context.Background(), pool.ID).Return(pool, nil) + + po, err := am.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, pool, po.Data.(transferData).Pool) + assert.Equal(t, transfer, po.Data.(transferData).Transfer) + + _, complete, err := am.RunOperation(context.Background(), po) + + assert.False(t, complete) + assert.NoError(t, err) + + mti.AssertExpectations(t) + mdi.AssertExpectations(t) +} + +func TestPrepareAndRunApproval(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenApproval, + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + ProtocolID: "F1", + } + approval := &fftypes.TokenApproval{ + LocalID: fftypes.NewUUID(), + Pool: 
pool.ID, + Approved: true, + } + txcommon.AddTokenApprovalInputs(op, approval) + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mdi := am.database.(*databasemocks.Plugin) + mti.On("TokensApproval", context.Background(), op.ID, "F1", approval).Return(nil) + mdi.On("GetTokenPoolByID", context.Background(), pool.ID).Return(pool, nil) + + po, err := am.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, pool, po.Data.(approvalData).Pool) + assert.Equal(t, approval, po.Data.(approvalData).Approval) + + _, complete, err := am.RunOperation(context.Background(), po) + + assert.False(t, complete) + assert.NoError(t, err) + + mti.AssertExpectations(t) + mdi.AssertExpectations(t) +} + +func TestPrepareOperationNotSupported(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + po, err := am.PrepareOperation(context.Background(), &fftypes.Operation{}) + + assert.Nil(t, po) + assert.Regexp(t, "FF10371", err) +} + +func TestPrepareOperationCreatePoolBadInput(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenCreatePool, + Input: fftypes.JSONObject{"id": "bad"}, + } + + _, err := am.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10151", err) +} + +func TestPrepareOperationActivatePoolBadInput(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenActivatePool, + Input: fftypes.JSONObject{"id": "bad"}, + } + + _, err := am.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10142", err) +} + +func TestPrepareOperationActivatePoolError(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + poolID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenActivatePool, + Input: fftypes.JSONObject{"id": poolID.String()}, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPoolByID", context.Background(), poolID).Return(nil, fmt.Errorf("pop")) + + _, err := am.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationActivatePoolNotFound(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + poolID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenActivatePool, + Input: fftypes.JSONObject{"id": poolID.String()}, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPoolByID", context.Background(), poolID).Return(nil, nil) + + _, err := am.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationTransferBadInput(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenTransfer, + Input: fftypes.JSONObject{"localId": "bad"}, + } + + _, err := am.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10151", err) +} + +func TestPrepareOperationTransferError(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + poolID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenTransfer, + Input: fftypes.JSONObject{"pool": poolID.String()}, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPoolByID", context.Background(), poolID).Return(nil, fmt.Errorf("pop")) + + _, err := am.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func 
TestPrepareOperationTransferNotFound(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + poolID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenTransfer, + Input: fftypes.JSONObject{"pool": poolID.String()}, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPoolByID", context.Background(), poolID).Return(nil, nil) + + _, err := am.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationApprovalBadInput(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenApproval, + Input: fftypes.JSONObject{"localId": "bad"}, + } + + _, err := am.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10151", err) +} + +func TestPrepareOperationApprovalError(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + poolID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenApproval, + Input: fftypes.JSONObject{"pool": poolID.String()}, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPoolByID", context.Background(), poolID).Return(nil, fmt.Errorf("pop")) + + _, err := am.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationApprovalNotFound(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + poolID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeTokenApproval, + Input: fftypes.JSONObject{"pool": poolID.String()}, + } + + mdi := am.database.(*databasemocks.Plugin) + mdi.On("GetTokenPoolByID", context.Background(), poolID).Return(nil, nil) + + _, err := am.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestRunOperationNotSupported(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + _, complete, err := am.RunOperation(context.Background(), &fftypes.PreparedOperation{}) + + assert.False(t, complete) + assert.Regexp(t, "FF10378", err) +} + +func TestRunOperationCreatePoolBadPlugin(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{} + pool := &fftypes.TokenPool{} + + _, complete, err := am.RunOperation(context.Background(), opCreatePool(op, pool)) + + assert.False(t, complete) + assert.Regexp(t, "FF10272", err) +} + +func TestRunOperationCreatePool(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + ID: fftypes.NewUUID(), + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + } + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mti.On("CreateTokenPool", context.Background(), op.ID, pool).Return(false, nil) + + _, complete, err := am.RunOperation(context.Background(), opCreatePool(op, pool)) + + assert.False(t, complete) + assert.NoError(t, err) + + mti.AssertExpectations(t) +} + +func TestRunOperationActivatePoolBadPlugin(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{} + pool := &fftypes.TokenPool{} + info := fftypes.JSONObject{} + + _, complete, err := am.RunOperation(context.Background(), opActivatePool(op, pool, info)) + + assert.False(t, complete) + assert.Regexp(t, "FF10272", err) +} + +func TestRunOperationTransferBadPlugin(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{} + pool := &fftypes.TokenPool{} + transfer 
:= &fftypes.TokenTransfer{} + + _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + + assert.False(t, complete) + assert.Regexp(t, "FF10272", err) +} + +func TestRunOperationApprovalBadPlugin(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{} + pool := &fftypes.TokenPool{} + approval := &fftypes.TokenApproval{} + + _, complete, err := am.RunOperation(context.Background(), opApproval(op, pool, approval)) + + assert.False(t, complete) + assert.Regexp(t, "FF10272", err) +} + +func TestRunOperationTransferUnknownType(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + ID: fftypes.NewUUID(), + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + } + transfer := &fftypes.TokenTransfer{ + Type: "bad", + } + + assert.PanicsWithValue(t, "unknown transfer type: bad", func() { + am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + }) +} + +func TestRunOperationTransferMint(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + ID: fftypes.NewUUID(), + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + ProtocolID: "F1", + } + transfer := &fftypes.TokenTransfer{ + Type: fftypes.TokenTransferTypeMint, + } + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mti.On("MintTokens", context.Background(), op.ID, "F1", transfer).Return(nil) + + _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + + assert.False(t, complete) + assert.NoError(t, err) + + mti.AssertExpectations(t) +} + +func TestRunOperationTransferBurn(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + ID: fftypes.NewUUID(), + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + ProtocolID: "F1", + } + transfer := &fftypes.TokenTransfer{ + Type: fftypes.TokenTransferTypeBurn, + } + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mti.On("BurnTokens", context.Background(), op.ID, "F1", transfer).Return(nil) + + _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + + assert.False(t, complete) + assert.NoError(t, err) + + mti.AssertExpectations(t) +} + +func TestRunOperationTransfer(t *testing.T) { + am, cancel := newTestAssets(t) + defer cancel() + + op := &fftypes.Operation{ + ID: fftypes.NewUUID(), + } + pool := &fftypes.TokenPool{ + Connector: "magic-tokens", + ProtocolID: "F1", + } + transfer := &fftypes.TokenTransfer{ + Type: fftypes.TokenTransferTypeTransfer, + } + + mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) + mti.On("TransferTokens", context.Background(), op.ID, "F1", transfer).Return(nil) + + _, complete, err := am.RunOperation(context.Background(), opTransfer(op, pool, transfer)) + + assert.False(t, complete) + assert.NoError(t, err) + + mti.AssertExpectations(t) +} diff --git a/internal/assets/token_approval.go b/internal/assets/token_approval.go index 8fd66a0550..255cacb694 100644 --- a/internal/assets/token_approval.go +++ b/internal/assets/token_approval.go @@ -127,13 +127,7 @@ func (s *approveSender) sendInternal(ctx context.Context, method sendMethod) err return err } - err = plugin.TokensApproval(ctx, op.ID, pool.ProtocolID, &s.approval.TokenApproval) - // if transaction fails, mark op as failed in DB - if err != nil { - s.mgr.txHelper.WriteOperationFailure(ctx, op.ID, err) - } - - return err + return s.mgr.operations.RunOperation(ctx, opApproval(op, pool, 
&s.approval.TokenApproval)) } func (am *assetManager) validateApproval(ctx context.Context, ns string, approval *fftypes.TokenApprovalInput) (err error) { diff --git a/internal/assets/token_approval_test.go b/internal/assets/token_approval_test.go index 096363c9f9..34cc39bc84 100644 --- a/internal/assets/token_approval_test.go +++ b/internal/assets/token_approval_test.go @@ -24,8 +24,8 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" + "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" - "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" @@ -64,17 +64,25 @@ func TestTokenApprovalSuccess(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TokensApproval", context.Background(), mock.Anything, "F1", &approval.TokenApproval).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenApproval).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(approvalData) + return op.Type == fftypes.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval + })).Return(nil) _, err := am.TokenApproval(context.Background(), "ns1", approval, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestTokenApprovalSuccessUnknownIdentity(t *testing.T) { @@ -94,17 +102,25 @@ func TestTokenApprovalSuccessUnknownIdentity(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TokensApproval", context.Background(), mock.Anything, "F1", &approval.TokenApproval).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenApproval).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(approvalData) + return op.Type == fftypes.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval + })).Return(nil) _, err := am.TokenApproval(context.Background(), "ns1", approval, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } 
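One pattern worth noting in these reworked tests: because the plugin call now happens inside the operations manager, each expectation on RunOperation repeats the same mock.MatchedBy closure over the PreparedOperation. A helper of the following shape could be factored out; it is hypothetical (not part of this patch) and assumes the approvalData type from this package and testify's mock package are in scope. Using the two-value type assertion also keeps the matcher from panicking on a payload of an unexpected type, where the inline closures' direct assertions would.

// Hypothetical test helper, not part of this patch.
func matchApprovalOp(pool *fftypes.TokenPool, approval *fftypes.TokenApproval) interface{} {
	return mock.MatchedBy(func(op *fftypes.PreparedOperation) bool {
		data, ok := op.Data.(approvalData) // ok-check: no panic on a mismatched payload
		return ok &&
			op.Type == fftypes.OpTypeTokenApproval &&
			data.Pool == pool && // pointer identity with the pool the test loaded
			data.Approval == approval
	})
}

Each expectation then collapses to a single line, for example: mom.On("RunOperation", context.Background(), matchApprovalOp(pool, &approval.TokenApproval)).Return(nil).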
func TestApprovalUnknownConnectorNoConnectors(t *testing.T) { @@ -122,9 +138,6 @@ func TestApprovalUnknownConnectorNoConnectors(t *testing.T) { am.tokens = make(map[string]tokens.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - _, err := am.TokenApproval(context.Background(), "ns1", approval, false) assert.Regexp(t, "FF10292", err) } @@ -145,9 +158,6 @@ func TestApprovalUnknownConnectorMultipleConnectors(t *testing.T) { am.tokens["magic-tokens"] = nil am.tokens["magic-tokens2"] = nil - mim := am.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - _, err := am.TokenApproval(context.Background(), "ns1", approval, false) assert.Regexp(t, "FF10292", err) } @@ -167,9 +177,6 @@ func TestApprovalUnknownConnectorBadNamespace(t *testing.T) { am.tokens = make(map[string]tokens.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - _, err := am.TokenApproval(context.Background(), "", approval, false) assert.Regexp(t, "FF10131", err) } @@ -208,9 +215,9 @@ func TestApprovalUnknownPoolSuccess(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) fb := database.TokenPoolQueryFactory.NewFilter(context.Background()) f := fb.And() f.Limit(1).Count(true) @@ -231,12 +238,20 @@ func TestApprovalUnknownPoolSuccess(t *testing.T) { return info.Count && info.Limit == 1 }))).Return(tokenPools, filterResult, nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(tokenPools[0], nil) - mti.On("TokensApproval", context.Background(), mock.Anything, "F1", &approval.TokenApproval).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenApproval).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(approvalData) + return op.Type == fftypes.OpTypeTokenApproval && data.Pool == tokenPools[0] && data.Approval == &approval.TokenApproval + })).Return(nil) _, err := am.TokenApproval(context.Background(), "ns1", approval, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestApprovalUnknownPoolNoPool(t *testing.T) { @@ -252,7 +267,6 @@ func TestApprovalUnknownPoolNoPool(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) fb := database.TokenPoolQueryFactory.NewFilter(context.Background()) f := fb.And() f.Limit(1).Count(true) @@ -261,7 +275,6 @@ func TestApprovalUnknownPoolNoPool(t *testing.T) { filterResult := &database.FilterResult{ TotalCount: &totalCount, } - mim.On("NormalizeSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPools", context.Background(), mock.MatchedBy((func(f database.AndFilter) bool { info, _ := f.Finalize() return info.Count && info.Limit == 1 @@ 
-358,18 +371,25 @@ func TestApprovalFail(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TokensApproval", context.Background(), mock.Anything, "F1", &approval.TokenApproval).Return(fmt.Errorf("pop")) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenApproval).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) - mth.On("WriteOperationFailure", context.Background(), mock.Anything, fmt.Errorf("pop")) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(approvalData) + return op.Type == fftypes.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval + })).Return(fmt.Errorf("pop")) _, err := am.TokenApproval(context.Background(), "ns1", approval, false) assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestApprovalTransactionFail(t *testing.T) { @@ -402,38 +422,6 @@ func TestApprovalTransactionFail(t *testing.T) { mdi.AssertExpectations(t) } -func TestApprovalFailAndDbFail(t *testing.T) { - am, cancel := newTestAssets(t) - defer cancel() - - approval := &fftypes.TokenApprovalInput{ - TokenApproval: fftypes.TokenApproval{ - Approved: true, - Operator: "operator", - Key: "key", - }, - Pool: "pool1", - } - pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, - } - - mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) - mth := am.txHelper.(*txcommonmocks.Helper) - mim.On("NormalizeSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TokensApproval", context.Background(), mock.Anything, "F1", &approval.TokenApproval).Return(fmt.Errorf("pop")) - mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) - mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenApproval).Return(fftypes.NewUUID(), nil) - mth.On("WriteOperationFailure", context.Background(), mock.Anything, fmt.Errorf("pop")) - - _, err := am.TokenApproval(context.Background(), "ns1", approval, false) - assert.EqualError(t, err, "pop") -} - func TestApprovalOperationsFail(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() @@ -482,16 +470,19 @@ func TestTokenApprovalConfirm(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mdm := am.data.(*datamocks.Manager) msa := am.syncasync.(*syncasyncmocks.Bridge) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "key", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", 
"pool1").Return(pool, nil) - mti.On("TokensApproval", context.Background(), mock.Anything, "F1", &approval.TokenApproval).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenApproval).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(approvalData) + return op.Type == fftypes.OpTypeTokenApproval && data.Pool == pool && data.Approval == &approval.TokenApproval + })).Return(nil) msa.On("WaitForTokenApproval", context.Background(), "ns1", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { @@ -504,9 +495,10 @@ func TestTokenApprovalConfirm(t *testing.T) { assert.NoError(t, err) mdi.AssertExpectations(t) + mim.AssertExpectations(t) mdm.AssertExpectations(t) msa.AssertExpectations(t) - mti.AssertExpectations(t) + mom.AssertExpectations(t) } func TestApprovalPrepare(t *testing.T) { diff --git a/internal/assets/token_pool.go b/internal/assets/token_pool.go index 108753e8b8..d4897bd2d1 100644 --- a/internal/assets/token_pool.go +++ b/internal/assets/token_pool.go @@ -79,24 +79,19 @@ func (am *assetManager) createTokenPoolInternal(ctx context.Context, pool *fftyp pool.Namespace, txid, fftypes.OpTypeTokenCreatePool) - txcommon.AddTokenPoolCreateInputs(op, pool) - - return am.database.InsertOperation(ctx, op) + if err = txcommon.AddTokenPoolCreateInputs(op, pool); err == nil { + err = am.database.InsertOperation(ctx, op) + } + return err }) if err != nil { return nil, err } - if complete, err := plugin.CreateTokenPool(ctx, op.ID, pool); err != nil { - am.txHelper.WriteOperationFailure(ctx, op.ID, err) - return nil, err - } else if complete { - am.txHelper.WriteOperationSuccess(ctx, op.ID, nil) - } - return pool, nil + return pool, am.operations.RunOperation(ctx, opCreatePool(op, pool)) } -func (am *assetManager) ActivateTokenPool(ctx context.Context, pool *fftypes.TokenPool, event *fftypes.BlockchainEvent) error { +func (am *assetManager) ActivateTokenPool(ctx context.Context, pool *fftypes.TokenPool, blockchainInfo fftypes.JSONObject) error { plugin, err := am.selectTokenPlugin(ctx, pool.Connector) if err != nil { return err @@ -107,17 +102,12 @@ func (am *assetManager) ActivateTokenPool(ctx context.Context, pool *fftypes.Tok pool.Namespace, pool.TX.ID, fftypes.OpTypeTokenActivatePool) + txcommon.AddTokenPoolActivateInputs(op, pool.ID, blockchainInfo) if err := am.database.InsertOperation(ctx, op); err != nil { return err } - if complete, err := plugin.ActivateTokenPool(ctx, op.ID, pool, event); err != nil { - am.txHelper.WriteOperationFailure(ctx, op.ID, err) - return err - } else if complete { - am.txHelper.WriteOperationSuccess(ctx, op.ID, nil) - } - return nil + return am.operations.RunOperation(ctx, opActivatePool(op, pool, blockchainInfo)) } func (am *assetManager) GetTokenPools(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.TokenPool, *database.FilterResult, error) { diff --git a/internal/assets/token_pool_test.go b/internal/assets/token_pool_test.go index 6635edaedd..7efa93dbc6 100644 --- a/internal/assets/token_pool_test.go +++ b/internal/assets/token_pool_test.go @@ -24,8 +24,8 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" + "github.com/hyperledger/firefly/mocks/operationmocks" 
"github.com/hyperledger/firefly/mocks/syncasyncmocks" - "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" @@ -57,17 +57,26 @@ func TestCreateTokenPoolUnknownConnectorSuccess(t *testing.T) { mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("resolved-key", nil) mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) - mti.On("CreateTokenPool", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(false, nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenPool).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(createPoolData) + return op.Type == fftypes.OpTypeTokenCreatePool && data.Pool == pool + })).Return(nil) _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestCreateTokenPoolUnknownConnectorNoConnectors(t *testing.T) { @@ -85,6 +94,8 @@ func TestCreateTokenPoolUnknownConnectorNoConnectors(t *testing.T) { _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.Regexp(t, "FF10292", err) + + mdm.AssertExpectations(t) } func TestCreateTokenPoolUnknownConnectorMultipleConnectors(t *testing.T) { @@ -103,6 +114,8 @@ func TestCreateTokenPoolUnknownConnectorMultipleConnectors(t *testing.T) { _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.Regexp(t, "FF10292", err) + + mdm.AssertExpectations(t) } func TestCreateTokenPoolMissingNamespace(t *testing.T) { @@ -113,27 +126,13 @@ func TestCreateTokenPoolMissingNamespace(t *testing.T) { Name: "testpool", } - mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(fmt.Errorf("pop")) - msa := am.syncasync.(*syncasyncmocks.Bridge) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) - mth := am.txHelper.(*txcommonmocks.Helper) - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil).Times(2) - mti.On("CreateTokenPool", context.Background(), mock.Anything, mock.Anything).Return(false, nil).Times(1) - mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenPool).Return(fftypes.NewUUID(), nil) - mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil).Times(1) - msa.On("WaitForTokenPool", context.Background(), "ns1", mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { - send := args[3].(syncasync.RequestSender) - send(context.Background()) - }). 
- Return(nil, nil) _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.EqualError(t, err, "pop") + + mdm.AssertExpectations(t) } func TestCreateTokenPoolNoConnectors(t *testing.T) { @@ -150,6 +149,8 @@ func TestCreateTokenPoolNoConnectors(t *testing.T) { _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.Regexp(t, "FF10292", err) + + mdm.AssertExpectations(t) } func TestCreateTokenPoolIdentityFail(t *testing.T) { @@ -167,6 +168,9 @@ func TestCreateTokenPoolIdentityFail(t *testing.T) { _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.EqualError(t, err, "pop") + + mdm.AssertExpectations(t) + mim.AssertExpectations(t) } func TestCreateTokenPoolWrongConnector(t *testing.T) { @@ -178,18 +182,16 @@ func TestCreateTokenPoolWrongConnector(t *testing.T) { Name: "testpool", } - mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) mim := am.identity.(*identitymanagermocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) - mdi.On("UpsertTransaction", context.Background(), mock.MatchedBy(func(tx *fftypes.Transaction) bool { - return tx.Type == fftypes.TransactionTypeTokenPool - })).Return(nil) - mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.Regexp(t, "FF10272", err) + + mdm.AssertExpectations(t) + mim.AssertExpectations(t) } func TestCreateTokenPoolFail(t *testing.T) { @@ -203,18 +205,26 @@ func TestCreateTokenPoolFail(t *testing.T) { mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenPool).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) - mti.On("CreateTokenPool", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(false, fmt.Errorf("pop")) - mth.On("WriteOperationFailure", context.Background(), mock.Anything, fmt.Errorf("pop")) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(createPoolData) + return op.Type == fftypes.OpTypeTokenCreatePool && data.Pool == pool + })).Return(fmt.Errorf("pop")) _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestCreateTokenPoolTransactionFail(t *testing.T) { @@ -235,6 +245,10 @@ func TestCreateTokenPoolTransactionFail(t *testing.T) { _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.Regexp(t, "pop", err) + + mdm.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) } func TestCreateTokenPoolOpInsertFail(t *testing.T) { @@ -257,6 +271,11 @@ func TestCreateTokenPoolOpInsertFail(t *testing.T) { _, err := 
am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) } func TestCreateTokenPoolSyncSuccess(t *testing.T) { @@ -270,18 +289,26 @@ func TestCreateTokenPoolSyncSuccess(t *testing.T) { mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenPool).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) - mti.On("CreateTokenPool", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - mth.On("WriteOperationSuccess", context.Background(), mock.Anything, mock.Anything) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(createPoolData) + return op.Type == fftypes.OpTypeTokenCreatePool && data.Pool == pool + })).Return(nil) _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestCreateTokenPoolAsyncSuccess(t *testing.T) { @@ -295,17 +322,26 @@ func TestCreateTokenPoolAsyncSuccess(t *testing.T) { mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) - mti.On("CreateTokenPool", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(false, nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenPool).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(createPoolData) + return op.Type == fftypes.OpTypeTokenCreatePool && data.Pool == pool + })).Return(nil) _, err := am.CreateTokenPool(context.Background(), "ns1", pool, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestCreateTokenPoolConfirm(t *testing.T) { @@ -320,23 +356,33 @@ func TestCreateTokenPoolConfirm(t *testing.T) { mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) msa := am.syncasync.(*syncasyncmocks.Bridge) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) + mdm.On("VerifyNamespaceExists", context.Background(), 
"ns1").Return(nil) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil).Times(2) - mti.On("CreateTokenPool", context.Background(), mock.Anything, mock.Anything).Return(false, nil).Times(1) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenPool).Return(fftypes.NewUUID(), nil) - mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil).Times(1) + mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) msa.On("WaitForTokenPool", context.Background(), "ns1", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { send := args[3].(syncasync.RequestSender) send(context.Background()) }). Return(nil, nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(createPoolData) + return op.Type == fftypes.OpTypeTokenCreatePool && data.Pool == pool + })).Return(nil) _, err := am.CreateTokenPool(context.Background(), "ns1", pool, true) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + msa.AssertExpectations(t) + mom.AssertExpectations(t) } func TestActivateTokenPool(t *testing.T) { @@ -344,22 +390,30 @@ func TestActivateTokenPool(t *testing.T) { defer cancel() pool := &fftypes.TokenPool{ + ID: fftypes.NewUUID(), Namespace: "ns1", Connector: "magic-tokens", } - ev := &fftypes.BlockchainEvent{} + info := fftypes.JSONObject{ + "some": "info", + } - mdm := am.data.(*datamocks.Manager) mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) - mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) + mom := am.operations.(*operationmocks.Manager) mdi.On("InsertOperation", context.Background(), mock.MatchedBy(func(op *fftypes.Operation) bool { return op.Type == fftypes.OpTypeTokenActivatePool })).Return(nil) - mti.On("ActivateTokenPool", context.Background(), mock.Anything, pool, ev).Return(false, nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(activatePoolData) + assert.Equal(t, info, data.BlockchainInfo) + return op.Type == fftypes.OpTypeTokenActivatePool && data.Pool == pool + })).Return(nil) - err := am.ActivateTokenPool(context.Background(), pool, ev) + err := am.ActivateTokenPool(context.Background(), pool, info) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mom.AssertExpectations(t) } func TestActivateTokenPoolBadConnector(t *testing.T) { @@ -370,12 +424,9 @@ func TestActivateTokenPoolBadConnector(t *testing.T) { Namespace: "ns1", Connector: "bad", } - ev := &fftypes.BlockchainEvent{} - - mdm := am.data.(*datamocks.Manager) - mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) + info := fftypes.JSONObject{} - err := am.ActivateTokenPool(context.Background(), pool, ev) + err := am.ActivateTokenPool(context.Background(), pool, info) assert.Regexp(t, "FF10272", err) } @@ -387,17 +438,17 @@ func TestActivateTokenPoolOpInsertFail(t *testing.T) { Namespace: "ns1", Connector: "magic-tokens", } - ev := &fftypes.BlockchainEvent{} + info := fftypes.JSONObject{} - mdm := am.data.(*datamocks.Manager) mdi := am.database.(*databasemocks.Plugin) - mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) mdi.On("InsertOperation", context.Background(), 
mock.MatchedBy(func(op *fftypes.Operation) bool { return op.Type == fftypes.OpTypeTokenActivatePool })).Return(fmt.Errorf("pop")) - err := am.ActivateTokenPool(context.Background(), pool, ev) + err := am.ActivateTokenPool(context.Background(), pool, info) assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) } func TestActivateTokenPoolFail(t *testing.T) { @@ -408,21 +459,28 @@ func TestActivateTokenPoolFail(t *testing.T) { Namespace: "ns1", Connector: "magic-tokens", } - ev := &fftypes.BlockchainEvent{} + info := fftypes.JSONObject{ + "some": "info", + } - mdm := am.data.(*datamocks.Manager) mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mth := am.txHelper.(*txcommonmocks.Helper) - mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) + mom := am.operations.(*operationmocks.Manager) mdi.On("InsertOperation", context.Background(), mock.MatchedBy(func(op *fftypes.Operation) bool { return op.Type == fftypes.OpTypeTokenActivatePool })).Return(nil) - mti.On("ActivateTokenPool", context.Background(), mock.Anything, pool, ev).Return(false, fmt.Errorf("pop")) - mth.On("WriteOperationFailure", context.Background(), mock.Anything, fmt.Errorf("pop")) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(activatePoolData) + assert.Equal(t, info, data.BlockchainInfo) + return op.Type == fftypes.OpTypeTokenActivatePool && data.Pool == pool + })).Return(fmt.Errorf("pop")) - err := am.ActivateTokenPool(context.Background(), pool, ev) + err := am.ActivateTokenPool(context.Background(), pool, info) assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestActivateTokenPoolSyncSuccess(t *testing.T) { @@ -433,21 +491,28 @@ func TestActivateTokenPoolSyncSuccess(t *testing.T) { Namespace: "ns1", Connector: "magic-tokens", } - ev := &fftypes.BlockchainEvent{} + info := fftypes.JSONObject{ + "some": "info", + } - mdm := am.data.(*datamocks.Manager) mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mth := am.txHelper.(*txcommonmocks.Helper) - mdm.On("VerifyNamespaceExists", context.Background(), "ns1").Return(nil) + mom := am.operations.(*operationmocks.Manager) mdi.On("InsertOperation", context.Background(), mock.MatchedBy(func(op *fftypes.Operation) bool { return op.Type == fftypes.OpTypeTokenActivatePool })).Return(nil) - mti.On("ActivateTokenPool", context.Background(), mock.Anything, pool, ev).Return(true, nil) - mth.On("WriteOperationSuccess", context.Background(), mock.Anything, mock.Anything) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(activatePoolData) + assert.Equal(t, info, data.BlockchainInfo) + return op.Type == fftypes.OpTypeTokenActivatePool && data.Pool == pool + })).Return(nil) - err := am.ActivateTokenPool(context.Background(), pool, ev) + err := am.ActivateTokenPool(context.Background(), pool, info) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestGetTokenPool(t *testing.T) { @@ -458,6 +523,8 @@ func TestGetTokenPool(t *testing.T) { mdi.On("GetTokenPool", context.Background(), "ns1", "abc").Return(&fftypes.TokenPool{}, nil) _, err := am.GetTokenPool(context.Background(), "ns1", "magic-tokens", "abc") assert.NoError(t, err) + + mdi.AssertExpectations(t) } func 
TestGetTokenPoolNotFound(t *testing.T) { @@ -468,6 +535,8 @@ func TestGetTokenPoolNotFound(t *testing.T) { mdi.On("GetTokenPool", context.Background(), "ns1", "abc").Return(nil, nil) _, err := am.GetTokenPool(context.Background(), "ns1", "magic-tokens", "abc") assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) } func TestGetTokenPoolFailed(t *testing.T) { @@ -478,6 +547,8 @@ func TestGetTokenPoolFailed(t *testing.T) { mdi.On("GetTokenPool", context.Background(), "ns1", "abc").Return(nil, fmt.Errorf("pop")) _, err := am.GetTokenPool(context.Background(), "ns1", "magic-tokens", "abc") assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) } func TestGetTokenPoolBadPlugin(t *testing.T) { @@ -513,6 +584,8 @@ func TestGetTokenPoolByID(t *testing.T) { mdi.On("GetTokenPoolByID", context.Background(), u).Return(&fftypes.TokenPool{}, nil) _, err := am.GetTokenPoolByNameOrID(context.Background(), "ns1", u.String()) assert.NoError(t, err) + + mdi.AssertExpectations(t) } func TestGetTokenPoolByIDBadNamespace(t *testing.T) { @@ -532,6 +605,8 @@ func TestGetTokenPoolByIDBadID(t *testing.T) { mdi.On("GetTokenPoolByID", context.Background(), u).Return(nil, fmt.Errorf("pop")) _, err := am.GetTokenPoolByNameOrID(context.Background(), "ns1", u.String()) assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) } func TestGetTokenPoolByIDNilPool(t *testing.T) { @@ -543,6 +618,8 @@ func TestGetTokenPoolByIDNilPool(t *testing.T) { mdi.On("GetTokenPoolByID", context.Background(), u).Return(nil, nil) _, err := am.GetTokenPoolByNameOrID(context.Background(), "ns1", u.String()) assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) } func TestGetTokenPoolByName(t *testing.T) { @@ -553,6 +630,8 @@ func TestGetTokenPoolByName(t *testing.T) { mdi.On("GetTokenPool", context.Background(), "ns1", "abc").Return(&fftypes.TokenPool{}, nil) _, err := am.GetTokenPoolByNameOrID(context.Background(), "ns1", "abc") assert.NoError(t, err) + + mdi.AssertExpectations(t) } func TestGetTokenPoolByNameBadName(t *testing.T) { @@ -571,6 +650,8 @@ func TestGetTokenPoolByNameNilPool(t *testing.T) { mdi.On("GetTokenPool", context.Background(), "ns1", "abc").Return(nil, fmt.Errorf("pop")) _, err := am.GetTokenPoolByNameOrID(context.Background(), "ns1", "abc") assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) } func TestGetTokenPools(t *testing.T) { @@ -584,6 +665,8 @@ func TestGetTokenPools(t *testing.T) { mdi.On("GetTokenPools", context.Background(), f).Return([]*fftypes.TokenPool{}, nil, nil) _, _, err := am.GetTokenPools(context.Background(), "ns1", f) assert.NoError(t, err) + + mdi.AssertExpectations(t) } func TestGetTokenPoolsBadNamespace(t *testing.T) { diff --git a/internal/assets/token_transfer.go b/internal/assets/token_transfer.go index 435d9172c6..31977e50dc 100644 --- a/internal/assets/token_transfer.go +++ b/internal/assets/token_transfer.go @@ -18,7 +18,6 @@ package assets import ( "context" - "fmt" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/sysmessaging" @@ -55,6 +54,7 @@ type transferSender struct { namespace string transfer *fftypes.TokenTransferInput resolved bool + msgSender sysmessaging.MessageSender } // sendMethod is the specific operation requested of the transferSender. 
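The hunks that follow end sendInternal by handing off to s.mgr.operations.RunOperation(ctx, opTransfer(op, pool, &s.transfer.TokenTransfer)). Neither opTransfer nor the transferData payload it builds appears in this excerpt; the sketch below shows their assumed shape, inferred purely from the op.Data.(transferData) assertions in the tests later in this patch, and from the assumption that fftypes.PreparedOperation carries the operation's ID, its type, and an opaque Data field. It is not the patch's actual code:

    // transferData is the typed payload the tests unpack via
    // op.Data.(transferData); the field names come straight from those
    // assertions (data.Pool, data.Transfer).
    type transferData struct {
        Pool     *fftypes.TokenPool
        Transfer *fftypes.TokenTransfer
    }

    // opTransfer wraps the stored operation and its inputs into the
    // PreparedOperation handed to operations.RunOperation (sketch only;
    // the real constructor lives outside this excerpt).
    func opTransfer(op *fftypes.Operation, pool *fftypes.TokenPool, transfer *fftypes.TokenTransfer) *fftypes.PreparedOperation {
        return &fftypes.PreparedOperation{
            ID:   op.ID,
            Type: op.Type,
            Data: transferData{Pool: pool, Transfer: transfer},
        }
    }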
@@ -190,14 +190,14 @@ func (s *transferSender) resolveAndSend(ctx context.Context, method sendMethod) return s.sendInternal(ctx, method) } -func (s *transferSender) resolve(ctx context.Context) error { +func (s *transferSender) resolve(ctx context.Context) (err error) { // Resolve the attached message if s.transfer.Message != nil { - sender, err := s.buildTransferMessage(ctx, s.namespace, s.transfer.Message) + s.msgSender, err = s.buildTransferMessage(ctx, s.namespace, s.transfer.Message) if err != nil { return err } - if err = sender.Prepare(ctx); err != nil { + if err = s.msgSender.Prepare(ctx); err != nil { return err } s.transfer.TokenTransfer.Message = s.transfer.Message.Header.ID @@ -224,8 +224,8 @@ func (s *transferSender) sendInternal(ctx context.Context, method sendMethod) er return nil } - var pool *fftypes.TokenPool var op *fftypes.Operation + var pool *fftypes.TokenPool err = s.mgr.database.RunAsGroup(ctx, func(ctx context.Context) (err error) { pool, err = s.mgr.GetTokenPoolByNameOrID(ctx, s.namespace, s.transfer.Pool) if err != nil { @@ -242,6 +242,7 @@ func (s *transferSender) sendInternal(ctx context.Context, method sendMethod) er s.transfer.TX.ID = txid s.transfer.TX.Type = fftypes.TransactionTypeTokenTransfer + s.transfer.TokenTransfer.Pool = pool.ID op = fftypes.NewOperation( plugin, @@ -255,31 +256,21 @@ func (s *transferSender) sendInternal(ctx context.Context, method sendMethod) er return err } - if s.transfer.Message != nil { - s.transfer.Message.State = fftypes.MessageStateStaged - err = s.mgr.database.UpsertMessage(ctx, &s.transfer.Message.Message, database.UpsertOptimizationNew) - } return err }) if err != nil { return err } - switch s.transfer.Type { - case fftypes.TokenTransferTypeMint: - err = plugin.MintTokens(ctx, op.ID, pool.ProtocolID, &s.transfer.TokenTransfer) - case fftypes.TokenTransferTypeTransfer: - err = plugin.TransferTokens(ctx, op.ID, pool.ProtocolID, &s.transfer.TokenTransfer) - case fftypes.TokenTransferTypeBurn: - err = plugin.BurnTokens(ctx, op.ID, pool.ProtocolID, &s.transfer.TokenTransfer) - default: - panic(fmt.Sprintf("unknown transfer type: %v", s.transfer.Type)) + // Write the transfer message outside of any DB transaction, as it will use the background message writer. 
+ if s.transfer.Message != nil { + s.transfer.Message.State = fftypes.MessageStateStaged + if err = s.msgSender.Send(ctx); err != nil { + return err + } } - if err != nil { - s.mgr.txHelper.WriteOperationFailure(ctx, op.ID, err) - } - return err + return s.mgr.operations.RunOperation(ctx, opTransfer(op, pool, &s.transfer.TokenTransfer)) } func (s *transferSender) buildTransferMessage(ctx context.Context, ns string, in *fftypes.MessageInOut) (sysmessaging.MessageSender, error) { diff --git a/internal/assets/token_transfer_test.go b/internal/assets/token_transfer_test.go index b89c10749e..87949b0047 100644 --- a/internal/assets/token_transfer_test.go +++ b/internal/assets/token_transfer_test.go @@ -25,10 +25,10 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" + "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/mocks/privatemessagingmocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/mocks/sysmessagingmocks" - "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" @@ -47,6 +47,8 @@ func TestGetTokenTransfers(t *testing.T) { mdi.On("GetTokenTransfers", context.Background(), f).Return([]*fftypes.TokenTransfer{}, nil, nil) _, _, err := am.GetTokenTransfers(context.Background(), "ns1", f) assert.NoError(t, err) + + mdi.AssertExpectations(t) } func TestGetTokenTransferByID(t *testing.T) { @@ -58,6 +60,8 @@ func TestGetTokenTransferByID(t *testing.T) { mdi.On("GetTokenTransfer", context.Background(), u).Return(&fftypes.TokenTransfer{}, nil) _, err := am.GetTokenTransferByID(context.Background(), "ns1", u.String()) assert.NoError(t, err) + + mdi.AssertExpectations(t) } func TestGetTokenTransferByIDBadID(t *testing.T) { @@ -79,22 +83,29 @@ func TestMintTokensSuccess(t *testing.T) { Pool: "pool1", } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("MintTokens", context.Background(), mock.Anything, "F1", &mint.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &mint.TokenTransfer + })).Return(nil) _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestMintTokenUnknownConnectorSuccess(t *testing.T) { @@ -108,22 +119,29 @@ func TestMintTokenUnknownConnectorSuccess(t *testing.T) { Pool: 
"pool1", } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("MintTokens", context.Background(), mock.Anything, "F1", &mint.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &mint.TokenTransfer + })).Return(nil) _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestMintTokenUnknownConnectorNoConnectors(t *testing.T) { @@ -139,9 +157,6 @@ func TestMintTokenUnknownConnectorNoConnectors(t *testing.T) { am.tokens = make(map[string]tokens.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.Regexp(t, "FF10292", err) } @@ -160,9 +175,6 @@ func TestMintTokenUnknownConnectorMultipleConnectors(t *testing.T) { am.tokens["magic-tokens"] = nil am.tokens["magic-tokens2"] = nil - mim := am.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.Regexp(t, "FF10292", err) } @@ -178,9 +190,6 @@ func TestMintTokenUnknownConnectorBadNamespace(t *testing.T) { Pool: "pool1", } - mim := am.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - _, err := am.MintTokens(context.Background(), "", mint, false) assert.Regexp(t, "FF10131", err) } @@ -202,6 +211,8 @@ func TestMintTokenBadConnector(t *testing.T) { _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.Regexp(t, "FF10272", err) + + mim.AssertExpectations(t) } func TestMintTokenUnknownPoolSuccess(t *testing.T) { @@ -215,17 +226,16 @@ func TestMintTokenUnknownPoolSuccess(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) fb := database.TokenPoolQueryFactory.NewFilter(context.Background()) f := fb.And() f.Limit(1).Count(true) tokenPools := []*fftypes.TokenPool{ { - Name: "pool1", - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + Name: "pool1", + State: fftypes.TokenPoolStateConfirmed, }, } totalCount := int64(1) @@ -238,12 +248,20 @@ func 
TestMintTokenUnknownPoolSuccess(t *testing.T) { return info.Count && info.Limit == 1 }))).Return(tokenPools, filterResult, nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(tokenPools[0], nil) - mti.On("MintTokens", context.Background(), mock.Anything, "F1", &mint.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == tokenPools[0] && data.Transfer == &mint.TokenTransfer + })).Return(nil) _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestMintTokenUnknownPoolNoPools(t *testing.T) { @@ -257,7 +275,6 @@ func TestMintTokenUnknownPoolNoPools(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) fb := database.TokenPoolQueryFactory.NewFilter(context.Background()) f := fb.And() f.Limit(1).Count(true) @@ -266,7 +283,6 @@ func TestMintTokenUnknownPoolNoPools(t *testing.T) { filterResult := &database.FilterResult{ TotalCount: &totalCount, } - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPools", context.Background(), mock.MatchedBy((func(f database.AndFilter) bool { info, _ := f.Finalize() return info.Count && info.Limit == 1 @@ -274,6 +290,8 @@ func TestMintTokenUnknownPoolNoPools(t *testing.T) { _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.Regexp(t, "FF10292", err) + + mdi.AssertExpectations(t) } func TestMintTokenUnknownPoolMultiplePools(t *testing.T) { @@ -287,7 +305,6 @@ func TestMintTokenUnknownPoolMultiplePools(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) fb := database.TokenPoolQueryFactory.NewFilter(context.Background()) f := fb.And() f.Limit(1).Count(true) @@ -303,15 +320,15 @@ func TestMintTokenUnknownPoolMultiplePools(t *testing.T) { filterResult := &database.FilterResult{ TotalCount: &totalCount, } - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPools", context.Background(), mock.MatchedBy((func(f database.AndFilter) bool { info, _ := f.Finalize() return info.Count && info.Limit == 1 }))).Return(tokenPools, filterResult, nil) - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.Regexp(t, "FF10292", err) + + mdi.AssertExpectations(t) } func TestMintTokenUnknownPoolBadNamespace(t *testing.T) { @@ -324,9 +341,6 @@ func TestMintTokenUnknownPoolBadNamespace(t *testing.T) { }, } - mim := am.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - _, err := am.MintTokens(context.Background(), "", mint, false) assert.Regexp(t, "FF10131", err) } @@ -342,12 +356,12 @@ func TestMintTokensGetPoolsError(t *testing.T) { } mdi := 
am.database.(*databasemocks.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPools", context.Background(), mock.Anything).Return(nil, nil, fmt.Errorf("pop")) _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) } func TestMintTokensBadPool(t *testing.T) { @@ -368,6 +382,9 @@ func TestMintTokensBadPool(t *testing.T) { _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) } func TestMintTokensIdentityFail(t *testing.T) { @@ -386,6 +403,8 @@ func TestMintTokensIdentityFail(t *testing.T) { _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.EqualError(t, err, "pop") + + mim.AssertExpectations(t) } func TestMintTokensFail(t *testing.T) { @@ -399,53 +418,29 @@ func TestMintTokensFail(t *testing.T) { Pool: "pool1", } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("MintTokens", context.Background(), mock.Anything, "F1", &mint.TokenTransfer).Return(fmt.Errorf("pop")) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) - mth.On("WriteOperationFailure", context.Background(), mock.Anything, fmt.Errorf("pop")) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &mint.TokenTransfer + })).Return(fmt.Errorf("pop")) _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.EqualError(t, err, "pop") -} - -func TestMintTokensFailAndDbFail(t *testing.T) { - am, cancel := newTestAssets(t) - defer cancel() - mint := &fftypes.TokenTransferInput{ - TokenTransfer: fftypes.TokenTransfer{ - Amount: *fftypes.NewFFBigInt(5), - }, - Pool: "pool1", - } - pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, - } - - mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) - mim := am.identity.(*identitymanagermocks.Manager) - mth := am.txHelper.(*txcommonmocks.Helper) - mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) - mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("MintTokens", context.Background(), mock.Anything, "F1", &mint.TokenTransfer).Return(fmt.Errorf("pop")) - mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) - mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) - mth.On("WriteOperationFailure", 
context.Background(), mock.Anything, fmt.Errorf("pop")) - - _, err := am.MintTokens(context.Background(), "ns1", mint, false) - assert.EqualError(t, err, "pop") + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestMintTokensOperationFail(t *testing.T) { @@ -473,6 +468,10 @@ func TestMintTokensOperationFail(t *testing.T) { _, err := am.MintTokens(context.Background(), "ns1", mint, false) assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) } func TestMintTokensConfirm(t *testing.T) { @@ -486,19 +485,17 @@ func TestMintTokensConfirm(t *testing.T) { Pool: "pool1", } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) msa := am.syncasync.(*syncasyncmocks.Bridge) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("MintTokens", context.Background(), mock.Anything, "F1", &mint.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) msa.On("WaitForTokenTransfer", context.Background(), "ns1", mock.Anything, mock.Anything). @@ -507,6 +504,10 @@ func TestMintTokensConfirm(t *testing.T) { send(context.Background()) }). 
Return(&fftypes.TokenTransfer{}, nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &mint.TokenTransfer + })).Return(nil) _, err := am.MintTokens(context.Background(), "ns1", mint, true) assert.NoError(t, err) @@ -514,7 +515,7 @@ func TestMintTokensConfirm(t *testing.T) { mdi.AssertExpectations(t) mdm.AssertExpectations(t) msa.AssertExpectations(t) - mti.AssertExpectations(t) + mom.AssertExpectations(t) } func TestBurnTokensSuccess(t *testing.T) { @@ -528,26 +529,29 @@ func TestBurnTokensSuccess(t *testing.T) { Pool: "pool1", } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("BurnTokens", context.Background(), mock.Anything, "F1", &burn.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &burn.TokenTransfer + })).Return(nil) _, err := am.BurnTokens(context.Background(), "ns1", burn, false) assert.NoError(t, err) mim.AssertExpectations(t) mdi.AssertExpectations(t) - mti.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestBurnTokensIdentityFail(t *testing.T) { @@ -566,6 +570,8 @@ func TestBurnTokensIdentityFail(t *testing.T) { _, err := am.BurnTokens(context.Background(), "ns1", burn, false) assert.EqualError(t, err, "pop") + + mim.AssertExpectations(t) } func TestBurnTokensConfirm(t *testing.T) { @@ -579,19 +585,17 @@ func TestBurnTokensConfirm(t *testing.T) { Pool: "pool1", } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) msa := am.syncasync.(*syncasyncmocks.Bridge) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("BurnTokens", context.Background(), mock.Anything, "F1", &burn.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) msa.On("WaitForTokenTransfer", context.Background(), "ns1", mock.Anything, mock.Anything).
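One pattern runs through all the failure-path tests in this file: the explicit mth.On("WriteOperationFailure", ...) expectations are gone, which only holds together if recording the failed operation now happens inside the operations manager itself rather than at each call site. A hedged sketch of that assumed central behavior, with an illustrative receiver and handler map (the manager's real implementation is outside this excerpt):

    // RunOperation dispatches a prepared operation to its registered handler
    // and records the outcome centrally, which is why callers such as the
    // asset manager no longer write operation success or failure themselves.
    func (om *operationsManager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) error {
        complete, err := om.handlers[op.Type].RunOperation(ctx, op)
        if err != nil {
            om.txHelper.WriteOperationFailure(ctx, op.ID, err)
            return err
        }
        if complete {
            om.txHelper.WriteOperationSuccess(ctx, op.ID, nil)
        }
        return nil
    }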
@@ -600,6 +604,10 @@ func TestBurnTokensConfirm(t *testing.T) { send(context.Background()) }). Return(&fftypes.TokenTransfer{}, nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &burn.TokenTransfer + })).Return(nil) _, err := am.BurnTokens(context.Background(), "ns1", burn, true) assert.NoError(t, err) @@ -607,7 +615,8 @@ func TestBurnTokensConfirm(t *testing.T) { mdi.AssertExpectations(t) mdm.AssertExpectations(t) msa.AssertExpectations(t) - mti.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestTransferTokensSuccess(t *testing.T) { @@ -623,26 +632,29 @@ func TestTransferTokensSuccess(t *testing.T) { Pool: "pool1", } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TransferTokens", context.Background(), mock.Anything, "F1", &transfer.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer + })).Return(nil) _, err := am.TransferTokens(context.Background(), "ns1", transfer, false) assert.NoError(t, err) mim.AssertExpectations(t) mdi.AssertExpectations(t) - mti.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestTransferTokensUnconfirmedPool(t *testing.T) { @@ -692,6 +704,8 @@ func TestTransferTokensIdentityFail(t *testing.T) { _, err := am.TransferTokens(context.Background(), "ns1", transfer, false) assert.EqualError(t, err, "pop") + + mim.AssertExpectations(t) } func TestTransferTokensNoFromOrTo(t *testing.T) { @@ -711,16 +725,15 @@ func TestTransferTokensNoFromOrTo(t *testing.T) { mim.AssertExpectations(t) } -func TestTransferTokensInvalidType(t *testing.T) { +func TestTransferTokensTransactionFail(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() transfer := &fftypes.TokenTransferInput{ TokenTransfer: fftypes.TokenTransfer{ - From: "A", - To: "B", - Connector: "magic-tokens", - Amount: *fftypes.NewFFBigInt(5), + From: "A", + To: "B", + Amount: *fftypes.NewFFBigInt(5), }, Pool: "pool1", } @@ -730,25 +743,26 @@ func TestTransferTokensInvalidType(t *testing.T) { } mdi := am.database.(*databasemocks.Plugin) + mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) - mdi.On("GetTokenPool", am.ctx, "ns1", "pool1").Return(pool, nil) - mth.On("SubmitNewTransaction", am.ctx, "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) - mdi.On("InsertOperation", am.ctx, mock.Anything).Return(nil) - - sender := &transferSender{ - mgr: 
am, - namespace: "ns1", - transfer: transfer, - } - assert.PanicsWithValue(t, "unknown transfer type: ", func() { - sender.Send(am.ctx) - }) + mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) + mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) + mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(nil, fmt.Errorf("pop")) + + _, err := am.TransferTokens(context.Background(), "ns1", transfer, false) + assert.EqualError(t, err, "pop") + + mim.AssertExpectations(t) + mdi.AssertExpectations(t) + mth.AssertExpectations(t) } -func TestTransferTokensTransactionFail(t *testing.T) { +func TestTransferTokensWithBroadcastMessage(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() + msgID := fftypes.NewUUID() + hash := fftypes.NewRandB32() transfer := &fftypes.TokenTransferInput{ TokenTransfer: fftypes.TokenTransfer{ From: "A", @@ -756,27 +770,56 @@ func TestTransferTokensTransactionFail(t *testing.T) { Amount: *fftypes.NewFFBigInt(5), }, Pool: "pool1", + Message: &fftypes.MessageInOut{ + Message: fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + }, + Hash: hash, + }, + InlineData: fftypes.InlineData{ + { + Value: fftypes.JSONAnyPtr("test data"), + }, + }, + }, } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) + mbm := am.broadcast.(*broadcastmocks.Manager) + mms := &sysmessagingmocks.MessageSender{} mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(nil, fmt.Errorf("pop")) + mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) + mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) + mbm.On("NewBroadcast", "ns1", transfer.Message).Return(mms) + mms.On("Prepare", context.Background()).Return(nil) + mms.On("Send", context.Background()).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer + })).Return(nil) _, err := am.TransferTokens(context.Background(), "ns1", transfer, false) - assert.EqualError(t, err, "pop") + assert.NoError(t, err) + assert.Equal(t, *msgID, *transfer.TokenTransfer.Message) + assert.Equal(t, *hash, *transfer.TokenTransfer.MessageHash) + mbm.AssertExpectations(t) mim.AssertExpectations(t) mdi.AssertExpectations(t) + mms.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } -func TestTransferTokensWithBroadcastMessage(t *testing.T) { +func TestTransferTokensWithBroadcastMessageSendFail(t *testing.T) { am, cancel := newTestAssets(t) defer cancel() @@ -804,37 +847,34 @@ func TestTransferTokensWithBroadcastMessage(t *testing.T) { }, } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } 
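These tests all verify the exact PreparedOperation handed to RunOperation using testify's mock.MatchedBy, which matches on a predicate over the real argument rather than requiring deep equality. A minimal self-contained sketch of that pattern, using illustrative stand-in types rather than FireFly's fftypes:

package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// Stand-in for a prepared operation (illustrative, not fftypes.PreparedOperation).
type preparedOp struct{ Type string }

type opRunner struct{ mock.Mock }

func (r *opRunner) RunOperation(op *preparedOp) error {
	return r.Called(op).Error(0)
}

func TestMatchedByPredicate(t *testing.T) {
	r := &opRunner{}
	// The predicate is evaluated against the argument actually passed at call
	// time, so the expectation matches on semantics rather than deep equality.
	r.On("RunOperation", mock.MatchedBy(func(op *preparedOp) bool {
		return op.Type == "token_transfer"
	})).Return(nil)

	if err := r.RunOperation(&preparedOp{Type: "token_transfer"}); err != nil {
		t.Fatal(err)
	}
	r.AssertExpectations(t)
}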
mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mbm := am.broadcast.(*broadcastmocks.Manager) mms := &sysmessagingmocks.MessageSender{} mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TransferTokens", context.Background(), mock.Anything, "F1", &transfer.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) mbm.On("NewBroadcast", "ns1", transfer.Message).Return(mms) mms.On("Prepare", context.Background()).Return(nil) - mdi.On("UpsertMessage", context.Background(), mock.MatchedBy(func(msg *fftypes.Message) bool { - return msg.State == fftypes.MessageStateStaged - }), database.UpsertOptimizationNew).Return(nil) + mms.On("Send", context.Background()).Return(fmt.Errorf("pop")) _, err := am.TransferTokens(context.Background(), "ns1", transfer, false) - assert.NoError(t, err) + assert.Regexp(t, "pop", err) assert.Equal(t, *msgID, *transfer.TokenTransfer.Message) assert.Equal(t, *hash, *transfer.TokenTransfer.MessageHash) mbm.AssertExpectations(t) mim.AssertExpectations(t) mdi.AssertExpectations(t) - mti.AssertExpectations(t) mms.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestTransferTokensWithBroadcastPrepareFail(t *testing.T) { @@ -901,26 +941,26 @@ func TestTransferTokensWithPrivateMessage(t *testing.T) { }, } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mpm := am.messaging.(*privatemessagingmocks.Manager) mms := &sysmessagingmocks.MessageSender{} mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TransferTokens", context.Background(), mock.Anything, "F1", &transfer.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) mpm.On("NewMessage", "ns1", transfer.Message).Return(mms) mms.On("Prepare", context.Background()).Return(nil) - mdi.On("UpsertMessage", context.Background(), mock.MatchedBy(func(msg *fftypes.Message) bool { - return msg.State == fftypes.MessageStateStaged - }), database.UpsertOptimizationNew).Return(nil) + mms.On("Send", context.Background()).Return(nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer + })).Return(nil) _, err := am.TransferTokens(context.Background(), "ns1", transfer, false) assert.NoError(t, err) @@ -930,8 +970,9 @@ func 
TestTransferTokensWithPrivateMessage(t *testing.T) { mpm.AssertExpectations(t) mim.AssertExpectations(t) mdi.AssertExpectations(t) - mti.AssertExpectations(t) mms.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestTransferTokensWithInvalidMessage(t *testing.T) { @@ -981,19 +1022,17 @@ func TestTransferTokensConfirm(t *testing.T) { Pool: "pool1", } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) mdm := am.data.(*datamocks.Manager) msa := am.syncasync.(*syncasyncmocks.Bridge) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TransferTokens", context.Background(), mock.Anything, "F1", &transfer.TokenTransfer).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) msa.On("WaitForTokenTransfer", context.Background(), "ns1", mock.Anything, mock.Anything). @@ -1002,6 +1041,10 @@ func TestTransferTokensConfirm(t *testing.T) { send(context.Background()) }). Return(&fftypes.TokenTransfer{}, nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer + })).Return(nil) _, err := am.TransferTokens(context.Background(), "ns1", transfer, true) assert.NoError(t, err) @@ -1009,7 +1052,9 @@ func TestTransferTokensConfirm(t *testing.T) { mdi.AssertExpectations(t) mdm.AssertExpectations(t) msa.AssertExpectations(t) - mti.AssertExpectations(t) + mim.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestTransferTokensWithBroadcastConfirm(t *testing.T) { @@ -1040,27 +1085,23 @@ func TestTransferTokensWithBroadcastConfirm(t *testing.T) { }, } pool := &fftypes.TokenPool{ - ProtocolID: "F1", - State: fftypes.TokenPoolStateConfirmed, + State: fftypes.TokenPoolStateConfirmed, } mdi := am.database.(*databasemocks.Plugin) - mti := am.tokens["magic-tokens"].(*tokenmocks.Plugin) mim := am.identity.(*identitymanagermocks.Manager) mbm := am.broadcast.(*broadcastmocks.Manager) mms := &sysmessagingmocks.MessageSender{} msa := am.syncasync.(*syncasyncmocks.Bridge) mth := am.txHelper.(*txcommonmocks.Helper) + mom := am.operations.(*operationmocks.Manager) mim.On("NormalizeSigningKey", context.Background(), "", identity.KeyNormalizationBlockchainPlugin).Return("0x12345", nil) mdi.On("GetTokenPool", context.Background(), "ns1", "pool1").Return(pool, nil) - mti.On("TransferTokens", context.Background(), mock.Anything, "F1", &transfer.TokenTransfer).Return(nil) mdi.On("InsertOperation", context.Background(), mock.Anything).Return(nil) mth.On("SubmitNewTransaction", context.Background(), "ns1", fftypes.TransactionTypeTokenTransfer).Return(fftypes.NewUUID(), nil) mbm.On("NewBroadcast", "ns1", transfer.Message).Return(mms) mms.On("Prepare", context.Background()).Return(nil) - mdi.On("UpsertMessage", context.Background(), 
mock.MatchedBy(func(msg *fftypes.Message) bool { - return msg.State == fftypes.MessageStateStaged - }), database.UpsertOptimizationNew).Return(nil) + mms.On("Send", context.Background()).Return(nil) msa.On("WaitForMessage", context.Background(), "ns1", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { send := args[3].(syncasync.RequestSender) @@ -1073,6 +1114,10 @@ func TestTransferTokensWithBroadcastConfirm(t *testing.T) { send(context.Background()) }). Return(&transfer.TokenTransfer, nil) + mom.On("RunOperation", context.Background(), mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferData) + return op.Type == fftypes.OpTypeTokenTransfer && data.Pool == pool && data.Transfer == &transfer.TokenTransfer + })).Return(nil) _, err := am.TransferTokens(context.Background(), "ns1", transfer, true) assert.NoError(t, err) @@ -1082,9 +1127,9 @@ func TestTransferTokensWithBroadcastConfirm(t *testing.T) { mbm.AssertExpectations(t) mim.AssertExpectations(t) mdi.AssertExpectations(t) - mti.AssertExpectations(t) mms.AssertExpectations(t) msa.AssertExpectations(t) + mom.AssertExpectations(t) } func TestTransferTokensPoolNotFound(t *testing.T) { diff --git a/internal/batch/batch_manager.go b/internal/batch/batch_manager.go index 239f5cc277..a1dd9d37f9 100644 --- a/internal/batch/batch_manager.go +++ b/internal/batch/batch_manager.go @@ -28,11 +28,12 @@ import ( "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/retry" "github.com/hyperledger/firefly/internal/sysmessaging" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" ) -func NewBatchManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, dm data.Manager) (Manager, error) { +func NewBatchManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, dm data.Manager, txHelper txcommon.Helper) (Manager, error) { if di == nil || dm == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } @@ -44,12 +45,16 @@ func NewBatchManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, di data ni: ni, database: di, data: dm, + txHelper: txHelper, readOffset: -1, // On restart we trawl for all ready messages readPageSize: uint64(readPageSize), messagePollTimeout: config.GetDuration(config.BatchManagerReadPollTimeout), startupOffsetRetryAttempts: config.GetInt(config.OrchestratorStartupAttempts), - dispatchers: make(map[string]*dispatcher), - newMessages: make(chan int64, 1), + dispatcherMap: make(map[string]*dispatcher), + allDispatchers: make([]*dispatcher, 0), + newMessages: make(chan int64, readPageSize), + shoulderTap: make(chan bool, 1), + rewindOffset: -1, done: make(chan struct{}), retry: &retry.Retry{ InitialDelay: config.GetDuration(config.BatchRetryInitDelay), @@ -85,20 +90,26 @@ type batchManager struct { ni sysmessaging.LocalNodeInfo database database.Plugin data data.Manager + txHelper txcommon.Helper dispatcherMux sync.Mutex - dispatchers map[string]*dispatcher + dispatcherMap map[string]*dispatcher + allDispatchers []*dispatcher newMessages chan int64 done chan struct{} retry *retry.Retry readOffset int64 + rewindOffsetMux sync.Mutex + rewindOffset int64 + shoulderTap chan bool readPageSize uint64 messagePollTimeout time.Duration startupOffsetRetryAttempts int } -type DispatchHandler func(context.Context, *fftypes.Batch, []*fftypes.Bytes32) error +type DispatchHandler func(context.Context, *DispatchState) error 
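With this change a dispatch handler receives the whole DispatchState (defined in batch_processor.go later in this patch) instead of a *fftypes.Batch plus a slice of pins. A hedged sketch of the shape of a handler under the new signature, using stand-in types since the real ones are internal to this package:

package example

import (
	"context"
	"fmt"
)

// Stand-ins for the patch's types: the real DispatchState carries the
// persisted batch, messages, data and pins together.
type DispatchState struct {
	BatchID  string
	Messages []string
	Pins     []string
}

type DispatchHandler func(context.Context, *DispatchState) error

func main() {
	var handler DispatchHandler = func(ctx context.Context, state *DispatchState) error {
		// A handler can read the sealed batch and also mutate the state for the
		// finalization phase, because the whole struct is shared.
		fmt.Printf("batch %s: %d messages, %d pins\n",
			state.BatchID, len(state.Messages), len(state.Pins))
		return nil
	}
	_ = handler(context.Background(), &DispatchState{
		BatchID:  "batch-1",
		Messages: []string{"msg-1"},
		Pins:     []string{"pin-1"},
	})
}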
type DispatcherOptions struct { + BatchType fftypes.BatchType BatchMaxSize uint BatchMaxBytes int64 BatchTimeout time.Duration @@ -121,19 +132,25 @@ func (bm *batchManager) getDispatcherKey(txType fftypes.TransactionType, msgType } func (bm *batchManager) RegisterDispatcher(name string, txType fftypes.TransactionType, msgTypes []fftypes.MessageType, handler DispatchHandler, options DispatcherOptions) { + bm.dispatcherMux.Lock() + defer bm.dispatcherMux.Unlock() + dispatcher := &dispatcher{ name: name, handler: handler, options: options, processors: make(map[string]*batchProcessor), } + bm.allDispatchers = append(bm.allDispatchers, dispatcher) for _, msgType := range msgTypes { - bm.dispatchers[bm.getDispatcherKey(txType, msgType)] = dispatcher + bm.dispatcherMap[bm.getDispatcherKey(txType, msgType)] = dispatcher } } func (bm *batchManager) Start() error { go bm.messageSequencer() + // We must be always ready to process DB events, or we block commits. So we have a dedicated worker for that + go bm.newMessageNotifier() return nil } @@ -141,68 +158,79 @@ func (bm *batchManager) NewMessages() chan<- int64 { return bm.newMessages } -func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fftypes.MessageType, group *fftypes.Bytes32, namespace string, identity *fftypes.SignerRef) (*batchProcessor, error) { +func (bm *batchManager) getProcessor(txType fftypes.TransactionType, msgType fftypes.MessageType, group *fftypes.Bytes32, namespace string, signer *fftypes.SignerRef) (*batchProcessor, error) { bm.dispatcherMux.Lock() defer bm.dispatcherMux.Unlock() dispatcherKey := bm.getDispatcherKey(txType, msgType) - dispatcher, ok := bm.dispatchers[dispatcherKey] + dispatcher, ok := bm.dispatcherMap[dispatcherKey] if !ok { return nil, i18n.NewError(bm.ctx, i18n.MsgUnregisteredBatchType, dispatcherKey) } - name := bm.getProcessorKey(namespace, identity, group) + name := bm.getProcessorKey(namespace, signer, group) processor, ok := dispatcher.processors[name] if !ok { processor = newBatchProcessor( bm.ctx, // Background context, not the call context bm.ni, bm.database, + bm.data, &batchProcessorConf{ DispatcherOptions: dispatcher.options, name: name, txType: txType, dispatcherName: dispatcher.name, namespace: namespace, - identity: *identity, + signer: *signer, group: group, dispatch: dispatcher.handler, }, bm.retry, + bm.txHelper, ) dispatcher.processors[name] = processor + log.L(bm.ctx).Debugf("Created new processor: %s", name) } - log.L(bm.ctx).Debugf("Created new processor: %s", name) return processor, nil } -func (bm *batchManager) assembleMessageData(msg *fftypes.Message) (data []*fftypes.Data, err error) { +func (bm *batchManager) assembleMessageData(id *fftypes.UUID) (msg *fftypes.Message, retData fftypes.DataArray, err error) { var foundAll = false - err = bm.retry.Do(bm.ctx, fmt.Sprintf("assemble message %s data", msg.Header.ID), func(attempt int) (retry bool, err error) { - data, foundAll, err = bm.data.GetMessageData(bm.ctx, msg, true) + err = bm.retry.Do(bm.ctx, "retrieve message", func(attempt int) (retry bool, err error) { + msg, retData, foundAll, err = bm.data.GetMessageWithDataCached(bm.ctx, id) // continual retry for persistence error (distinct from not-found) return true, err }) if err != nil { - return nil, err + return nil, nil, err } if !foundAll { - return nil, i18n.NewError(bm.ctx, i18n.MsgDataNotFound, msg.Header.ID) + return nil, nil, i18n.NewError(bm.ctx, i18n.MsgDataNotFound, id) } - return data, nil + return msg, retData, nil } -func (bm *batchManager) 
readPage() ([]*fftypes.Message, error) { +func (bm *batchManager) readPage() ([]*fftypes.IDAndSequence, error) { + + // Pop out a rewind offset if there is one and it's behind the cursor + bm.rewindOffsetMux.Lock() + rewindOffset := bm.rewindOffset + if rewindOffset >= 0 && rewindOffset < bm.readOffset { + bm.readOffset = rewindOffset + } + bm.rewindOffset = -1 + bm.rewindOffsetMux.Unlock() - var msgs []*fftypes.Message + var ids []*fftypes.IDAndSequence err := bm.retry.Do(bm.ctx, "retrieve messages", func(attempt int) (retry bool, err error) { fb := database.MessageQueryFactory.NewFilterLimit(bm.ctx, bm.readPageSize) - msgs, _, err = bm.database.GetMessages(bm.ctx, fb.And( + ids, err = bm.database.GetMessageIDs(bm.ctx, fb.And( fb.Gt("sequence", bm.readOffset), fb.Eq("state", fftypes.MessageStateReady), ).Sort("sequence").Limit(bm.readPageSize)) return true, err }) - return msgs, err + return ids, err } func (bm *batchManager) messageSequencer() { @@ -215,34 +243,40 @@ func (bm *batchManager) messageSequencer() { bm.reapQuiescing() // Read messages from the DB - in an error condition we retry until success, or a closed context - msgs, err := bm.readPage() + entries, err := bm.readPage() if err != nil { l.Debugf("Exiting: %s", err) return } - batchWasFull := (uint64(len(msgs)) == bm.readPageSize) + batchWasFull := (uint64(len(entries)) == bm.readPageSize) - if len(msgs) > 0 { - for _, msg := range msgs { - data, err := bm.assembleMessageData(msg) + if len(entries) > 0 { + for _, entry := range entries { + msg, data, err := bm.assembleMessageData(&entry.ID) if err != nil { - l.Errorf("Failed to retrieve message data for %s: %s", msg.Header.ID, err) + l.Errorf("Failed to retrieve message data for %s (seq=%d): %s", entry.ID, entry.Sequence, err) continue } - err = bm.dispatchMessage(msg, data...) + // We likely retrieved this message from the cache, which is written by the message-writer before + // the database store, meaning we cannot rely on the sequence having been set. + msg.Sequence = entry.Sequence + + processor, err := bm.getProcessor(msg.Header.TxType, msg.Header.Type, msg.Header.Group, msg.Header.Namespace, &msg.Header.SignerRef) if err != nil { l.Errorf("Failed to dispatch message %s: %s", msg.Header.ID, err) continue } + + bm.dispatchMessage(processor, msg, data) } // Next time round only read after the messages we just processed (unless we get a tap to rewind) - bm.readOffset = msgs[len(msgs)-1].Sequence + bm.readOffset = entries[len(entries)-1].Sequence } // Wait to be woken again - if !batchWasFull && !bm.drainNewMessages() { + if !batchWasFull { if done := bm.waitForNewMessages(); done { l.Debugf("Exiting: %s", err) return @@ -252,39 +286,41 @@ } func (bm *batchManager) newMessageNotification(seq int64) { - log.L(bm.ctx).Debugf("Notification of message %d", seq) - // The readOffset is the last sequence we have already read. - // So we need to ensure it is at least one earlier, than this message sequence + // Determine if we need to queue a rewind + bm.rewindOffsetMux.Lock() lastSequenceBeforeMsg := seq - 1 - if lastSequenceBeforeMsg < bm.readOffset { - bm.readOffset = lastSequenceBeforeMsg + if bm.rewindOffset == -1 || lastSequenceBeforeMsg < bm.rewindOffset { + bm.rewindOffset = lastSequenceBeforeMsg + } + bm.rewindOffsetMux.Unlock() + // Shoulder tap that there is a new message, regardless of whether we rewound + // the cursor, as we need to wake up the poll.
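The non-blocking send in the select that follows is a coalescing wake-up: any number of notifications collapses into at most one pending tap, so notifying never blocks a database commit. A self-contained sketch of the pattern on its own:

package example

import "fmt"

// A buffered channel of size one acts as a coalescing wake-up signal: any
// number of notifications leaves at most one pending tap for the poller.
func tap(shoulderTap chan bool) {
	select {
	case shoulderTap <- true:
	default: // a tap is already pending; the poller will wake anyway
	}
}

func main() {
	shoulderTap := make(chan bool, 1)
	for i := 0; i < 3; i++ {
		tap(shoulderTap) // three notifications coalesce into one
	}
	<-shoulderTap
	fmt.Println("woken once, queue drained:", len(shoulderTap) == 0)
}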
+ select { + case bm.shoulderTap <- true: + default: } } -func (bm *batchManager) drainNewMessages() bool { - // Drain any new message notifications, moving back our readOffset as required - newMessages := false - checkingMessages := true - for checkingMessages { +func (bm *batchManager) newMessageNotifier() { + l := log.L(bm.ctx) + for { select { case seq := <-bm.newMessages: bm.newMessageNotification(seq) - newMessages = true - default: - checkingMessages = false + case <-bm.ctx.Done(): + l.Debugf("Exiting due to cancelled context") + return } } - return newMessages } func (bm *batchManager) waitForNewMessages() (done bool) { l := log.L(bm.ctx) - // Otherwise set a timeout timeout := time.NewTimer(bm.messagePollTimeout) select { - case seq := <-bm.newMessages: - bm.newMessageNotification(seq) + case <-bm.shoulderTap: + timeout.Stop() return false case <-timeout.C: l.Debugf("Woken after poll timeout") @@ -295,25 +331,20 @@ func (bm *batchManager) waitForNewMessages() (done bool) { } } -func (bm *batchManager) dispatchMessage(msg *fftypes.Message, data ...*fftypes.Data) error { +func (bm *batchManager) dispatchMessage(processor *batchProcessor, msg *fftypes.Message, data fftypes.DataArray) { l := log.L(bm.ctx) - processor, err := bm.getProcessor(msg.Header.TxType, msg.Header.Type, msg.Header.Group, msg.Header.Namespace, &msg.Header.SignerRef) - if err != nil { - return err - } - l.Debugf("Dispatching message %s to %s batch processor %s", msg.Header.ID, msg.Header.Type, processor.conf.name) + l.Debugf("Dispatching message %s (seq=%d) to %s batch processor %s", msg.Header.ID, msg.Sequence, msg.Header.Type, processor.conf.name) work := &batchWork{ msg: msg, data: data, } processor.newWork <- work - return nil } func (bm *batchManager) reapQuiescing() { bm.dispatcherMux.Lock() var reaped []*batchProcessor - for _, d := range bm.dispatchers { + for _, d := range bm.allDispatchers { for k, p := range d.processors { select { case <-p.quescing: @@ -341,7 +372,7 @@ func (bm *batchManager) getProcessors() []*batchProcessor { defer bm.dispatcherMux.Unlock() var processors []*batchProcessor - for _, d := range bm.dispatchers { + for _, d := range bm.allDispatchers { for _, p := range d.processors { processors = append(processors, p) } diff --git a/internal/batch/batch_manager_test.go b/internal/batch/batch_manager_test.go index a37ac73801..5eccb47cbe 100644 --- a/internal/batch/batch_manager_test.go +++ b/internal/batch/batch_manager_test.go @@ -24,6 +24,7 @@ import ( "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/sysmessagingmocks" @@ -33,29 +34,44 @@ import ( "github.com/stretchr/testify/mock" ) -func TestE2EDispatchBroadcast(t *testing.T) { - log.SetLevel("debug") +func testConfigReset() { config.Reset() + log.SetLevel("debug") +} + +func newTestBatchManager(t *testing.T) (*batchManager, func()) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, err := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) + assert.NoError(t, err) + return bm.(*batchManager), bm.(*batchManager).cancelCtx +} + +func TestE2EDispatchBroadcast(t *testing.T) { + testConfigReset() mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := 
&sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) readyForDispatch := make(chan bool) - waitForDispatch := make(chan *fftypes.Batch) - handler := func(ctx context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + waitForDispatch := make(chan *DispatchState) + handler := func(ctx context.Context, state *DispatchState) error { _, ok := <-readyForDispatch if !ok { return nil } - assert.Len(t, s, 2) + assert.Len(t, state.Pins, 2) h := sha256.New() nonceBytes, _ := hex.DecodeString( "746f70696331", /*| topic1 | */ ) // little endian 12345 in 8 byte hex h.Write(nonceBytes) - assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), s[0].String()) + assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), state.Pins[0].String()) h = sha256.New() nonceBytes, _ = hex.DecodeString( @@ -63,13 +79,13 @@ func TestE2EDispatchBroadcast(t *testing.T) { /*| topic2 | */ ) // little endian 12345 in 8 byte hex h.Write(nonceBytes) - assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), s[1].String()) + assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), state.Pins[1].String()) - waitForDispatch <- b + waitForDispatch <- state return nil } ctx, cancel := context.WithCancel(context.Background()) - bmi, _ := NewBatchManager(ctx, mni, mdi, mdm) + bmi, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm := bmi.(*batchManager) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, handler, DispatcherOptions{ @@ -97,11 +113,13 @@ func TestE2EDispatchBroadcast(t *testing.T) { ID: dataID1, Hash: dataHash, } - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{data}, true, nil) - mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() - mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(msg, fftypes.DataArray{data}, true, nil) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() + mdi.On("GetMessageIDs", mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{{ID: *msg.Header.ID}}, nil).Once() + mdi.On("GetMessageIDs", mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{}, nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) + mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) // pins rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything).Return(nil) rag.RunFn = func(a mock.Arguments) { ctx := a.Get(0).(context.Context) @@ -129,8 +147,8 @@ func TestE2EDispatchBroadcast(t *testing.T) { assert.NotNil(t, status.Processors[0].Status.Flushing) b := <-waitForDispatch - assert.Equal(t, *msg.Header.ID, *b.Payload.Messages[0].Header.ID) - assert.Equal(t, *data.ID, *b.Payload.Data[0].ID) + assert.Equal(t, *msg.Header.ID, *b.Messages[0].Header.ID) + assert.Equal(t, *data.ID, *b.Data[0].ID) close(readyForDispatch) @@ -146,23 +164,23 @@ func TestE2EDispatchBroadcast(t *testing.T) { } func TestE2EDispatchPrivateUnpinned(t *testing.T) { - log.SetLevel("debug") - config.Reset() + testConfigReset() mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mni.On("GetNodeUUID", 
mock.Anything).Return(fftypes.NewUUID()) readyForDispatch := make(chan bool) - waitForDispatch := make(chan *fftypes.Batch) + waitForDispatch := make(chan *DispatchState) var groupID fftypes.Bytes32 _ = groupID.UnmarshalText([]byte("44dc0861e69d9bab17dd5e90a8898c2ea156ad04e5fabf83119cc010486e6c1b")) - handler := func(ctx context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + handler := func(ctx context.Context, state *DispatchState) error { _, ok := <-readyForDispatch if !ok { return nil } - assert.Len(t, s, 2) + assert.Len(t, state.Pins, 2) h := sha256.New() nonceBytes, _ := hex.DecodeString( "746f70696331" + "44dc0861e69d9bab17dd5e90a8898c2ea156ad04e5fabf83119cc010486e6c1b" + "6469643a66697265666c793a6f72672f61626364" + "0000000000003039", @@ -170,7 +188,7 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { /*| context | | sender + nonce */ ) // little endian 12345 in 8 byte hex h.Write(nonceBytes) - assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), s[0].String()) + assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), state.Pins[0].String()) h = sha256.New() nonceBytes, _ = hex.DecodeString( @@ -179,12 +197,12 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { /*| context | | sender + nonce */ ) // little endian 12345 in 8 byte hex h.Write(nonceBytes) - assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), s[1].String()) - waitForDispatch <- b + assert.Equal(t, hex.EncodeToString(h.Sum([]byte{})), state.Pins[1].String()) + waitForDispatch <- state return nil } ctx, cancel := context.WithCancel(context.Background()) - bmi, _ := NewBatchManager(ctx, mni, mdi, mdm) + bmi, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm := bmi.(*batchManager) bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypePrivate}, handler, DispatcherOptions{ @@ -213,9 +231,11 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { ID: dataID1, Hash: dataHash, } - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{data}, true, nil) - mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() - mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(msg, fftypes.DataArray{data}, true, nil) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() + mdi.On("GetMessageIDs", mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{{ID: *msg.Header.ID}}, nil).Once() + mdi.On("GetMessageIDs", mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{}, nil) + mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) // pins mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -246,8 +266,8 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { readyForDispatch <- true b := <-waitForDispatch - assert.Equal(t, *msg.Header.ID, *b.Payload.Messages[0].Header.ID) - assert.Equal(t, *data.ID, *b.Payload.Data[0].ID) + assert.Equal(t, *msg.Header.ID, *b.Messages[0].Header.ID) + assert.Equal(t, *data.ID, *b.Data[0].ID) // Wait until everything closes close(readyForDispatch) @@ -257,19 +277,23 @@ func TestE2EDispatchPrivateUnpinned(t *testing.T) { } func TestDispatchUnknownType(t *testing.T) { - log.SetLevel("debug") - config.Reset() + 
testConfigReset() mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) ctx, cancel := context.WithCancel(context.Background()) - bmi, _ := NewBatchManager(ctx, mni, mdi, mdm) + bmi, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) bm := bmi.(*batchManager) - msg := &fftypes.Message{} - mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil).Once() - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + }, + } + mdi.On("GetMessageIDs", mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{{ID: *msg.Header.ID}}, nil).Once() + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(msg, fftypes.DataArray{}, true, nil) err := bm.Start() assert.NoError(t, err) @@ -280,7 +304,7 @@ func TestDispatchUnknownType(t *testing.T) { } func TestInitFailNoPersistence(t *testing.T) { - _, err := NewBatchManager(context.Background(), nil, nil, nil) + _, err := NewBatchManager(context.Background(), nil, nil, nil, nil) assert.Error(t, err) } @@ -289,10 +313,10 @@ func TestGetInvalidBatchTypeMsg(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) defer bm.Close() - msg := &fftypes.Message{Header: fftypes.MessageHeader{}} - err := bm.(*batchManager).dispatchMessage(msg) + _, err := bm.(*batchManager).getProcessor(fftypes.BatchTypeBroadcast, "wrong", nil, "ns1", &fftypes.SignerRef{}) assert.Regexp(t, "FF10126", err) } @@ -300,8 +324,9 @@ func TestMessageSequencerCancelledContext(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + mdi.On("GetMessageIDs", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")).Once() + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) defer bm.Close() ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -314,26 +339,35 @@ func TestMessageSequencerMissingMessageData(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) + bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeNone, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, + func(c context.Context, state *DispatchState) error { + return nil + }, + DispatcherOptions{BatchType: fftypes.BatchTypeBroadcast}, + ) dataID := fftypes.NewUUID() - mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything). - Return([]*fftypes.Message{ - { - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - Namespace: "ns1", - }, - Data: []*fftypes.DataRef{ - {ID: dataID}, - }}, - }, nil, nil). 
+ msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypeBroadcast, + Namespace: "ns1", + TxType: fftypes.TransactionTypeNone, + }, + Data: []*fftypes.DataRef{ + {ID: dataID}, + }} + + mdi.On("GetMessageIDs", mock.Anything, mock.Anything, mock.Anything). + Return([]*fftypes.IDAndSequence{{ID: *msg.Header.ID}}, nil, nil). Run(func(args mock.Arguments) { bm.Close() }). Once() - mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{}, nil, nil) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, nil) + mdi.On("GetMessageIDs", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{}, nil, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(msg, fftypes.DataArray{}, false, nil) bm.(*batchManager).messageSequencer() @@ -347,27 +381,32 @@ func TestMessageSequencerUpdateMessagesFail(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) ctx, cancelCtx := context.WithCancel(context.Background()) - bm, _ := NewBatchManager(ctx, mni, mdi, mdm) - bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - return nil - }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}) + bm, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) + bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, + func(c context.Context, state *DispatchState) error { + return nil + }, + DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, + ) dataID := fftypes.NewUUID() - mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{ - { - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - TxType: fftypes.TransactionTypeBatchPin, - Type: fftypes.MessageTypeBroadcast, - Namespace: "ns1", - }, - Data: []*fftypes.DataRef{ - {ID: dataID}, - }}, - }, nil, nil) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + TxType: fftypes.TransactionTypeBatchPin, + Type: fftypes.MessageTypeBroadcast, + Namespace: "ns1", + }, + Data: []*fftypes.DataRef{ + {ID: dataID}, + }, + } + mdi.On("GetMessageIDs", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{{ID: *msg.Header.ID}}, nil, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(msg, fftypes.DataArray{{ID: dataID}}, true, nil) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() mdi.On("InsertTransaction", mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) // transaction submit mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -398,27 +437,30 @@ func TestMessageSequencerDispatchFail(t *testing.T) { mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) ctx, cancelCtx := context.WithCancel(context.Background()) - bm, _ := NewBatchManager(ctx, mni, mdi, mdm) - 
bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - cancelCtx() - return fmt.Errorf("fizzle") - }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}) + bm, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) + bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, + func(c context.Context, state *DispatchState) error { + cancelCtx() + return fmt.Errorf("fizzle") + }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, + ) dataID := fftypes.NewUUID() - mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{ - { - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - TxType: fftypes.TransactionTypeBatchPin, - Type: fftypes.MessageTypeBroadcast, - Namespace: "ns1", - }, - Data: []*fftypes.DataRef{ - {ID: dataID}, - }}, - }, nil, nil) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + TxType: fftypes.TransactionTypeBatchPin, + Type: fftypes.MessageTypeBroadcast, + Namespace: "ns1", + }, + Data: []*fftypes.DataRef{ + {ID: dataID}, + }, + } + mdi.On("GetMessageIDs", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{{ID: *msg.Header.ID}}, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(msg, fftypes.DataArray{{ID: dataID}}, true, nil) mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything).Return(nil) bm.(*batchManager).messageSequencer() @@ -429,31 +471,36 @@ func TestMessageSequencerDispatchFail(t *testing.T) { mdi.AssertExpectations(t) mdm.AssertExpectations(t) } + func TestMessageSequencerUpdateBatchFail(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()) ctx, cancelCtx := context.WithCancel(context.Background()) - bm, _ := NewBatchManager(ctx, mni, mdi, mdm) - bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - return nil - }, DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(ctx, mni, mdi, mdm, txHelper) + bm.RegisterDispatcher("utdispatcher", fftypes.TransactionTypeBatchPin, []fftypes.MessageType{fftypes.MessageTypeBroadcast}, + func(c context.Context, state *DispatchState) error { + return nil + }, + DispatcherOptions{BatchMaxSize: 1, DisposeTimeout: 0}, + ) dataID := fftypes.NewUUID() - mdi.On("GetMessages", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Message{ - { - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - TxType: fftypes.TransactionTypeBatchPin, - Type: fftypes.MessageTypeBroadcast, - Namespace: "ns1", - }, - Data: []*fftypes.DataRef{ - {ID: dataID}, - }}, - }, nil, nil) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{{ID: dataID}}, true, nil) + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + TxType: fftypes.TransactionTypeBatchPin, + Type: fftypes.MessageTypeBroadcast, + Namespace: "ns1", + }, + Data: []*fftypes.DataRef{ + {ID: dataID}, + }, + } + 
mdi.On("GetMessageIDs", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.IDAndSequence{{ID: *msg.Header.ID}}, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(msg, fftypes.DataArray{{ID: dataID}}, true, nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("fizzle")) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything, mock.Anything) rag.RunFn = func(a mock.Arguments) { @@ -479,39 +526,46 @@ func TestMessageSequencerUpdateBatchFail(t *testing.T) { } func TestWaitForPollTimeout(t *testing.T) { - mdi := &databasemocks.Plugin{} - mdm := &datamocks.Manager{} - mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) - bm.(*batchManager).messagePollTimeout = 1 * time.Microsecond - bm.(*batchManager).waitForNewMessages() + bm, _ := newTestBatchManager(t) + bm.messagePollTimeout = 1 * time.Microsecond + bm.waitForNewMessages() } -func TestWaitForNewMessage(t *testing.T) { - mdi := &databasemocks.Plugin{} - mdm := &datamocks.Manager{} - mni := &sysmessagingmocks.LocalNodeInfo{} - bmi, _ := NewBatchManager(context.Background(), mni, mdi, mdm) - bm := bmi.(*batchManager) +func TestRewindForNewMessage(t *testing.T) { + bm, cancel := newTestBatchManager(t) + defer cancel() + go bm.newMessageNotifier() + bm.messagePollTimeout = 1 * time.Second + bm.waitForNewMessages() bm.readOffset = 22222 + bm.NewMessages() <- 12346 + bm.NewMessages() <- 12347 bm.NewMessages() <- 12345 bm.waitForNewMessages() - assert.Equal(t, int64(12344), bm.readOffset) + assert.Equal(t, int64(12344), bm.rewindOffset) + + mdi := bm.database.(*databasemocks.Plugin) + mdi.On("GetMessageIDs", mock.Anything, mock.MatchedBy(func(f database.Filter) bool { + fi, err := f.Finalize() + assert.NoError(t, err) + v, err := fi.Children[0].Value.Value() + assert.NoError(t, err) + assert.Equal(t, int64(12344), v) + return true + })).Return(nil, nil) + _, err := bm.readPage() + assert.NoError(t, err) } func TestAssembleMessageDataNilData(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) bm.Close() - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, nil) - _, err := bm.(*batchManager).assembleMessageData(&fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - }, - Data: fftypes.DataRefs{{ID: nil}}, - }) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(nil, nil, false, nil) + _, _, err := bm.(*batchManager).assembleMessageData(fftypes.NewUUID()) assert.Regexp(t, "FF10133", err) } @@ -519,17 +573,12 @@ func TestGetMessageDataFail(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(nil, nil, false, fmt.Errorf("pop")) bm.Close() - _, _ = bm.(*batchManager).assembleMessageData(&fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - }, - Data: 
fftypes.DataRefs{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, - }, - }) + _, _, err := bm.(*batchManager).assembleMessageData(fftypes.NewUUID()) + assert.Regexp(t, "FF10158", err) mdm.AssertExpectations(t) } @@ -537,16 +586,10 @@ func TestGetMessageNotFound(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} - bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm) - mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, nil) + txHelper := txcommon.NewTransactionHelper(mdi, mdm) + bm, _ := NewBatchManager(context.Background(), mni, mdi, mdm, txHelper) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(nil, nil, false, nil) bm.Close() - _, err := bm.(*batchManager).assembleMessageData(&fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, - }, - }) + _, _, err := bm.(*batchManager).assembleMessageData(fftypes.NewUUID()) assert.Regexp(t, "FF10133", err) } diff --git a/internal/batch/batch_processor.go b/internal/batch/batch_processor.go index 4d17900ff9..7d1bdedfec 100644 --- a/internal/batch/batch_processor.go +++ b/internal/batch/batch_processor.go @@ -21,11 +21,14 @@ import ( "crypto/sha256" "database/sql/driver" "encoding/binary" + "fmt" "math" "sync" "time" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/retry" "github.com/hyperledger/firefly/internal/sysmessaging" "github.com/hyperledger/firefly/internal/txcommon" @@ -35,7 +38,7 @@ import ( type batchWork struct { msg *fftypes.Message - data []*fftypes.Data + data fftypes.DataArray } type batchProcessorConf struct { @@ -44,7 +47,7 @@ type batchProcessorConf struct { dispatcherName string txType fftypes.TransactionType namespace string - identity fftypes.SignerRef + signer fftypes.SignerRef group *fftypes.Bytes32 dispatch DispatchHandler } @@ -73,6 +76,7 @@ type FlushStatus struct { type batchProcessor struct { ctx context.Context ni sysmessaging.LocalNodeInfo + data data.Manager database database.Plugin txHelper txcommon.Helper cancelCtx func() @@ -89,9 +93,16 @@ type batchProcessor struct { conf *batchProcessorConf } +type DispatchState struct { + Persisted fftypes.BatchPersisted + Messages []*fftypes.Message + Data fftypes.DataArray + Pins []*fftypes.Bytes32 +} + const batchSizeEstimateBase = int64(512) -func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, conf *batchProcessorConf, baseRetryConf *retry.Retry) *batchProcessor { +func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di database.Plugin, dm data.Manager, conf *batchProcessorConf, baseRetryConf *retry.Retry, txHelper txcommon.Helper) *batchProcessor { pCtx := log.WithLogField(log.WithLogField(ctx, "d", conf.dispatcherName), "p", conf.name) pCtx, cancelCtx := context.WithCancel(pCtx) bp := &batchProcessor{ @@ -99,7 +110,8 @@ func newBatchProcessor(ctx context.Context, ni sysmessaging.LocalNodeInfo, di da cancelCtx: cancelCtx, ni: ni, database: di, - txHelper: txcommon.NewTransactionHelper(di), + data: dm, + txHelper: txHelper, newWork: make(chan *batchWork, conf.BatchMaxSize), quescing: make(chan bool, 1), done: make(chan struct{}), @@ -186,7 +198,7 @@ func (bp *batchProcessor) addWork(newWork *batchWork) (full, overflow bool) { func (bp 
*batchProcessor) addFlushedSequences(flushAssembly []*batchWork) { // We need to keep track of the sequences we're flushing, because until we finish our flush - // the batch processor might be re-queuing the same messages to use due to rewinds. + // the batch processor might be re-queuing the same messages to us due to rewinds. // We keep twice the batch size, which might be made up of multiple batches maxFlushedSeqLen := int(2 * bp.conf.BatchMaxSize) @@ -234,7 +246,7 @@ func (bp *batchProcessor) startFlush(overflow bool) (id *fftypes.UUID, flushAsse return id, flushAssembly, byteSize } -func (bp *batchProcessor) endFlush(batch *fftypes.Batch, byteSize int64) { +func (bp *batchProcessor) endFlush(state *DispatchState, byteSize int64) { bp.statusMux.Lock() defer bp.statusMux.Unlock() fs := &bp.flushStatus @@ -250,10 +262,10 @@ func (bp *batchProcessor) endFlush(batch *fftypes.Batch, byteSize int64) { fs.totalBytesFlushed += byteSize fs.AverageBatchBytes = (fs.totalBytesFlushed / fs.TotalBatches) - fs.totalMessagesFlushed += int64(len(batch.Payload.Messages)) + fs.totalMessagesFlushed += int64(len(state.Messages)) fs.AverageBatchMessages = math.Round((float64(fs.totalMessagesFlushed)/float64(fs.TotalBatches))*100) / 100 - fs.totalDataFlushed += int64(len(batch.Payload.Data)) + fs.totalDataFlushed += int64(len(state.Data)) fs.AverageBatchData = math.Round((float64(fs.totalDataFlushed)/float64(fs.TotalBatches))*100) / 100 } @@ -347,50 +359,66 @@ func (bp *batchProcessor) assemblyLoop() { func (bp *batchProcessor) flush(overflow bool) error { id, flushWork, byteSize := bp.startFlush(overflow) - batch := bp.buildFlushBatch(id, flushWork) - pins, err := bp.persistBatch(batch) + log.L(bp.ctx).Debugf("Flushing batch %s", id) + state := bp.initFlushState(id, flushWork) + + // Sealing phase: assigns persisted pins to messages, and finalizes the manifest + err := bp.sealBatch(state) if err != nil { return err } + log.L(bp.ctx).Debugf("Sealed batch %s", id) - err = bp.dispatchBatch(batch, pins) + // Dispatch phase: the heavy lifting work - calling plugins to do the hard work of the batch. + // The dispatcher can update the state, such as appending to the BlobsPublished array, + // to affect DB updates as part of the finalization phase. + err = bp.dispatchBatch(state) if err != nil { return err } + log.L(bp.ctx).Debugf("Dispatched batch %s", id) - err = bp.markMessagesDispatched(batch) + // Finalization phase: writes back the changes to the DB, so that these messages + // are all tagged as part of this batch, and won't be included in any future batches.
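A condensed sketch of the seal/dispatch/finalize pipeline narrated above, with stand-in types; note the sealing step hashes the small manifest rather than the full payload, as the sealBatch changes further down in this hunk show:

package example

import (
	"context"
	"crypto/sha256"
	"fmt"
)

// Stand-in flush state: the real DispatchState carries the persisted batch,
// messages, data and pins.
type state struct {
	manifest string
	hash     string
}

func seal(ctx context.Context, s *state) error {
	sum := sha256.Sum256([]byte(s.manifest))
	s.hash = fmt.Sprintf("%x", sum) // batch hash = hash of the manifest
	return nil
}

func flush(ctx context.Context, s *state,
	dispatch, finalize func(context.Context, *state) error) error {
	if err := seal(ctx, s); err != nil {
		return err
	}
	if err := dispatch(ctx, s); err != nil {
		return err
	}
	return finalize(ctx, s)
}

func main() {
	s := &state{manifest: `{"id":"batch-1","messages":[{"id":"msg-1"}]}`}
	err := flush(context.Background(), s,
		func(ctx context.Context, s *state) error { return nil }, // plugin call
		func(ctx context.Context, s *state) error { return nil }, // DB write-back
	)
	fmt.Println(s.hash, err)
}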
+ err = bp.markPayloadDispatched(state) if err != nil { return err } + log.L(bp.ctx).Debugf("Finalized batch %s", id) - bp.endFlush(batch, byteSize) + bp.endFlush(state, byteSize) return nil } -func (bp *batchProcessor) buildFlushBatch(id *fftypes.UUID, newWork []*batchWork) *fftypes.Batch { - log.L(bp.ctx).Debugf("Flushing batch %s", id) - batch := &fftypes.Batch{ - ID: id, - Namespace: bp.conf.namespace, - SignerRef: bp.conf.identity, - Group: bp.conf.group, - Payload: fftypes.BatchPayload{}, - Created: fftypes.Now(), - Node: bp.ni.GetNodeUUID(bp.ctx), +func (bp *batchProcessor) initFlushState(id *fftypes.UUID, flushWork []*batchWork) *DispatchState { + state := &DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: id, + Type: bp.conf.DispatcherOptions.BatchType, + Namespace: bp.conf.namespace, + SignerRef: bp.conf.signer, + Group: bp.conf.group, + Created: fftypes.Now(), + Node: bp.ni.GetNodeUUID(bp.ctx), + }, + }, } - for _, w := range newWork { + for _, w := range flushWork { if w.msg != nil { - w.msg.BatchID = batch.ID - w.msg.State = "" // state should always be set by receivers when loading the batch - batch.Payload.Messages = append(batch.Payload.Messages, w.msg) + w.msg.BatchID = id + state.Messages = append(state.Messages, w.msg.BatchMessage()) + } + for _, d := range w.data { + log.L(bp.ctx).Debugf("Adding data '%s' to batch '%s' for message '%s'", d.ID, id, w.msg.Header.ID) + state.Data = append(state.Data, d.BatchData(state.Persisted.Type)) } - batch.Payload.Data = append(batch.Payload.Data, w.data...) } - return batch + return state } -func (bp *batchProcessor) maskContext(ctx context.Context, msg *fftypes.Message, topic string) (contextOrPin *fftypes.Bytes32, err error) { +func (bp *batchProcessor) maskContext(ctx context.Context, msg *fftypes.Message, topic string) (msgPinString string, contextOrPin *fftypes.Bytes32, err error) { hashBuilder := sha256.New() hashBuilder.Write([]byte(topic)) @@ -399,7 +427,7 @@ func (bp *batchProcessor) maskContext(ctx context.Context, msg *fftypes.Message, // of the topic. There would be no way to unmask it if we did, because we don't have // the full list of senders to know what their next hashes should be. 
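The branch that follows returns an unmasked context hash for broadcast; for private groups, the full pin computation combines the topic, group hash, sender and a big-endian nonce, and the stored pin string embeds the nonce after the hash (per the maskContext changes here). A worked stand-alone sketch, with inputs mirroring the batch_manager_test.go vectors in this patch:

package example

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// Illustrative re-derivation of a private-message pin: SHA-256 over
// topic || group-hash || author-DID || big-endian nonce, with the stored
// pin string carrying the nonce after the hash ("%s:%.16d" in the patch).
func maskedPin(topic string, group []byte, author string, nonce uint64) string {
	h := sha256.New()
	h.Write([]byte(topic))
	h.Write(group)
	h.Write([]byte(author))
	nonceBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(nonceBytes, nonce)
	h.Write(nonceBytes)
	return fmt.Sprintf("%x:%.16d", h.Sum(nil), nonce)
}

func main() {
	group, _ := hex.DecodeString("44dc0861e69d9bab17dd5e90a8898c2ea156ad04e5fabf83119cc010486e6c1b")
	fmt.Println(maskedPin("topic1", group, "did:firefly:org/abcd", 12345))
}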
if msg.Header.Group == nil { - return fftypes.HashResult(hashBuilder), nil + return "", fftypes.HashResult(hashBuilder), nil } // For private groups, we need to make the topic specific to the group (which is @@ -418,7 +446,7 @@ func (bp *batchProcessor) maskContext(ctx context.Context, msg *fftypes.Message, } err = bp.database.UpsertNonceNext(ctx, gc) if err != nil { - return nil, err + return "", nil, err } // Now combine our sending identity, and this nonce, to produce the hash that should @@ -429,65 +457,100 @@ binary.BigEndian.PutUint64(nonceBytes, uint64(gc.Nonce)) hashBuilder.Write(nonceBytes) - return fftypes.HashResult(hashBuilder), err + pin := fftypes.HashResult(hashBuilder) + pinStr := fmt.Sprintf("%s:%.16d", pin, gc.Nonce) + log.L(ctx).Debugf("Assigned pin '%s' to message %s for topic '%s'", pinStr, msg.Header.ID, topic) + return pinStr, pin, err } -func (bp *batchProcessor) maskContexts(ctx context.Context, batch *fftypes.Batch) ([]*fftypes.Bytes32, error) { +func (bp *batchProcessor) maskContexts(ctx context.Context, messages []*fftypes.Message) ([]*fftypes.Bytes32, error) { // Calculate the sequence hashes - contextsOrPins := make([]*fftypes.Bytes32, 0, len(batch.Payload.Messages)) - for _, msg := range batch.Payload.Messages { + pinsAssigned := false + contextsOrPins := make([]*fftypes.Bytes32, 0, len(messages)) + for _, msg := range messages { + if len(msg.Pins) > 0 { + // We have already allocated pins to this message, so we cannot re-allocate. + log.L(ctx).Debugf("Message %s already has %d pins allocated", msg.Header.ID, len(msg.Pins)) + continue + } for _, topic := range msg.Header.Topics { - contextOrPin, err := bp.maskContext(ctx, msg, topic) + pinString, contextOrPin, err := bp.maskContext(ctx, msg, topic) if err != nil { return nil, err } contextsOrPins = append(contextsOrPins, contextOrPin) if msg.Header.Group != nil { - msg.Pins = append(msg.Pins, contextOrPin.String()) + msg.Pins = append(msg.Pins, pinString /* contains the nonce as well as the pin hash */) + pinsAssigned = true + } + } + if pinsAssigned { + // It's important we update the message pins at this phase, as we have "spent" a nonce + // on this topic from the database. So this message has grabbed a slot in our queue. + // If we fail the dispatch, and redo the batch sealing process, we must not allocate + // a second nonce to it (and as such modify the batch payload).
+ err := bp.database.UpdateMessage(ctx, msg.Header.ID, + database.MessageQueryFactory.NewUpdate(ctx).Set("pins", msg.Pins), + ) + if err != nil { + return nil, err + } } } return contextsOrPins, nil } -func (bp *batchProcessor) persistBatch(batch *fftypes.Batch) (contexts []*fftypes.Bytes32, err error) { +func (bp *batchProcessor) sealBatch(state *DispatchState) (err error) { err = bp.retry.Do(bp.ctx, "batch persist", func(attempt int) (retry bool, err error) { return true, bp.database.RunAsGroup(bp.ctx, func(ctx context.Context) (err error) { if bp.conf.txType == fftypes.TransactionTypeBatchPin { // Generate a new Transaction, which will be used to record status of the associated transaction as it happens - if contexts, err = bp.maskContexts(ctx, batch); err != nil { + if state.Pins, err = bp.maskContexts(ctx, state.Messages); err != nil { return err } } - batch.Payload.TX.Type = bp.conf.txType - if batch.Payload.TX.ID, err = bp.txHelper.SubmitNewTransaction(ctx, batch.Namespace, bp.conf.txType); err != nil { + state.Persisted.TX.Type = bp.conf.txType + if state.Persisted.TX.ID, err = bp.txHelper.SubmitNewTransaction(ctx, state.Persisted.Namespace, bp.conf.txType); err != nil { return err } + manifest := state.Persisted.GenManifest(state.Messages, state.Data) + + // The hash of the batch is the hash of the manifest, to minimize the compute cost. + // Note that in v0.13 and before it was the hash of the payload, so the inbound route has a fallback to accepting the full payload hash + manifestString := manifest.String() + state.Persisted.Manifest = fftypes.JSONAnyPtr(manifestString) + state.Persisted.Hash = fftypes.HashString(manifestString) - batch.Hash = batch.Payload.Hash() - log.L(ctx).Debugf("Batch %s sealed. Hash=%s", batch.ID, batch.Hash) - return bp.database.UpsertBatch(ctx, batch) + log.L(ctx).Debugf("Batch %s sealed. Hash=%s", state.Persisted.ID, state.Persisted.Hash) + + // At this point the manifest of the batch is finalized. We write it to the database + return bp.database.UpsertBatch(ctx, &state.Persisted) }) }) - return contexts, err + return err } -func (bp *batchProcessor) dispatchBatch(batch *fftypes.Batch, pins []*fftypes.Bytes32) error { +func (bp *batchProcessor) dispatchBatch(state *DispatchState) error { // Call the dispatcher to do the heavy lifting - will only exit if we're closed - return bp.retry.Do(bp.ctx, "batch dispatch", func(attempt int) (retry bool, err error) { - return true, bp.conf.dispatch(bp.ctx, batch, pins) + return operations.RunWithOperationCache(bp.ctx, func(ctx context.Context) error { + return bp.retry.Do(ctx, "batch dispatch", func(attempt int) (retry bool, err error) { + return true, bp.conf.dispatch(ctx, state) }) }) } -func (bp *batchProcessor) markMessagesDispatched(batch *fftypes.Batch) error { +func (bp *batchProcessor) markPayloadDispatched(state *DispatchState) error { return bp.retry.Do(bp.ctx, "mark dispatched messages", func(attempt int) (retry bool, err error) { return true, bp.database.RunAsGroup(bp.ctx, func(ctx context.Context) (err error) { // Update all the messages in the batch with the batch ID - msgIDs := make([]driver.Value, len(batch.Payload.Messages)) - for i, msg := range batch.Payload.Messages { + msgIDs := make([]driver.Value, len(state.Messages)) + for i, msg := range state.Messages { msgIDs[i] = msg.Header.ID + // We don't want to have to read the DB again if we want to query for the batch ID, or pins, + // so ensure the copy in our cache gets updated.
+ bp.data.UpdateMessageIfCached(ctx, msg) } fb := database.MessageQueryFactory.NewFilter(ctx) filter := fb.And( @@ -495,31 +558,34 @@ func (bp *batchProcessor) markMessagesDispatched(batch *fftypes.Batch) error { fb.Eq("state", fftypes.MessageStateReady), // In the outside chance the next state transition happens first (which supersedes this) ) - var update database.Update + var allMsgsUpdate database.Update if bp.conf.txType == fftypes.TransactionTypeBatchPin { // Sent state waiting for confirm - update = database.MessageQueryFactory.NewUpdate(ctx). - Set("batch", batch.ID). // Mark the batch they are in + allMsgsUpdate = database.MessageQueryFactory.NewUpdate(ctx). + Set("batch", state.Persisted.ID). // Mark the batch they are in Set("state", fftypes.MessageStateSent) // Set them sent, so they won't be picked up and re-sent after restart/rewind } else { // Immediate confirmation if no batch pinning - update = database.MessageQueryFactory.NewUpdate(ctx). - Set("batch", batch.ID). + allMsgsUpdate = database.MessageQueryFactory.NewUpdate(ctx). + Set("batch", state.Persisted.ID). Set("state", fftypes.MessageStateConfirmed). Set("confirmed", fftypes.Now()) } - if err = bp.database.UpdateMessages(ctx, filter, update); err != nil { + if err = bp.database.UpdateMessages(ctx, filter, allMsgsUpdate); err != nil { return err } if bp.conf.txType == fftypes.TransactionTypeUnpinned { - for _, msg := range batch.Payload.Messages { + for _, msg := range state.Messages { // Emit a confirmation event locally immediately - event := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, batch.Namespace, msg.Header.ID, batch.Payload.TX.ID) - event.Correlator = msg.Header.CID - if err := bp.database.InsertEvent(ctx, event); err != nil { - return err + for _, topic := range msg.Header.Topics { + // One event per topic + event := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, state.Persisted.Namespace, msg.Header.ID, state.Persisted.TX.ID, topic) + event.Correlator = msg.Header.CID + if err := bp.database.InsertEvent(ctx, event); err != nil { + return err + } } } } diff --git a/internal/batch/batch_processor_test.go b/internal/batch/batch_processor_test.go index 63b3cd89ff..c5111a4e40 100644 --- a/internal/batch/batch_processor_test.go +++ b/internal/batch/batch_processor_test.go @@ -16,6 +16,7 @@ package batch import ( "context" + "encoding/json" "fmt" "testing" "time" @@ -23,7 +24,9 @@ import ( "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/retry" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/sysmessagingmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/fftypes" @@ -34,11 +37,13 @@ import ( func newTestBatchProcessor(dispatch DispatchHandler) (*databasemocks.Plugin, *batchProcessor) { mdi := &databasemocks.Plugin{} mni := &sysmessagingmocks.LocalNodeInfo{} + mdm := &datamocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mni.On("GetNodeUUID", mock.Anything).Return(fftypes.NewUUID()).Maybe() - bp := newBatchProcessor(context.Background(), mni, mdi, &batchProcessorConf{ + bp := newBatchProcessor(context.Background(), mni, mdi, mdm, &batchProcessorConf{ namespace: "ns1", txType: fftypes.TransactionTypeBatchPin, - identity: fftypes.SignerRef{Author: "did:firefly:org/abcd", Key: "0x12345"}, + signer: 
fftypes.SignerRef{Author: "did:firefly:org/abcd", Key: "0x12345"}, dispatch: dispatch, DispatcherOptions: DispatcherOptions{ BatchMaxSize: 10, @@ -49,7 +54,7 @@ func newTestBatchProcessor(dispatch DispatchHandler) (*databasemocks.Plugin, *ba }, &retry.Retry{ InitialDelay: 1 * time.Microsecond, MaximumDelay: 1 * time.Microsecond, - }) + }, txHelper) bp.txHelper = &txcommonmocks.Helper{} return mdi, bp } @@ -66,20 +71,22 @@ func TestUnfilledBatch(t *testing.T) { log.SetLevel("debug") config.Reset() - dispatched := make(chan *fftypes.Batch) - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - dispatched <- b + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state return nil }) mockRunAsGroupPassthrough(mdi) mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - mdi.On("UpdateBatch", mock.Anything, mock.Anything).Return(nil) mth := bp.txHelper.(*txcommonmocks.Helper) mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeBatchPin).Return(fftypes.NewUUID(), nil) + mdm := bp.data.(*datamocks.Manager) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() + // Dispatch the work go func() { for i := 0; i < 5; i++ { @@ -94,36 +101,42 @@ func TestUnfilledBatch(t *testing.T) { batch := <-dispatched // Check we got all the messages in a single batch - assert.Equal(t, 5, len(batch.Payload.Messages)) + assert.Equal(t, 5, len(batch.Messages)) bp.cancelCtx() <-bp.done + + mdm.AssertExpectations(t) + mdi.AssertExpectations(t) + mth.AssertExpectations(t) } func TestBatchSizeOverflow(t *testing.T) { log.SetLevel("debug") config.Reset() - dispatched := make(chan *fftypes.Batch) - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - dispatched <- b + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state return nil }) bp.conf.BatchMaxBytes = batchSizeEstimateBase + (&fftypes.Message{}).EstimateSize(false) + 100 mockRunAsGroupPassthrough(mdi) mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - mdi.On("UpdateBatch", mock.Anything, mock.Anything).Return(nil) mth := bp.txHelper.(*txcommonmocks.Helper) mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeBatchPin).Return(fftypes.NewUUID(), nil) + mdm := bp.data.(*datamocks.Manager) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() + // Dispatch the work + msgIDs := []*fftypes.UUID{fftypes.NewUUID(), fftypes.NewUUID()} go func() { for i := 0; i < 2; i++ { - msgid := fftypes.NewUUID() bp.newWork <- &batchWork{ - msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}, Sequence: int64(1000 + i)}, + msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgIDs[i]}, Sequence: int64(1000 + i)}, } } }() @@ -133,27 +146,31 @@ func TestBatchSizeOverflow(t *testing.T) { batch2 := <-dispatched // Check we got all messages across two batches - assert.Equal(t, 1, len(batch1.Payload.Messages)) - assert.Equal(t, int64(1000), batch1.Payload.Messages[0].Sequence) - assert.Equal(t, 1, len(batch2.Payload.Messages)) - assert.Equal(t, int64(1001), 
batch2.Payload.Messages[0].Sequence) + assert.Equal(t, 1, len(batch1.Messages)) + assert.Equal(t, msgIDs[0], batch1.Messages[0].Header.ID) + assert.Equal(t, 1, len(batch2.Messages)) + assert.Equal(t, msgIDs[1], batch2.Messages[0].Header.ID) bp.cancelCtx() <-bp.done + + mdi.AssertExpectations(t) + mth.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestCloseToUnblockDispatch(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return fmt.Errorf("pop") }) bp.cancelCtx() - bp.dispatchBatch(&fftypes.Batch{}, []*fftypes.Bytes32{}) + bp.dispatchBatch(&DispatchState{}) <-bp.done } func TestCloseToUnblockUpsertBatch(t *testing.T) { - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.retry.MaximumDelay = 1 * time.Microsecond @@ -187,7 +204,7 @@ func TestCloseToUnblockUpsertBatch(t *testing.T) { } func TestCalcPinsFail(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.cancelCtx() @@ -196,16 +213,18 @@ func TestCalcPinsFail(t *testing.T) { mockRunAsGroupPassthrough(mdi) gid := fftypes.NewRandB32() - _, err := bp.persistBatch(&fftypes.Batch{ - Group: gid, - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{ - {Header: fftypes.MessageHeader{ - Group: gid, - Topics: fftypes.FFStringArray{"topic1"}, - }}, + err := bp.sealBatch(&DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: gid, }, }, + Messages: []*fftypes.Message{ + {Header: fftypes.MessageHeader{ + Group: gid, + Topics: fftypes.FFStringArray{"topic1"}, + }}, + }, }) assert.Regexp(t, "FF10158", err) @@ -215,7 +234,7 @@ func TestCalcPinsFail(t *testing.T) { } func TestAddWorkInRecentlyFlushed(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.flushedSequences = []int64{100, 500, 400, 900, 200, 700} @@ -229,7 +248,7 @@ func TestAddWorkInRecentlyFlushed(t *testing.T) { } func TestAddWorkInSortDeDup(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.assemblyQueue = []*batchWork{ @@ -254,7 +273,7 @@ func TestAddWorkInSortDeDup(t *testing.T) { } func TestStartFlushOverflow(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) batchID := fftypes.NewUUID() @@ -283,7 +302,7 @@ func TestStartFlushOverflow(t *testing.T) { } func TestStartQuiesceNonBlocking(t *testing.T) { - _, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { + _, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { return nil }) bp.startQuiesce() @@ -294,9 +313,9 @@ func TestMarkMessageDispatchedUnpinnedOK(t *testing.T) { log.SetLevel("debug") config.Reset() - 
dispatched := make(chan *fftypes.Batch) - mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error { - dispatched <- b + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state return nil }) bp.conf.txType = fftypes.TransactionTypeUnpinned @@ -310,12 +329,15 @@ func TestMarkMessageDispatchedUnpinnedOK(t *testing.T) { mth := bp.txHelper.(*txcommonmocks.Helper) mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeUnpinned).Return(fftypes.NewUUID(), nil) + mdm := bp.data.(*datamocks.Manager) + mdm.On("UpdateMessageIfCached", mock.Anything, mock.Anything).Return() + // Dispatch the work go func() { for i := 0; i < 5; i++ { msgid := fftypes.NewUUID() bp.newWork <- &batchWork{ - msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}, Sequence: int64(1000 + i)}, + msg: &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid, Topics: fftypes.FFStringArray{"topic1"}}, Sequence: int64(1000 + i)}, } } }() @@ -324,10 +346,171 @@ func TestMarkMessageDispatchedUnpinnedOK(t *testing.T) { batch := <-dispatched // Check we got all the messages in a single batch - assert.Equal(t, 5, len(batch.Payload.Messages)) + assert.Equal(t, 5, len(batch.Messages)) + + bp.cancelCtx() + <-bp.done + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + mth.AssertExpectations(t) +} + +func TestMaskContextsDuplicate(t *testing.T) { + log.SetLevel("debug") + config.Reset() + + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state + return nil + }) + + mdi.On("UpsertNonceNext", mock.Anything, mock.Anything).Return(nil).Once() + mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + + messages := []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypePrivate, + Group: fftypes.NewRandB32(), + Topics: fftypes.FFStringArray{"topic1"}, + }, + }, + } + + _, err := bp.maskContexts(bp.ctx, messages) + assert.NoError(t, err) + + // 2nd time no DB ops + _, err = bp.maskContexts(bp.ctx, messages) + assert.NoError(t, err) bp.cancelCtx() <-bp.done mdi.AssertExpectations(t) } + +func TestMaskContextsUpdataMessageFail(t *testing.T) { + log.SetLevel("debug") + config.Reset() + + dispatched := make(chan *DispatchState) + mdi, bp := newTestBatchProcessor(func(c context.Context, state *DispatchState) error { + dispatched <- state + return nil + }) + + mdi.On("UpsertNonceNext", mock.Anything, mock.Anything).Return(nil).Once() + mdi.On("UpdateMessage", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")).Once() + + messages := []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypePrivate, + Group: fftypes.NewRandB32(), + Topics: fftypes.FFStringArray{"topic1"}, + }, + }, + } + + _, err := bp.maskContexts(bp.ctx, messages) + assert.Regexp(t, "pop", err) + + bp.cancelCtx() + <-bp.done + + mdi.AssertExpectations(t) +} + +func TestBigBatchEstimate(t *testing.T) { + log.SetLevel("debug") + config.Reset() + + bd := []byte(`{ + "id": "37ba893b-fcfa-4cf9-8ce8-34cd8bc9bc72", + "type": "broadcast", + "namespace": "default", + "node": "248ba775-f595-40a6-a989-c2f2faae2dea", + "author": "did:firefly:org/org_0", + "key": "0x7e3bb2198959d3a1c3ede9db1587560320ce8998", + "Group": null, + "created": "2022-03-18T14:57:33.228374398Z", + 
"hash": "7c620c12207ec153afea75d958de3edf601beced2570c798ebc246c2c44a5f66", + "payload": { + "tx": { + "type": "batch_pin", + "id": "8d3f06b8-adb5-4745-a536-a9e262fd2e9f" + }, + "messages": [ + { + "header": { + "id": "2b393190-28e7-4b86-8af6-00906e94989b", + "type": "broadcast", + "txtype": "batch_pin", + "author": "did:firefly:org/org_0", + "key": "0x7e3bb2198959d3a1c3ede9db1587560320ce8998", + "created": "2022-03-18T14:57:32.209734225Z", + "namespace": "default", + "topics": [ + "default" + ], + "tag": "perf_02e01e12-b918-4982-8407-2f9a08d673f3_740", + "datahash": "b5b0c398450707b885f5973248ffa9a542f4c2f54860eba6c2d7aee48d0f9109" + }, + "hash": "5fc430f1c8134c6c32c4e34ef65984843bb77bb19e73c862d464669537d96dbd", + "data": [ + { + "id": "147743b4-bd23-4da1-bd21-90c4ad9f1650", + "hash": "8ed265110f60711f79de1bc87b476e00bd8f8be436cdda3cf27fbf886d5e6ce6" + } + ] + } + ], + "data": [ + { + "id": "147743b4-bd23-4da1-bd21-90c4ad9f1650", + "validator": "json", + "namespace": "default", + "hash": "8ed265110f60711f79de1bc87b476e00bd8f8be436cdda3cf27fbf886d5e6ce6", + "created": "2022-03-18T14:57:32.209705277Z", + "value": { + "broadcastID": "7407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407407
40740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740740" + } + } + ] + } + }`) + var batch fftypes.Batch + err := json.Unmarshal(bd, &batch) + assert.NoError(t, err) + + sizeEstimate := batchSizeEstimateBase + for i, m := range batch.Payload.Messages { + dataJSONSize := 0 + bw := &batchWork{ + msg: m, + } + for _, dr := range m.Data { + for _, d := range batch.Payload.Data { + if d.ID.Equals(dr.ID) { + bw.data = append(bw.data, d) + break + } + } + bd, err := json.Marshal(&bw.data) + assert.NoError(t, err) + dataJSONSize += len(bd) + } + md, err := json.Marshal(&bw.msg) + assert.NoError(t, err) + msgJSONSize := len(md) + t.Logf("Msg=%.3d/%s Estimate=%d JSON - Msg=%d Data=%d Total=%d", i, m.Header.ID, bw.estimateSize(), msgJSONSize, dataJSONSize, msgJSONSize+dataJSONSize) + sizeEstimate += bw.estimateSize() + } + + assert.Greater(t, sizeEstimate, int64(len(bd))) +} diff --git a/internal/batchpin/batchpin.go b/internal/batchpin/batchpin.go index 6334abb127..9534a610b1 100644 --- a/internal/batchpin/batchpin.go +++ b/internal/batchpin/batchpin.go @@ -19,15 +19,23 @@ package batchpin import ( "context" + "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/identity" "github.com/hyperledger/firefly/internal/metrics" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" ) type Submitter interface { - SubmitPinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error + fftypes.Named + + SubmitPinnedBatch(ctx context.Context, batch *fftypes.BatchPersisted, contexts []*fftypes.Bytes32) error + + // From operations.OperationHandler + PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) + RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) } type batchPinSubmitter struct { @@ -35,39 +43,44 @@ type batchPinSubmitter struct { identity identity.Manager blockchain blockchain.Plugin metrics metrics.Manager + operations operations.Manager } -func NewBatchPinSubmitter(di database.Plugin, im identity.Manager, bi blockchain.Plugin, mm metrics.Manager) Submitter { - return &batchPinSubmitter{ +func NewBatchPinSubmitter(ctx context.Context, di database.Plugin, im identity.Manager, bi blockchain.Plugin, mm metrics.Manager, om operations.Manager) (Submitter, error) { + if di == nil || im == nil || bi == nil || mm == nil || om == nil { + return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) + } + bp := &batchPinSubmitter{ database: di, identity: im, blockchain: bi, metrics: mm, + operations: om, } + om.RegisterHandler(ctx, bp, []fftypes.OpType{ + fftypes.OpTypeBlockchainPinBatch, + }) + return bp, nil } -func (bp *batchPinSubmitter) SubmitPinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { +func (bp *batchPinSubmitter) 
Name() string { + return "BatchPinSubmitter" +} +func (bp *batchPinSubmitter) SubmitPinnedBatch(ctx context.Context, batch *fftypes.BatchPersisted, contexts []*fftypes.Bytes32) error { // The pending blockchain transaction op := fftypes.NewOperation( bp.blockchain, batch.Namespace, - batch.Payload.TX.ID, - fftypes.OpTypeBlockchainBatchPin) - if err := bp.database.InsertOperation(ctx, op); err != nil { + batch.TX.ID, + fftypes.OpTypeBlockchainPinBatch) + addBatchPinInputs(op, batch.ID, contexts) + if err := bp.operations.AddOrReuseOperation(ctx, op); err != nil { return err } if bp.metrics.IsMetricsEnabled() { bp.metrics.CountBatchPin() } - // Write the batch pin to the blockchain - return bp.blockchain.SubmitBatchPin(ctx, op.ID, nil /* TODO: ledger selection */, batch.Key, &blockchain.BatchPin{ - Namespace: batch.Namespace, - TransactionID: batch.Payload.TX.ID, - BatchID: batch.ID, - BatchHash: batch.Hash, - BatchPayloadRef: batch.PayloadRef, - Contexts: contexts, - }) + return bp.operations.RunOperation(ctx, opBatchPin(op, batch, contexts)) } diff --git a/internal/batchpin/batchpin_test.go b/internal/batchpin/batchpin_test.go index 613b65b06e..016519c208 100644 --- a/internal/batchpin/batchpin_test.go +++ b/internal/batchpin/batchpin_test.go @@ -26,6 +26,7 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/metricsmocks" + "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -34,110 +35,140 @@ import ( var utConfPrefix = config.NewPluginConfig("metrics") func newTestBatchPinSubmitter(t *testing.T, enableMetrics bool) *batchPinSubmitter { + config.Reset() + mdi := &databasemocks.Plugin{} mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} mmi := &metricsmocks.Manager{} + mom := &operationmocks.Manager{} mmi.On("IsMetricsEnabled").Return(enableMetrics) + mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) if enableMetrics { mmi.On("CountBatchPin").Return() } mbi.On("Name").Return("ut").Maybe() - bps := NewBatchPinSubmitter(mdi, mim, mbi, mmi).(*batchPinSubmitter) - return bps + bps, err := NewBatchPinSubmitter(context.Background(), mdi, mim, mbi, mmi, mom) + assert.NoError(t, err) + return bps.(*batchPinSubmitter) +} + +func TestInitFail(t *testing.T) { + _, err := NewBatchPinSubmitter(context.Background(), nil, nil, nil, nil, nil) + assert.Regexp(t, "FF10128", err) +} + +func TestName(t *testing.T) { + bp := newTestBatchPinSubmitter(t, false) + assert.Equal(t, "BatchPinSubmitter", bp.Name()) } func TestSubmitPinnedBatchOk(t *testing.T) { bp := newTestBatchPinSubmitter(t, false) ctx := context.Background() - mbi := bp.blockchain.(*blockchainmocks.Plugin) mdi := bp.database.(*databasemocks.Plugin) - - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "id1", - Key: "0x12345", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: fftypes.NewUUID(), + mmi := bp.metrics.(*metricsmocks.Manager) + mom := bp.operations.(*operationmocks.Manager) + + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "id1", + Key: "0x12345", }, }, + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, } contexts := []*fftypes.Bytes32{} - mdi.On("InsertOperation", ctx, mock.MatchedBy(func(op 
*fftypes.Operation) bool { - assert.Equal(t, fftypes.OpTypeBlockchainBatchPin, op.Type) + mom.On("AddOrReuseOperation", ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { + assert.Equal(t, fftypes.OpTypeBlockchainPinBatch, op.Type) assert.Equal(t, "ut", op.Plugin) - assert.Equal(t, *batch.Payload.TX.ID, *op.Transaction) + assert.Equal(t, *batch.TX.ID, *op.Transaction) return true })).Return(nil) - mbi.On("SubmitBatchPin", ctx, mock.Anything, (*fftypes.UUID)(nil), "0x12345", mock.Anything).Return(nil) - mmi := bp.metrics.(*metricsmocks.Manager) mmi.On("IsMetricsEnabled").Return(false) + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(batchPinData) + return op.Type == fftypes.OpTypeBlockchainPinBatch && data.Batch == batch + })).Return(nil) + err := bp.SubmitPinnedBatch(ctx, batch, contexts) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mmi.AssertExpectations(t) + mom.AssertExpectations(t) } func TestSubmitPinnedBatchWithMetricsOk(t *testing.T) { bp := newTestBatchPinSubmitter(t, true) ctx := context.Background() - mbi := bp.blockchain.(*blockchainmocks.Plugin) mdi := bp.database.(*databasemocks.Plugin) mmi := bp.metrics.(*metricsmocks.Manager) - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "id1", - Key: "0x12345", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: fftypes.NewUUID(), + mom := bp.operations.(*operationmocks.Manager) + + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "id1", + Key: "0x12345", }, }, + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, } contexts := []*fftypes.Bytes32{} - mdi.On("InsertOperation", ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { - assert.Equal(t, fftypes.OpTypeBlockchainBatchPin, op.Type) + mom.On("AddOrReuseOperation", ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { + assert.Equal(t, fftypes.OpTypeBlockchainPinBatch, op.Type) assert.Equal(t, "ut", op.Plugin) - assert.Equal(t, *batch.Payload.TX.ID, *op.Transaction) + assert.Equal(t, *batch.TX.ID, *op.Transaction) return true })).Return(nil) - mbi.On("SubmitBatchPin", ctx, mock.Anything, (*fftypes.UUID)(nil), "0x12345", mock.Anything).Return(nil) mmi.On("IsMetricsEnabled").Return(true) - mmi.On("BatchPinCounter").Return() + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(batchPinData) + return op.Type == fftypes.OpTypeBlockchainPinBatch && data.Batch == batch + })).Return(nil) err := bp.SubmitPinnedBatch(ctx, batch, contexts) assert.NoError(t, err) + + mdi.AssertExpectations(t) + mmi.AssertExpectations(t) + mom.AssertExpectations(t) } func TestSubmitPinnedBatchOpFail(t *testing.T) { bp := newTestBatchPinSubmitter(t, false) ctx := context.Background() - mdi := bp.database.(*databasemocks.Plugin) + mom := bp.operations.(*operationmocks.Manager) mmi := bp.metrics.(*metricsmocks.Manager) - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "id1", - Key: "0x12345", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: fftypes.NewUUID(), + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "id1", + Key: "0x12345", }, }, + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, } contexts := []*fftypes.Bytes32{} - 
mdi.On("InsertOperation", ctx, mock.Anything).Return(fmt.Errorf("pop")) + mom.On("AddOrReuseOperation", ctx, mock.Anything).Return(fmt.Errorf("pop")) mmi.On("IsMetricsEnabled").Return(false) err := bp.SubmitPinnedBatch(ctx, batch, contexts) assert.Regexp(t, "pop", err) diff --git a/internal/batchpin/operations.go b/internal/batchpin/operations.go new file mode 100644 index 0000000000..adaaf01694 --- /dev/null +++ b/internal/batchpin/operations.go @@ -0,0 +1,103 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package batchpin + +import ( + "context" + + "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/pkg/blockchain" + "github.com/hyperledger/firefly/pkg/fftypes" +) + +type batchPinData struct { + Batch *fftypes.BatchPersisted `json:"batch"` + Contexts []*fftypes.Bytes32 `json:"contexts"` +} + +func addBatchPinInputs(op *fftypes.Operation, batchID *fftypes.UUID, contexts []*fftypes.Bytes32) { + contextStr := make([]string, len(contexts)) + for i, c := range contexts { + contextStr[i] = c.String() + } + op.Input = fftypes.JSONObject{ + "batch": batchID.String(), + "contexts": contextStr, + } +} + +func retrieveBatchPinInputs(ctx context.Context, op *fftypes.Operation) (batchID *fftypes.UUID, contexts []*fftypes.Bytes32, err error) { + batchID, err = fftypes.ParseUUID(ctx, op.Input.GetString("batch")) + if err != nil { + return nil, nil, err + } + contextStr := op.Input.GetStringArray("contexts") + contexts = make([]*fftypes.Bytes32, len(contextStr)) + for i, c := range contextStr { + contexts[i], err = fftypes.ParseBytes32(ctx, c) + if err != nil { + return nil, nil, err + } + } + return batchID, contexts, nil +} + +func (bp *batchPinSubmitter) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + switch op.Type { + case fftypes.OpTypeBlockchainPinBatch: + batchID, contexts, err := retrieveBatchPinInputs(ctx, op) + if err != nil { + return nil, err + } + batch, err := bp.database.GetBatchByID(ctx, batchID) + if err != nil { + return nil, err + } else if batch == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + return opBatchPin(op, batch, contexts), nil + + default: + return nil, i18n.NewError(ctx, i18n.MsgOperationNotSupported, op.Type) + } +} + +func (bp *batchPinSubmitter) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { + switch data := op.Data.(type) { + case batchPinData: + batch := data.Batch + return nil, false, bp.blockchain.SubmitBatchPin(ctx, op.ID, nil /* TODO: ledger selection */, batch.Key, &blockchain.BatchPin{ + Namespace: batch.Namespace, + TransactionID: batch.TX.ID, + BatchID: batch.ID, + BatchHash: batch.Hash, + BatchPayloadRef: batch.PayloadRef, + Contexts: data.Contexts, + }) + + default: + return nil, false, i18n.NewError(ctx, i18n.MsgOperationDataIncorrect, op.Data) + } +} + +func opBatchPin(op 
*fftypes.Operation, batch *fftypes.BatchPersisted, contexts []*fftypes.Bytes32) *fftypes.PreparedOperation { + return &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + Data: batchPinData{Batch: batch, Contexts: contexts}, + } +} diff --git a/internal/batchpin/operations_test.go b/internal/batchpin/operations_test.go new file mode 100644 index 0000000000..0074de7d30 --- /dev/null +++ b/internal/batchpin/operations_test.go @@ -0,0 +1,148 @@ +// Copyright © 2021 Kaleido, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package batchpin + +import ( + "context" + "fmt" + "testing" + + "github.com/hyperledger/firefly/mocks/blockchainmocks" + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestPrepareAndRunBatchPin(t *testing.T) { + bp := newTestBatchPinSubmitter(t, false) + + op := &fftypes.Operation{ + Type: fftypes.OpTypeBlockchainPinBatch, + ID: fftypes.NewUUID(), + } + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Key: "0x123", + }, + }, + } + contexts := []*fftypes.Bytes32{ + fftypes.NewRandB32(), + fftypes.NewRandB32(), + } + addBatchPinInputs(op, batch.ID, contexts) + + mbi := bp.blockchain.(*blockchainmocks.Plugin) + mdi := bp.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", context.Background(), batch.ID).Return(batch, nil) + mbi.On("SubmitBatchPin", context.Background(), op.ID, mock.Anything, "0x123", mock.Anything).Return(nil) + + po, err := bp.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, batch, po.Data.(batchPinData).Batch) + + _, complete, err := bp.RunOperation(context.Background(), opBatchPin(op, batch, contexts)) + + assert.False(t, complete) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationNotSupported(t *testing.T) { + bp := newTestBatchPinSubmitter(t, false) + + po, err := bp.PrepareOperation(context.Background(), &fftypes.Operation{}) + + assert.Nil(t, po) + assert.Regexp(t, "FF10371", err) +} + +func TestPrepareOperationBatchPinBadBatch(t *testing.T) { + bp := newTestBatchPinSubmitter(t, false) + + op := &fftypes.Operation{ + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "bad"}, + } + + _, err := bp.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10142", err) +} + +func TestPrepareOperationBatchPinBadContext(t *testing.T) { + bp := newTestBatchPinSubmitter(t, false) + + op := &fftypes.Operation{ + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{ + "batch": fftypes.NewUUID().String(), + "contexts": []string{"bad"}, + }, + } + + _, err := bp.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10232", err) +} + +func TestRunOperationNotSupported(t *testing.T) { + bp := newTestBatchPinSubmitter(t, false) + + _, complete, err := bp.RunOperation(context.Background(),
&fftypes.PreparedOperation{}) + + assert.False(t, complete) + assert.Regexp(t, "FF10378", err) +} + +func TestPrepareOperationBatchPinError(t *testing.T) { + bp := newTestBatchPinSubmitter(t, false) + + batchID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{ + "batch": batchID.String(), + "contexts": []string{}, + }, + } + + mdi := bp.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", context.Background(), batchID).Return(nil, fmt.Errorf("pop")) + + _, err := bp.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") +} + +func TestPrepareOperationBatchPinNotFound(t *testing.T) { + bp := newTestBatchPinSubmitter(t, false) + + batchID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{ + "batch": batchID.String(), + "contexts": []string{}, + }, + } + + mdi := bp.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", context.Background(), batchID).Return(nil, nil) + + _, err := bp.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) +} diff --git a/internal/blockchain/ethereum/ethereum.go b/internal/blockchain/ethereum/ethereum.go index 2dd07e74cf..d058a13946 100644 --- a/internal/blockchain/ethereum/ethereum.go +++ b/internal/blockchain/ethereum/ethereum.go @@ -631,26 +631,26 @@ func parseContractLocation(ctx context.Context, location *fftypes.JSONAny) (*Loc return &ethLocation, nil } -func (e *Ethereum) AddSubscription(ctx context.Context, subscription *fftypes.ContractListenerInput) error { - location, err := parseContractLocation(ctx, subscription.Location) +func (e *Ethereum) AddContractListener(ctx context.Context, listener *fftypes.ContractListenerInput) error { + location, err := parseContractLocation(ctx, listener.Location) if err != nil { return err } - abi, err := e.FFIEventDefinitionToABI(ctx, &subscription.Event.FFIEventDefinition) + abi, err := e.FFIEventDefinitionToABI(ctx, &listener.Event.FFIEventDefinition) if err != nil { return i18n.WrapError(ctx, err, i18n.MsgContractParamInvalid) } - subName := fmt.Sprintf("ff-sub-%s", subscription.ID) - result, err := e.streams.createSubscription(ctx, location, e.initInfo.stream.ID, subName, abi) + subName := fmt.Sprintf("ff-sub-%s", listener.ID) + result, err := e.streams.createSubscription(ctx, location, e.initInfo.stream.ID, subName, listener.Options.FirstEvent, abi) if err != nil { return err } - subscription.ProtocolID = result.ID + listener.ProtocolID = result.ID return nil } -func (e *Ethereum) DeleteSubscription(ctx context.Context, subscription *fftypes.ContractListener) error { +func (e *Ethereum) DeleteContractListener(ctx context.Context, subscription *fftypes.ContractListener) error { return e.streams.deleteSubscription(ctx, subscription.ProtocolID) } diff --git a/internal/blockchain/ethereum/ethereum_test.go b/internal/blockchain/ethereum/ethereum_test.go index 795232c5ac..f36ff4d209 100644 --- a/internal/blockchain/ethereum/ethereum_test.go +++ b/internal/blockchain/ethereum/ethereum_test.go @@ -244,7 +244,7 @@ func TestInitAllExistingStreams(t *testing.T) { httpmock.NewJsonResponderOrPanic(200, []eventStream{{ID: "es12345", WebSocket: eventStreamWebsocket{Topic: "topic1"}}})) httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", httpmock.NewJsonResponderOrPanic(200, []subscription{ - {ID:
"sub12345", Stream: "es12345", Name: "BatchPin_30783132333435e3" /* this is the subname for our combo of instance path and BatchPin */}, })) httpmock.RegisterResponder("PATCH", "http://localhost:12345/eventstreams/es12345", httpmock.NewJsonResponderOrPanic(200, &eventStream{ID: "es12345", WebSocket: eventStreamWebsocket{Topic: "topic1"}})) @@ -1260,13 +1260,16 @@ func TestAddSubscription(t *testing.T) { }, }, }, + Options: &fftypes.ContractListenerOptions{ + FirstEvent: string(fftypes.SubOptsFirstEventNewest), + }, }, } httpmock.RegisterResponder("POST", `http://localhost:12345/subscriptions`, httpmock.NewJsonResponderOrPanic(200, &subscription{})) - err := e.AddSubscription(context.Background(), sub) + err := e.AddContractListener(context.Background(), sub) assert.NoError(t, err) } @@ -1305,7 +1308,7 @@ func TestAddSubscriptionBadParamDetails(t *testing.T) { httpmock.RegisterResponder("POST", `http://localhost:12345/subscriptions`, httpmock.NewJsonResponderOrPanic(200, &subscription{})) - err := e.AddSubscription(context.Background(), sub) + err := e.AddContractListener(context.Background(), sub) assert.Regexp(t, "FF10311", err) } @@ -1330,7 +1333,7 @@ func TestAddSubscriptionBadLocation(t *testing.T) { }, } - err := e.AddSubscription(context.Background(), sub) + err := e.AddContractListener(context.Background(), sub) assert.Regexp(t, "FF10310", err) } @@ -1354,13 +1357,16 @@ func TestAddSubscriptionFail(t *testing.T) { "address": "0x123", }.String()), Event: &fftypes.FFISerializedEvent{}, + Options: &fftypes.ContractListenerOptions{ + FirstEvent: string(fftypes.SubOptsFirstEventNewest), + }, }, } httpmock.RegisterResponder("POST", `http://localhost:12345/subscriptions`, httpmock.NewStringResponder(500, "pop")) - err := e.AddSubscription(context.Background(), sub) + err := e.AddContractListener(context.Background(), sub) assert.Regexp(t, "FF10111", err) assert.Regexp(t, "pop", err) @@ -1386,7 +1392,7 @@ func TestDeleteSubscription(t *testing.T) { httpmock.RegisterResponder("DELETE", `http://localhost:12345/subscriptions/sb-1`, httpmock.NewStringResponder(204, "")) - err := e.DeleteSubscription(context.Background(), sub) + err := e.DeleteContractListener(context.Background(), sub) assert.NoError(t, err) } @@ -1411,7 +1417,7 @@ func TestDeleteSubscriptionFail(t *testing.T) { httpmock.RegisterResponder("DELETE", `http://localhost:12345/subscriptions/sb-1`, httpmock.NewStringResponder(500, "")) - err := e.DeleteSubscription(context.Background(), sub) + err := e.DeleteContractListener(context.Background(), sub) assert.Regexp(t, "FF10111", err) } diff --git a/internal/blockchain/ethereum/eventstream.go b/internal/blockchain/ethereum/eventstream.go index a19b79d787..eb89c98627 100644 --- a/internal/blockchain/ethereum/eventstream.go +++ b/internal/blockchain/ethereum/eventstream.go @@ -26,6 +26,7 @@ import ( "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/restclient" + "github.com/hyperledger/firefly/pkg/fftypes" ) type streamManager struct { @@ -133,11 +134,18 @@ func (s *streamManager) getSubscriptions(ctx context.Context) (subs []*subscript return subs, nil } -func (s *streamManager) createSubscription(ctx context.Context, location *Location, stream, subName string, abi ABIElementMarshaling) (*subscription, error) { +func (s *streamManager) createSubscription(ctx context.Context, location *Location, stream, subName, fromBlock string, abi ABIElementMarshaling) (*subscription, error) { + // Map FireFly 
"firstEvent" values to Ethereum "fromBlock" values + switch fromBlock { + case string(fftypes.SubOptsFirstEventOldest): + fromBlock = "0" + case string(fftypes.SubOptsFirstEventNewest): + fromBlock = "latest" + } sub := subscription{ Name: subName, Stream: stream, - FromBlock: "0", + FromBlock: fromBlock, Address: location.Address, Event: abi, } @@ -176,11 +184,11 @@ func (s *streamManager) ensureSubscription(ctx context.Context, instancePath, st subName := fmt.Sprintf("%s_%s", abi.Name, instanceUniqueHash) for _, s := range existingSubs { - if s.Name == subName || + if s.Stream == stream && (s.Name == subName || /* Check for the plain name we used to use originally, before adding uniqueness qualifier. If one of these very early environments needed a new subscription, the existing one would need to be deleted manually. */ - s.Name == abi.Name { + s.Name == abi.Name) { sub = s } } @@ -190,7 +198,7 @@ func (s *streamManager) ensureSubscription(ctx context.Context, instancePath, st } if sub == nil { - if sub, err = s.createSubscription(ctx, location, stream, subName, abi); err != nil { + if sub, err = s.createSubscription(ctx, location, stream, subName, string(fftypes.SubOptsFirstEventOldest), abi); err != nil { return nil, err } } diff --git a/internal/blockchain/fabric/eventstream.go b/internal/blockchain/fabric/eventstream.go index 43965940db..4e2115503a 100644 --- a/internal/blockchain/fabric/eventstream.go +++ b/internal/blockchain/fabric/eventstream.go @@ -23,6 +23,7 @@ import ( "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/restclient" + "github.com/hyperledger/firefly/pkg/fftypes" ) type streamManager struct { @@ -112,7 +113,11 @@ func (s *streamManager) getSubscriptions(ctx context.Context) (subs []*subscript return subs, nil } -func (s *streamManager) createSubscription(ctx context.Context, location *Location, stream, name, event string) (*subscription, error) { +func (s *streamManager) createSubscription(ctx context.Context, location *Location, stream, name, event, fromBlock string) (*subscription, error) { + // Map FireFly "firstEvent" values to Fabric "fromBlock" values + if fromBlock == string(fftypes.SubOptsFirstEventOldest) { + fromBlock = "0" + } sub := subscription{ Name: name, Channel: location.Channel, @@ -122,7 +127,7 @@ func (s *streamManager) createSubscription(ctx context.Context, location *Locati ChaincodeID: location.Chaincode, EventFilter: event, }, - FromBlock: "0", + FromBlock: fromBlock, } res, err := s.client.R(). SetContext(ctx). 
@@ -153,13 +158,13 @@ func (s *streamManager) ensureSubscription(ctx context.Context, location *Locati subName := event for _, s := range existingSubs { - if s.Name == subName { + if s.Stream == stream && s.Name == subName { sub = s } } if sub == nil { - if sub, err = s.createSubscription(ctx, location, stream, subName, event); err != nil { + if sub, err = s.createSubscription(ctx, location, stream, subName, event, string(fftypes.SubOptsFirstEventOldest)); err != nil { return nil, err } } diff --git a/internal/blockchain/fabric/fabric.go b/internal/blockchain/fabric/fabric.go index a78c55b4af..7a89d426d6 100644 --- a/internal/blockchain/fabric/fabric.go +++ b/internal/blockchain/fabric/fabric.go @@ -692,20 +692,20 @@ func parseContractLocation(ctx context.Context, location *fftypes.JSONAny) (*Loc return &fabricLocation, nil } -func (f *Fabric) AddSubscription(ctx context.Context, subscription *fftypes.ContractListenerInput) error { - location, err := parseContractLocation(ctx, subscription.Location) +func (f *Fabric) AddContractListener(ctx context.Context, listener *fftypes.ContractListenerInput) error { + location, err := parseContractLocation(ctx, listener.Location) if err != nil { return err } - result, err := f.streams.createSubscription(ctx, location, f.initInfo.stream.ID, "", subscription.Event.Name) + result, err := f.streams.createSubscription(ctx, location, f.initInfo.stream.ID, "", listener.Event.Name, listener.Options.FirstEvent) if err != nil { return err } - subscription.ProtocolID = result.ID + listener.ProtocolID = result.ID return nil } -func (f *Fabric) DeleteSubscription(ctx context.Context, subscription *fftypes.ContractListener) error { +func (f *Fabric) DeleteContractListener(ctx context.Context, subscription *fftypes.ContractListener) error { return f.streams.deleteSubscription(ctx, subscription.ProtocolID) } diff --git a/internal/blockchain/fabric/fabric_test.go b/internal/blockchain/fabric/fabric_test.go index 5d9e0de2bc..33ef7a89a9 100644 --- a/internal/blockchain/fabric/fabric_test.go +++ b/internal/blockchain/fabric/fabric_test.go @@ -240,7 +240,7 @@ func TestInitAllExistingStreams(t *testing.T) { httpmock.NewJsonResponderOrPanic(200, []eventStream{{ID: "es12345", WebSocket: eventStreamWebsocket{Topic: "topic1"}}})) httpmock.RegisterResponder("GET", "http://localhost:12345/subscriptions", httpmock.NewJsonResponderOrPanic(200, []subscription{ - {ID: "sub12345", Name: "BatchPin"}, + {ID: "sub12345", Stream: "es12345", Name: "BatchPin"}, })) resetConf() @@ -1093,13 +1093,21 @@ func TestAddSubscription(t *testing.T) { "chaincode": "mycode", }.String()), Event: &fftypes.FFISerializedEvent{}, + Options: &fftypes.ContractListenerOptions{ + FirstEvent: string(fftypes.SubOptsFirstEventOldest), + }, }, } httpmock.RegisterResponder("POST", `http://localhost:12345/subscriptions`, - httpmock.NewJsonResponderOrPanic(200, &subscription{})) + func(req *http.Request) (*http.Response, error) { + var body map[string]interface{} + json.NewDecoder(req.Body).Decode(&body) + assert.Equal(t, "0", body["fromBlock"]) + return httpmock.NewJsonResponderOrPanic(200, &subscription{})(req) + }) - err := e.AddSubscription(context.Background(), sub) + err := e.AddContractListener(context.Background(), sub) assert.NoError(t, err) } @@ -1124,7 +1132,7 @@ func TestAddSubscriptionBadLocation(t *testing.T) { }, } - err := e.AddSubscription(context.Background(), sub) + err := e.AddContractListener(context.Background(), sub) assert.Regexp(t, "FF10310", err) } @@ -1149,13 +1157,16 @@ func 
TestAddSubscriptionFail(t *testing.T) { "chaincode": "mycode", }.String()), Event: &fftypes.FFISerializedEvent{}, + Options: &fftypes.ContractListenerOptions{ + FirstEvent: string(fftypes.SubOptsFirstEventNewest), + }, }, } httpmock.RegisterResponder("POST", `http://localhost:12345/subscriptions`, httpmock.NewStringResponder(500, "pop")) - err := e.AddSubscription(context.Background(), sub) + err := e.AddContractListener(context.Background(), sub) assert.Regexp(t, "FF10284", err) assert.Regexp(t, "pop", err) @@ -1181,7 +1192,7 @@ func TestDeleteSubscription(t *testing.T) { httpmock.RegisterResponder("DELETE", `http://localhost:12345/subscriptions/sb-1`, httpmock.NewStringResponder(204, "")) - err := e.DeleteSubscription(context.Background(), sub) + err := e.DeleteContractListener(context.Background(), sub) assert.NoError(t, err) } @@ -1206,7 +1217,7 @@ func TestDeleteSubscriptionFail(t *testing.T) { httpmock.RegisterResponder("DELETE", `http://localhost:12345/subscriptions/sb-1`, httpmock.NewStringResponder(500, "pop")) - err := e.DeleteSubscription(context.Background(), sub) + err := e.DeleteContractListener(context.Background(), sub) assert.Regexp(t, "FF10284", err) assert.Regexp(t, "pop", err) diff --git a/internal/broadcast/datatype_test.go b/internal/broadcast/datatype_test.go index 97381682ba..b2d009c5b6 100644 --- a/internal/broadcast/datatype_test.go +++ b/internal/broadcast/datatype_test.go @@ -73,12 +73,11 @@ func TestBroadcastDatatypeBadValue(t *testing.T) { func TestBroadcastUpsertFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) mdm := bm.data.(*datamocks.Manager) mim := bm.identity.(*identitymanagermocks.Manager) mim.On("ResolveInputSigningIdentity", mock.Anything, "ns1", mock.Anything).Return(nil) - mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) + mdm.On("WriteNewMessage", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) mdm.On("VerifyNamespaceExists", mock.Anything, "ns1").Return(nil) mdm.On("CheckDatatype", mock.Anything, "ns1", mock.Anything).Return(nil) @@ -89,6 +88,9 @@ func TestBroadcastUpsertFail(t *testing.T) { Value: fftypes.JSONAnyPtr(`{"some": "data"}`), }, false) assert.EqualError(t, err, "pop") + + mim.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestBroadcastDatatypeInvalid(t *testing.T) { @@ -112,40 +114,16 @@ func TestBroadcastDatatypeInvalid(t *testing.T) { assert.EqualError(t, err, "pop") } -func TestBroadcastBroadcastFail(t *testing.T) { - bm, cancel := newTestBroadcast(t) - defer cancel() - mdi := bm.database.(*databasemocks.Plugin) - mdm := bm.data.(*datamocks.Manager) - mim := bm.identity.(*identitymanagermocks.Manager) - - mim.On("ResolveInputSigningIdentity", mock.Anything, "ns1", mock.Anything).Return(nil) - mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) - mdm.On("VerifyNamespaceExists", mock.Anything, "ns1").Return(nil) - mdm.On("CheckDatatype", mock.Anything, "ns1", mock.Anything).Return(nil) - mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) - - _, err := bm.BroadcastDatatype(context.Background(), "ns1", &fftypes.Datatype{ - Namespace: "ns1", - Name: "ent1", - Version: "0.0.1", - Value: fftypes.JSONAnyPtr(`{"some": "data"}`), - }, false) - assert.EqualError(t, err, "pop") -} - func TestBroadcastOk(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := 
bm.database.(*databasemocks.Plugin) mdm := bm.data.(*datamocks.Manager) mim := bm.identity.(*identitymanagermocks.Manager) mim.On("ResolveInputSigningIdentity", mock.Anything, "ns1", mock.Anything).Return(nil) - mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) mdm.On("VerifyNamespaceExists", mock.Anything, "ns1").Return(nil) mdm.On("CheckDatatype", mock.Anything, "ns1", mock.Anything).Return(nil) - mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) + mdm.On("WriteNewMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) _, err := bm.BroadcastDatatype(context.Background(), "ns1", &fftypes.Datatype{ Namespace: "ns1", @@ -154,4 +132,7 @@ func TestBroadcastOk(t *testing.T) { Value: fftypes.JSONAnyPtr(`{"some": "data"}`), }, false) assert.NoError(t, err) + + mdm.AssertExpectations(t) + mim.AssertExpectations(t) } diff --git a/internal/broadcast/definition.go b/internal/broadcast/definition.go index 3a2e36a4a6..c466fecea5 100644 --- a/internal/broadcast/definition.go +++ b/internal/broadcast/definition.go @@ -20,9 +20,9 @@ import ( "context" "encoding/json" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/identity" - "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" ) @@ -52,10 +52,10 @@ func (bm *broadcastManager) BroadcastIdentityClaim(ctx context.Context, ns strin return bm.broadcastDefinitionCommon(ctx, ns, def, signingIdentity, tag, waitConfirm) } -func (bm *broadcastManager) broadcastDefinitionCommon(ctx context.Context, ns string, def fftypes.Definition, signingIdentity *fftypes.SignerRef, tag string, waitConfirm bool) (msg *fftypes.Message, err error) { +func (bm *broadcastManager) broadcastDefinitionCommon(ctx context.Context, ns string, def fftypes.Definition, signingIdentity *fftypes.SignerRef, tag string, waitConfirm bool) (*fftypes.Message, error) { // Serialize it into a data object, as a piece of data we can write to a message - data := &fftypes.Data{ + d := &fftypes.Data{ Validator: fftypes.ValidatorTypeSystemDefinition, ID: fftypes.NewUUID(), Namespace: ns, @@ -63,40 +63,39 @@ func (bm *broadcastManager) broadcastDefinitionCommon(ctx context.Context, ns st } b, err := json.Marshal(&def) if err == nil { - data.Value = fftypes.JSONAnyPtrBytes(b) - err = data.Seal(ctx, nil) + d.Value = fftypes.JSONAnyPtrBytes(b) + err = d.Seal(ctx, nil) } if err != nil { return nil, i18n.WrapError(ctx, err, i18n.MsgSerializationFailed) } - // Write as data to the local store - if err = bm.database.UpsertData(ctx, data, database.UpsertOptimizationNew); err != nil { - return nil, err - } - // Create a broadcast message referring to the data - in := &fftypes.MessageInOut{ - Message: fftypes.Message{ - Header: fftypes.MessageHeader{ - Namespace: ns, - Type: fftypes.MessageTypeDefinition, - SignerRef: *signingIdentity, - Topics: fftypes.FFStringArray{def.Topic()}, - Tag: tag, - TxType: fftypes.TransactionTypeBatchPin, - }, - Data: fftypes.DataRefs{ - {ID: data.ID, Hash: data.Hash}, + newMsg := &data.NewMessage{ + Message: &fftypes.MessageInOut{ + Message: fftypes.Message{ + Header: fftypes.MessageHeader{ + Namespace: ns, + Type: fftypes.MessageTypeDefinition, + SignerRef: *signingIdentity, + Topics: fftypes.FFStringArray{def.Topic()}, + Tag: tag, + TxType: fftypes.TransactionTypeBatchPin, + }, + Data: fftypes.DataRefs{ + {ID: d.ID, Hash: d.Hash, ValueSize: d.ValueSize}, + 
}, }, }, + NewData: fftypes.DataArray{d}, + AllData: fftypes.DataArray{d}, } // Broadcast the message sender := broadcastSender{ mgr: bm, namespace: ns, - msg: in, + msg: newMsg, resolved: true, } sender.setDefaults() @@ -105,5 +104,5 @@ func (bm *broadcastManager) broadcastDefinitionCommon(ctx context.Context, ns st } else { err = sender.Send(ctx) } - return &in.Message, err + return &newMsg.Message.Message, err } diff --git a/internal/broadcast/definition_test.go b/internal/broadcast/definition_test.go index 6afeb24585..b0aaaebd10 100644 --- a/internal/broadcast/definition_test.go +++ b/internal/broadcast/definition_test.go @@ -21,10 +21,8 @@ import ( "testing" "github.com/hyperledger/firefly/internal/identity" - "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" - "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -34,18 +32,15 @@ func TestBroadcastDefinitionAsNodeConfirm(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) msa := bm.syncasync.(*syncasyncmocks.Bridge) mim := bm.identity.(*identitymanagermocks.Manager) - mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) mim.On("ResolveInputSigningIdentity", mock.Anything, "ff_system", mock.Anything).Return(nil) msa.On("WaitForMessage", bm.ctx, "ff_system", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) _, err := bm.BroadcastDefinitionAsNode(bm.ctx, fftypes.SystemNamespace, &fftypes.Namespace{}, fftypes.SystemTagDefineNamespace, true) assert.EqualError(t, err, "pop") - mdi.AssertExpectations(t) msa.AssertExpectations(t) mim.AssertExpectations(t) } @@ -54,11 +49,9 @@ func TestBroadcastIdentityClaim(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) msa := bm.syncasync.(*syncasyncmocks.Bridge) mim := bm.identity.(*identitymanagermocks.Manager) - mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) mim.On("NormalizeSigningKey", mock.Anything, "0x1234", identity.KeyNormalizationBlockchainPlugin).Return("", nil) msa.On("WaitForMessage", bm.ctx, "ff_system", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) @@ -69,7 +62,6 @@ func TestBroadcastIdentityClaim(t *testing.T) { }, fftypes.SystemTagDefineNamespace, true) assert.EqualError(t, err, "pop") - mdi.AssertExpectations(t) msa.AssertExpectations(t) mim.AssertExpectations(t) } @@ -96,35 +88,20 @@ func TestBroadcastDatatypeDefinitionAsNodeConfirm(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) msa := bm.syncasync.(*syncasyncmocks.Bridge) mim := bm.identity.(*identitymanagermocks.Manager) ns := "customNamespace" - mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) mim.On("ResolveInputSigningIdentity", mock.Anything, ns, mock.Anything).Return(nil) msa.On("WaitForMessage", bm.ctx, ns, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) _, err := bm.BroadcastDefinitionAsNode(bm.ctx, ns, &fftypes.Datatype{}, fftypes.SystemTagDefineNamespace, true) assert.EqualError(t, err, "pop") - mdi.AssertExpectations(t) msa.AssertExpectations(t) mim.AssertExpectations(t) } -func TestBroadcastDefinitionAsNodeUpsertFail(t *testing.T) { - bm, cancel := 
newTestBroadcast(t) - defer cancel() - - mdi := bm.database.(*databasemocks.Plugin) - mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) - mim := bm.identity.(*identitymanagermocks.Manager) - mim.On("ResolveInputSigningIdentity", mock.Anything, fftypes.SystemNamespace, mock.Anything).Return(nil) - _, err := bm.BroadcastDefinitionAsNode(bm.ctx, fftypes.SystemNamespace, &fftypes.Namespace{}, fftypes.SystemTagDefineNamespace, false) - assert.Regexp(t, "pop", err) -} - func TestBroadcastDefinitionBadIdentity(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() diff --git a/internal/broadcast/manager.go b/internal/broadcast/manager.go index d2e0cf0943..18b75c0f61 100644 --- a/internal/broadcast/manager.go +++ b/internal/broadcast/manager.go @@ -17,9 +17,7 @@ package broadcast import ( - "bytes" "context" - "encoding/json" "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/batchpin" @@ -29,6 +27,7 @@ import ( "github.com/hyperledger/firefly/internal/identity" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/metrics" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/internal/sysmessaging" "github.com/hyperledger/firefly/pkg/blockchain" @@ -41,6 +40,8 @@ import ( const broadcastDispatcherName = "pinned_broadcast" type Manager interface { + fftypes.Named + NewBroadcast(ns string, in *fftypes.MessageInOut) sysmessaging.MessageSender BroadcastDatatype(ctx context.Context, ns string, datatype *fftypes.Datatype, waitConfirm bool) (msg *fftypes.Message, err error) BroadcastNamespace(ctx context.Context, ns *fftypes.Namespace, waitConfirm bool) (msg *fftypes.Message, err error) @@ -51,6 +52,10 @@ type Manager interface { BroadcastTokenPool(ctx context.Context, ns string, pool *fftypes.TokenPoolAnnouncement, waitConfirm bool) (msg *fftypes.Message, err error) Start() error WaitStop() + + // From operations.OperationHandler + PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) + RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) } type broadcastManager struct { @@ -66,10 +71,11 @@ type broadcastManager struct { batchpin batchpin.Submitter maxBatchPayloadLength int64 metrics metrics.Manager + operations operations.Manager } -func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, bi blockchain.Plugin, dx dataexchange.Plugin, pi sharedstorage.Plugin, ba batch.Manager, sa syncasync.Bridge, bp batchpin.Submitter, mm metrics.Manager) (Manager, error) { - if di == nil || im == nil || dm == nil || bi == nil || dx == nil || pi == nil || ba == nil { +func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, bi blockchain.Plugin, dx dataexchange.Plugin, si sharedstorage.Plugin, ba batch.Manager, sa syncasync.Bridge, bp batchpin.Submitter, mm metrics.Manager, om operations.Manager) (Manager, error) { + if di == nil || im == nil || dm == nil || bi == nil || dx == nil || si == nil || ba == nil || mm == nil || om == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } bm := &broadcastManager{ @@ -79,19 +85,23 @@ func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Ma data: dm, blockchain: bi, exchange: dx, - sharedstorage: pi, + 
sharedstorage: si, batch: ba, syncasync: sa, batchpin: bp, maxBatchPayloadLength: config.GetByteSize(config.BroadcastBatchPayloadLimit), metrics: mm, + operations: om, } + bo := batch.DispatcherOptions{ + BatchType: fftypes.BatchTypeBroadcast, BatchMaxSize: config.GetUint(config.BroadcastBatchSize), BatchMaxBytes: bm.maxBatchPayloadLength, BatchTimeout: config.GetDuration(config.BroadcastBatchTimeout), DisposeTimeout: config.GetDuration(config.BroadcastBatchAgentTimeout), } + ba.RegisterDispatcher(broadcastDispatcherName, fftypes.TransactionTypeBatchPin, []fftypes.MessageType{ @@ -99,75 +109,71 @@ func NewBroadcastManager(ctx context.Context, di database.Plugin, im identity.Ma fftypes.MessageTypeDefinition, fftypes.MessageTypeTransferBroadcast, }, bm.dispatchBatch, bo) - return bm, nil -} - -func (bm *broadcastManager) dispatchBatch(ctx context.Context, batch *fftypes.Batch, pins []*fftypes.Bytes32) error { - // Serialize the full payload, which has already been sealed for us by the BatchManager - payload, err := json.Marshal(batch) - if err != nil { - return i18n.WrapError(ctx, err, i18n.MsgSerializationFailed) - } + om.RegisterHandler(ctx, bm, []fftypes.OpType{ + fftypes.OpTypeSharedStorageUploadBatch, + fftypes.OpTypeSharedStorageUploadBlob, + }) - // Write it to IPFS to get a payload reference - // The payload ref will be persisted back to the batch, as well as being used in the TX - batch.PayloadRef, err = bm.sharedstorage.PublishData(ctx, bytes.NewReader(payload)) - if err != nil { - return err - } + return bm, nil +} - return bm.database.RunAsGroup(ctx, func(ctx context.Context) error { - return bm.submitTXAndUpdateDB(ctx, batch, pins) - }) +func (bm *broadcastManager) Name() string { + return "BroadcastManager" } -func (bm *broadcastManager) submitTXAndUpdateDB(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { +func (bm *broadcastManager) dispatchBatch(ctx context.Context, state *batch.DispatchState) error { - // Update the batch to store the payloadRef - err := bm.database.UpdateBatch(ctx, batch.ID, database.BatchQueryFactory.NewUpdate(ctx).Set("payloadref", batch.PayloadRef)) - if err != nil { + // Ensure all the blobs are published + if err := bm.uploadBlobs(ctx, state.Persisted.TX.ID, state.Data); err != nil { return err } - // The completed SharedStorage upload + // Upload the batch itself op := fftypes.NewOperation( bm.sharedstorage, - batch.Namespace, - batch.Payload.TX.ID, - fftypes.OpTypeSharedStorageBatchBroadcast) - op.Status = fftypes.OpStatusSucceeded // Note we performed the action synchronously above - err = bm.database.InsertOperation(ctx, op) - if err != nil { + state.Persisted.Namespace, + state.Persisted.TX.ID, + fftypes.OpTypeSharedStorageUploadBatch) + addUploadBatchInputs(op, state.Persisted.ID) + if err := bm.operations.AddOrReuseOperation(ctx, op); err != nil { return err } + batch := state.Persisted.GenInflight(state.Messages, state.Data) - log.L(ctx).Infof("Pinning broadcast batch %s with author=%s key=%s", batch.ID, batch.Author, batch.Key) - return bm.batchpin.SubmitPinnedBatch(ctx, batch, contexts) + // We are in an (indefinite) retry cycle from the batch processor to dispatch this batch, that is only + // terminated by shutdown. So we leave the operation pending on failure, as it is still being retried. + // The user will still have the failure details recorded.
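+ // (For context: operations.RemainPendingOnFailure, passed to RunOperation below, keeps the + // operation in pending state when the upload fails, with the error recorded against it, so + // the AddOrReuseOperation call above can pick the same operation back up on the next retry.)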
+ if err := bm.operations.RunOperation(ctx, opUploadBatch(op, batch, &state.Persisted), operations.RemainPendingOnFailure); err != nil { + return err + } + log.L(ctx).Infof("Pinning broadcast batch %s with author=%s key=%s payload=%s", batch.ID, batch.Author, batch.Key, state.Persisted.PayloadRef) + return bm.batchpin.SubmitPinnedBatch(ctx, &state.Persisted, state.Pins) } -func (bm *broadcastManager) publishBlobs(ctx context.Context, dataToPublish []*fftypes.DataAndBlob) error { - for _, d := range dataToPublish { - // Stream from the local data exchange ... - reader, err := bm.exchange.DownloadBLOB(ctx, d.Blob.PayloadRef) - if err != nil { - return i18n.WrapError(ctx, err, i18n.MsgDownloadBlobFailed, d.Blob.PayloadRef) - } - defer reader.Close() - - // ... to the shared storage - sharedRef, err := bm.sharedstorage.PublishData(ctx, reader) - if err != nil { - return err - } - log.L(ctx).Infof("Published blob with hash '%s' for data '%s' to shared storage: '%s'", d.Blob.Hash, d.Data.ID, sharedRef) - - // Update the data in the database, with the shared reference. - // We do this independently for each piece of data - update := database.DataQueryFactory.NewUpdate(ctx).Set("blob.public", sharedRef) - err = bm.database.UpdateData(ctx, d.Data.ID, update) - if err != nil { - return err +func (bm *broadcastManager) uploadBlobs(ctx context.Context, tx *fftypes.UUID, data fftypes.DataArray) error { + for _, d := range data { + // We only need to send a blob if there is one, and it's not been uploaded to the shared storage + if d.Blob != nil && d.Blob.Hash != nil && d.Blob.Public == "" { + + op := fftypes.NewOperation( + bm.sharedstorage, + d.Namespace, + tx, + fftypes.OpTypeSharedStorageUploadBlob) + addUploadBlobInputs(op, d.ID) + + blob, err := bm.database.GetBlobMatchingHash(ctx, d.Blob.Hash) + if err != nil { + return err + } else if blob == nil { + return i18n.NewError(ctx, i18n.MsgBlobNotFound, d.Blob.Hash) + } + + err = bm.operations.RunOperation(ctx, opUploadBlob(op, d, blob)) + if err != nil { + return err + } } } diff --git a/internal/broadcast/manager_test.go b/internal/broadcast/manager_test.go index 492774ec83..42b0e2ca97 100644 --- a/internal/broadcast/manager_test.go +++ b/internal/broadcast/manager_test.go @@ -17,14 +17,14 @@ package broadcast import ( - "bytes" "context" "fmt" - "io" - "io/ioutil" "testing" + "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/data" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/mocks/batchmocks" "github.com/hyperledger/firefly/mocks/batchpinmocks" "github.com/hyperledger/firefly/mocks/blockchainmocks" @@ -33,9 +33,9 @@ import ( "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/metricsmocks" + "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/mocks/sharedstoragemocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" - "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -53,6 +53,7 @@ func newTestBroadcastCommon(t *testing.T, metricsEnabled bool) (*broadcastManage msa := &syncasyncmocks.Bridge{} mbp := &batchpinmocks.Submitter{} mmi := &metricsmocks.Manager{} + mom := &operationmocks.Manager{} mmi.On("IsMetricsEnabled").Return(metricsEnabled) 
mbi.On("Name").Return("ut_blockchain").Maybe() mpi.On("Name").Return("ut_sharedstorage").Maybe() @@ -64,6 +65,7 @@ func newTestBroadcastCommon(t *testing.T, metricsEnabled bool) (*broadcastManage fftypes.MessageTypeDefinition, fftypes.MessageTypeTransferBroadcast, }, mock.Anything, mock.Anything).Return() + mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() rag.RunFn = func(a mock.Arguments) { @@ -73,7 +75,7 @@ func newTestBroadcastCommon(t *testing.T, metricsEnabled bool) (*broadcastManage } ctx, cancel := context.WithCancel(context.Background()) - b, err := NewBroadcastManager(ctx, mdi, mim, mdm, mbi, mdx, mpi, mba, msa, mbp, mmi) + b, err := NewBroadcastManager(ctx, mdi, mim, mdm, mbi, mdx, mpi, mba, msa, mbp, mmi, mom) assert.NoError(t, err) return b.(*broadcastManager), cancel } @@ -90,300 +92,298 @@ func newTestBroadcastWithMetrics(t *testing.T) (*broadcastManager, func()) { } func TestInitFail(t *testing.T) { - _, err := NewBroadcastManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + _, err := NewBroadcastManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } +func TestName(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + assert.Equal(t, "BroadcastManager", bm.Name()) +} + func TestBroadcastMessageGood(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - msg := &fftypes.MessageInOut{} - bm.database.(*databasemocks.Plugin).On("UpsertMessage", mock.Anything, &msg.Message, database.UpsertOptimizationNew).Return(nil) + dataID := fftypes.NewUUID() + dataHash := fftypes.NewRandB32() + newMsg := &data.NewMessage{ + Message: &fftypes.MessageInOut{ + Message: fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + }, + Data: fftypes.DataRefs{ + {ID: dataID, Hash: dataHash}, + }, + }, + }, + AllData: fftypes.DataArray{ + {ID: dataID, Hash: dataHash}, + }, + } + + mdm := bm.data.(*datamocks.Manager) + mdm.On("WriteNewMessage", mock.Anything, newMsg).Return(nil) broadcast := broadcastSender{ mgr: bm, - msg: msg, + msg: newMsg, } err := broadcast.sendInternal(context.Background(), methodSend) assert.NoError(t, err) bm.Start() bm.WaitStop() + + mdm.AssertExpectations(t) } func TestBroadcastMessageBad(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - dupID := fftypes.NewUUID() - msg := &fftypes.MessageInOut{ - Message: fftypes.Message{ - Data: fftypes.DataRefs{ - {ID: dupID /* missing hash */}, + newMsg := &data.NewMessage{ + Message: &fftypes.MessageInOut{ + Message: fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + }, + Data: fftypes.DataRefs{ + {ID: fftypes.NewUUID(), Hash: nil}, + }, }, }, } - bm.database.(*databasemocks.Plugin).On("UpsertMessage", mock.Anything, msg, false).Return(nil) broadcast := broadcastSender{ mgr: bm, - msg: msg, + msg: newMsg, } err := broadcast.sendInternal(context.Background(), methodSend) assert.Regexp(t, "FF10144", err) } -func TestDispatchBatchInvalidData(t *testing.T) { +func TestDispatchBatchBlobsFaill(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - err := bm.dispatchBatch(context.Background(), &fftypes.Batch{ - Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ - {Value: fftypes.JSONAnyPtr(`!json`)}, - }, + blobHash := fftypes.NewRandB32() + state := &batch.DispatchState{ + Data: []*fftypes.Data{ + {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ + Hash: 
blobHash, + }}, }, - }, []*fftypes.Bytes32{fftypes.NewRandB32()}) - assert.Regexp(t, "FF10137", err) -} + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, + } -func TestDispatchBatchUploadFail(t *testing.T) { - bm, cancel := newTestBroadcast(t) - defer cancel() - bm.sharedstorage.(*sharedstoragemocks.Plugin).On("PublishData", mock.Anything, mock.Anything).Return("", fmt.Errorf("pop")) + mdi := bm.database.(*databasemocks.Plugin) + mdi.On("GetBlobMatchingHash", bm.ctx, blobHash).Return(nil, fmt.Errorf("pop")) - err := bm.dispatchBatch(context.Background(), &fftypes.Batch{}, []*fftypes.Bytes32{fftypes.NewRandB32()}) + err := bm.dispatchBatch(bm.ctx, state) assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) } -func TestDispatchBatchSubmitBatchPinSucceed(t *testing.T) { +func TestDispatchBatchInsertOpFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), + state := &batch.DispatchState{ + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, } - mdi := bm.database.(*databasemocks.Plugin) - mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) - mbp := bm.batchpin.(*batchpinmocks.Submitter) - mps.On("PublishData", mock.Anything, mock.Anything).Return("id1", nil) - mdi.On("UpdateBatch", mock.Anything, batch.ID, mock.Anything).Return(nil) - mdi.On("InsertOperation", mock.Anything, mock.Anything).Return(nil) - mbp.On("SubmitPinnedBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) + mom := bm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := bm.dispatchBatch(context.Background(), batch, []*fftypes.Bytes32{fftypes.NewRandB32()}) - assert.NoError(t, err) + err := bm.dispatchBatch(context.Background(), state) + assert.EqualError(t, err, "pop") + + mom.AssertExpectations(t) } -func TestDispatchBatchSubmitBroadcastFail(t *testing.T) { +func TestDispatchBatchUploadFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) - mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) - mbp := bm.batchpin.(*batchpinmocks.Submitter) - mps.On("PublishData", mock.Anything, mock.Anything).Return("id1", nil) - mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - mdi.On("InsertOperation", mock.Anything, mock.Anything).Return(nil) - mbp.On("SubmitPinnedBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - - err := bm.dispatchBatch(context.Background(), &fftypes.Batch{SignerRef: fftypes.SignerRef{Author: "wrong", Key: "wrong"}}, []*fftypes.Bytes32{fftypes.NewRandB32()}) - assert.EqualError(t, err, "pop") -} + state := &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + }, + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, + } -func TestSubmitTXAndUpdateDBUpdateBatchFail(t *testing.T) { - bm, cancel := newTestBroadcast(t) - defer cancel() + mom := bm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(nil) + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(uploadBatchData) + return op.Type == fftypes.OpTypeSharedStorageUploadBatch && data.Batch.ID.Equals(state.Persisted.ID) + }), operations.RemainPendingOnFailure).Return(fmt.Errorf("pop")) - mdi := bm.database.(*databasemocks.Plugin) - mdi.On("UpsertTransaction", mock.Anything, mock.Anything, 
false).Return(nil) - mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - bm.blockchain.(*blockchainmocks.Plugin).On("SubmitBatchPin", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("", fmt.Errorf("pop")) + err := bm.dispatchBatch(context.Background(), state) + assert.EqualError(t, err, "pop") - err := bm.submitTXAndUpdateDB(context.Background(), &fftypes.Batch{SignerRef: fftypes.SignerRef{Author: "org1", Key: "0x12345"}}, []*fftypes.Bytes32{fftypes.NewRandB32()}) - assert.Regexp(t, "pop", err) + mom.AssertExpectations(t) } -func TestSubmitTXAndUpdateDBAddOp1Fail(t *testing.T) { +func TestDispatchBatchSubmitBatchPinSucceed(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) - mbi := bm.blockchain.(*blockchainmocks.Plugin) - mdi.On("UpsertTransaction", mock.Anything, mock.Anything, false).Return(nil) - mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - mdi.On("InsertOperation", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mbi.On("SubmitBatchPin", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("txid", nil) - mbi.On("Name").Return("unittest") - - batch := &fftypes.Batch{ - SignerRef: fftypes.SignerRef{Author: "org1", Key: "0x12345"}, - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{ - {Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - }}, + state := &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), }, }, + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, } - err := bm.submitTXAndUpdateDB(context.Background(), batch, []*fftypes.Bytes32{fftypes.NewRandB32()}) - assert.Regexp(t, "pop", err) + mdi := bm.database.(*databasemocks.Plugin) + mbp := bm.batchpin.(*batchpinmocks.Submitter) + mom := bm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(nil) + mbp.On("SubmitPinnedBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(uploadBatchData) + return op.Type == fftypes.OpTypeSharedStorageUploadBatch && data.Batch.ID.Equals(state.Persisted.ID) + }), operations.RemainPendingOnFailure).Return(nil) + + err := bm.dispatchBatch(context.Background(), state) + assert.NoError(t, err) + + mdi.AssertExpectations(t) + mbp.AssertExpectations(t) + mom.AssertExpectations(t) } -func TestSubmitTXAndUpdateDBSucceed(t *testing.T) { +func TestDispatchBatchSubmitBroadcastFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) - mbi := bm.blockchain.(*blockchainmocks.Plugin) - mbp := bm.batchpin.(*batchpinmocks.Submitter) - mdi.On("UpsertTransaction", mock.Anything, mock.Anything, false).Return(nil) - mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - mdi.On("InsertOperation", mock.Anything, mock.Anything).Return(nil) - mbi.On("SubmitBatchPin", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - mbp.On("SubmitPinnedBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil) - - msgID := fftypes.NewUUID() - batch := &fftypes.Batch{ - SignerRef: fftypes.SignerRef{Author: "org1", Key: "0x12345"}, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: fftypes.NewUUID(), - }, - Messages: []*fftypes.Message{ 
- {Header: fftypes.MessageHeader{ - ID: msgID, - }}, + state := &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{Author: "wrong", Key: "wrong"}, }, }, - PayloadRef: "ipfs_id", + Pins: []*fftypes.Bytes32{fftypes.NewRandB32()}, } - err := bm.submitTXAndUpdateDB(context.Background(), batch, []*fftypes.Bytes32{fftypes.NewRandB32()}) - assert.NoError(t, err) + mdi := bm.database.(*databasemocks.Plugin) + mbp := bm.batchpin.(*batchpinmocks.Submitter) + mom := bm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", mock.Anything, mock.Anything).Return(nil) + mbp.On("SubmitPinnedBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(uploadBatchData) + return op.Type == fftypes.OpTypeSharedStorageUploadBatch && data.Batch.ID.Equals(state.Persisted.ID) + }), operations.RemainPendingOnFailure).Return(nil) - op := mdi.Calls[1].Arguments[1].(*fftypes.Operation) - assert.Equal(t, *batch.Payload.TX.ID, *op.Transaction) - assert.Equal(t, "ut_sharedstorage", op.Plugin) - assert.Equal(t, fftypes.OpTypeSharedStorageBatchBroadcast, op.Type) + err := bm.dispatchBatch(context.Background(), state) + assert.EqualError(t, err, "pop") + mdi.AssertExpectations(t) + mbp.AssertExpectations(t) + mom.AssertExpectations(t) } -func TestPublishBlobsUpdateDataFail(t *testing.T) { +func TestUploadBlobsPublishFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) mdx := bm.exchange.(*dataexchangemocks.Plugin) mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) - mim := bm.identity.(*identitymanagermocks.Manager) + mdi := bm.database.(*databasemocks.Plugin) + mom := bm.operations.(*operationmocks.Manager) - blobHash := fftypes.NewRandB32() + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } dataID := fftypes.NewUUID() ctx := context.Background() - mdx.On("DownloadBLOB", ctx, "blob/1").Return(ioutil.NopCloser(bytes.NewReader([]byte(`some data`))), nil) - mps.On("PublishData", ctx, mock.MatchedBy(func(reader io.ReadCloser) bool { - b, err := ioutil.ReadAll(reader) - assert.NoError(t, err) - assert.Equal(t, "some data", string(b)) - return true - })).Return("payload-ref", nil) - mdi.On("UpdateData", ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - mim.On("ResolveInputIdentity", ctx, mock.Anything).Return(nil) - - err := bm.publishBlobs(ctx, []*fftypes.DataAndBlob{ + mdi.On("GetBlobMatchingHash", ctx, blob.Hash).Return(blob, nil) + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(uploadBlobData) + return op.Type == fftypes.OpTypeSharedStorageUploadBlob && data.Blob == blob + })).Return(fmt.Errorf("pop")) + + err := bm.uploadBlobs(ctx, fftypes.NewUUID(), fftypes.DataArray{ { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", + ID: dataID, + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, }, }, }) assert.EqualError(t, err, "pop") mdi.AssertExpectations(t) + mdx.AssertExpectations(t) + mps.AssertExpectations(t) + } -func TestPublishBlobsPublishFail(t *testing.T) { +func TestUploadBlobsGetBlobFail(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() mdi := bm.database.(*databasemocks.Plugin) - mdx := 
bm.exchange.(*dataexchangemocks.Plugin) - mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) - mim := bm.identity.(*identitymanagermocks.Manager) - blobHash := fftypes.NewRandB32() + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } dataID := fftypes.NewUUID() ctx := context.Background() - mdx.On("DownloadBLOB", ctx, "blob/1").Return(ioutil.NopCloser(bytes.NewReader([]byte(`some data`))), nil) - mps.On("PublishData", ctx, mock.MatchedBy(func(reader io.ReadCloser) bool { - b, err := ioutil.ReadAll(reader) - assert.NoError(t, err) - assert.Equal(t, "some data", string(b)) - return true - })).Return("", fmt.Errorf("pop")) - mim.On("ResolveInputIdentity", ctx, mock.Anything).Return(nil) - - err := bm.publishBlobs(ctx, []*fftypes.DataAndBlob{ + mdi.On("GetBlobMatchingHash", ctx, blob.Hash).Return(nil, fmt.Errorf("pop")) + + err := bm.uploadBlobs(ctx, fftypes.NewUUID(), fftypes.DataArray{ { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", + ID: dataID, + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, }, }, }) - assert.EqualError(t, err, "pop") + assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) + } -func TestPublishBlobsDownloadFail(t *testing.T) { +func TestUploadBlobsGetBlobNotFound(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() mdi := bm.database.(*databasemocks.Plugin) - mdx := bm.exchange.(*dataexchangemocks.Plugin) - mim := bm.identity.(*identitymanagermocks.Manager) - blobHash := fftypes.NewRandB32() + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "blob/1", + } dataID := fftypes.NewUUID() ctx := context.Background() - mdx.On("DownloadBLOB", ctx, "blob/1").Return(nil, fmt.Errorf("pop")) - mim.On("ResolveInputIdentity", ctx, mock.Anything).Return(nil) + mdi.On("GetBlobMatchingHash", ctx, blob.Hash).Return(nil, nil) - err := bm.publishBlobs(ctx, []*fftypes.DataAndBlob{ + err := bm.uploadBlobs(ctx, fftypes.NewUUID(), fftypes.DataArray{ { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", + ID: dataID, + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, }, }, }) - assert.Regexp(t, "FF10240", err) + assert.Regexp(t, "FF10239", err) mdi.AssertExpectations(t) + } diff --git a/internal/broadcast/message.go b/internal/broadcast/message.go index e26da5cb7a..eceb37a18c 100644 --- a/internal/broadcast/message.go +++ b/internal/broadcast/message.go @@ -19,10 +19,10 @@ package broadcast import ( "context" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/sysmessaging" - "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" ) @@ -30,7 +30,9 @@ func (bm *broadcastManager) NewBroadcast(ns string, in *fftypes.MessageInOut) sy broadcast := &broadcastSender{ mgr: bm, namespace: ns, - msg: in, + msg: &data.NewMessage{ + Message: in, + }, } broadcast.setDefaults() return broadcast @@ -52,7 +54,7 @@ func (bm *broadcastManager) BroadcastMessage(ctx context.Context, ns string, in type broadcastSender struct { mgr *broadcastManager namespace string - msg *fftypes.MessageInOut + msg *data.NewMessage resolved bool } @@ -82,92 +84,70 @@ func (s *broadcastSender) SendAndWait(ctx context.Context) error { } func (s *broadcastSender) setDefaults() { - s.msg.Header.ID = 
fftypes.NewUUID() - s.msg.Header.Namespace = s.namespace - s.msg.State = fftypes.MessageStateReady - if s.msg.Header.Type == "" { - s.msg.Header.Type = fftypes.MessageTypeBroadcast + msg := s.msg.Message + msg.Header.ID = fftypes.NewUUID() + msg.Header.Namespace = s.namespace + msg.State = fftypes.MessageStateReady + if msg.Header.Type == "" { + msg.Header.Type = fftypes.MessageTypeBroadcast } // We only have one transaction type for broadcast currently - s.msg.Header.TxType = fftypes.TransactionTypeBatchPin + msg.Header.TxType = fftypes.TransactionTypeBatchPin } func (s *broadcastSender) resolveAndSend(ctx context.Context, method sendMethod) error { - sent := false - - // We optimize the DB storage of all the parts of the message using transaction semantics (assuming those are supported by the DB plugin) - var dataToPublish []*fftypes.DataAndBlob - err := s.mgr.database.RunAsGroup(ctx, func(ctx context.Context) (err error) { - if !s.resolved { - if dataToPublish, err = s.resolve(ctx); err != nil { - return err - } - msgSizeEstimate := s.msg.EstimateSize(true) - if msgSizeEstimate > s.mgr.maxBatchPayloadLength { - return i18n.NewError(ctx, i18n.MsgTooLargeBroadcast, float64(msgSizeEstimate)/1024, float64(s.mgr.maxBatchPayloadLength)/1024) - } - s.resolved = true - } - - // For the simple case where we have no data to publish and aren't waiting for blockchain confirmation, - // insert the local message immediately within the same DB transaction. - // Otherwise, break out of the DB transaction (since those operations could take multiple seconds). - if len(dataToPublish) == 0 && method != methodSendAndWait { - sent = true - return s.sendInternal(ctx, method) - } - return nil - }) - - if err != nil || sent { - return err - } - // Perform deferred processing - if len(dataToPublish) > 0 { - if err := s.mgr.publishBlobs(ctx, dataToPublish); err != nil { + if !s.resolved { + if err := s.resolve(ctx); err != nil { return err } + msgSizeEstimate := s.msg.Message.EstimateSize(true) + if msgSizeEstimate > s.mgr.maxBatchPayloadLength { + return i18n.NewError(ctx, i18n.MsgTooLargeBroadcast, float64(msgSizeEstimate)/1024, float64(s.mgr.maxBatchPayloadLength)/1024) + } + s.resolved = true } return s.sendInternal(ctx, method) } -func (s *broadcastSender) resolve(ctx context.Context) ([]*fftypes.DataAndBlob, error) { +func (s *broadcastSender) resolve(ctx context.Context) error { + msg := s.msg.Message + // Resolve the sending identity - if s.msg.Header.Type != fftypes.MessageTypeDefinition || s.msg.Header.Tag != fftypes.SystemTagIdentityClaim { - if err := s.mgr.identity.ResolveInputSigningIdentity(ctx, s.msg.Header.Namespace, &s.msg.Header.SignerRef); err != nil { - return nil, i18n.WrapError(ctx, err, i18n.MsgAuthorInvalid) + if msg.Header.Type != fftypes.MessageTypeDefinition || msg.Header.Tag != fftypes.SystemTagIdentityClaim { + if err := s.mgr.identity.ResolveInputSigningIdentity(ctx, msg.Header.Namespace, &msg.Header.SignerRef); err != nil { + return i18n.WrapError(ctx, err, i18n.MsgAuthorInvalid) } } // The data manager is responsible for the heavy lifting of storing/validating all our in-line data elements - dataRefs, dataToPublish, err := s.mgr.data.ResolveInlineDataBroadcast(ctx, s.namespace, s.msg.InlineData) - s.msg.Message.Data = dataRefs - return dataToPublish, err + err := s.mgr.data.ResolveInlineData(ctx, s.msg) + return err } func (s *broadcastSender) sendInternal(ctx context.Context, method sendMethod) (err error) { if method == methodSendAndWait { - out, err := 
s.mgr.syncasync.WaitForMessage(ctx, s.namespace, s.msg.Header.ID, s.Send) + out, err := s.mgr.syncasync.WaitForMessage(ctx, s.namespace, s.msg.Message.Header.ID, s.Send) if out != nil { - s.msg.Message = *out + s.msg.Message.Message = *out } return err } // Seal the message - if err := s.msg.Seal(ctx); err != nil { + msg := s.msg.Message + if err := msg.Seal(ctx); err != nil { return err } if method == methodPrepare { return nil } - // Store the message - this asynchronously triggers the next step in process - if err := s.mgr.database.UpsertMessage(ctx, &s.msg.Message, database.UpsertOptimizationNew); err != nil { + // Write the message + if err := s.mgr.data.WriteNewMessage(ctx, s.msg); err != nil { return err } - log.L(ctx).Infof("Sent broadcast message %s:%s sequence=%d", s.msg.Header.Namespace, s.msg.Header.ID, s.msg.Sequence) + log.L(ctx).Infof("Sent broadcast message %s:%s sequence=%d datacount=%d", msg.Header.Namespace, msg.Header.ID, msg.Sequence, len(s.msg.AllData)) return err } diff --git a/internal/broadcast/message_test.go b/internal/broadcast/message_test.go index b0066517fb..949ca7a82c 100644 --- a/internal/broadcast/message_test.go +++ b/internal/broadcast/message_test.go @@ -17,21 +17,15 @@ package broadcast import ( - "bytes" "context" "fmt" - "io" - "io/ioutil" "testing" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/syncasync" - "github.com/hyperledger/firefly/mocks/databasemocks" - "github.com/hyperledger/firefly/mocks/dataexchangemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" - "github.com/hyperledger/firefly/mocks/sharedstoragemocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" - "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -40,20 +34,12 @@ import ( func TestBroadcastMessageOk(t *testing.T) { bm, cancel := newTestBroadcastWithMetrics(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) mdm := bm.data.(*datamocks.Manager) mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - rag := mdi.On("RunAsGroup", ctx, mock.Anything) - rag.RunFn = func(a mock.Arguments) { - var fn = a[1].(func(context.Context) error) - rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} - } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, - }, []*fftypes.DataAndBlob{}, nil) - mdi.On("UpsertMessage", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) + mdm.On("ResolveInlineData", ctx, mock.Anything).Return(nil) + mdm.On("WriteNewMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) msg, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ @@ -70,31 +56,21 @@ func TestBroadcastMessageOk(t *testing.T) { }, }, false) assert.NoError(t, err) - assert.NotNil(t, msg.Data[0].ID) - assert.NotNil(t, msg.Data[0].Hash) assert.Equal(t, "ns1", msg.Header.Namespace) - mdi.AssertExpectations(t) + mim.AssertExpectations(t) mdm.AssertExpectations(t) } func TestBroadcastMessageWaitConfirmOk(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) mdm := bm.data.(*datamocks.Manager) msa := bm.syncasync.(*syncasyncmocks.Bridge) mim := 
bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - rag := mdi.On("RunAsGroup", ctx, mock.Anything) - rag.RunFn = func(a mock.Arguments) { - var fn = a[1].(func(context.Context) error) - rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} - } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, - }, []*fftypes.DataAndBlob{}, nil) + mdm.On("ResolveInlineData", ctx, mock.Anything).Return(nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) replyMsg := &fftypes.Message{ @@ -109,7 +85,7 @@ func TestBroadcastMessageWaitConfirmOk(t *testing.T) { send(ctx) }). Return(replyMsg, nil) - mdi.On("UpsertMessage", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) + mdm.On("WriteNewMessage", ctx, mock.Anything, mock.Anything).Return(nil) msg, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ Message: fftypes.Message{ @@ -128,76 +104,7 @@ func TestBroadcastMessageWaitConfirmOk(t *testing.T) { assert.Equal(t, replyMsg, msg) assert.Equal(t, "ns1", msg.Header.Namespace) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) -} - -func TestBroadcastMessageWithBlobsOk(t *testing.T) { - bm, cancel := newTestBroadcast(t) - defer cancel() - mdi := bm.database.(*databasemocks.Plugin) - mdm := bm.data.(*datamocks.Manager) - mdx := bm.exchange.(*dataexchangemocks.Plugin) - mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) - mim := bm.identity.(*identitymanagermocks.Manager) - - blobHash := fftypes.NewRandB32() - dataID := fftypes.NewUUID() - - ctx := context.Background() - rag := mdi.On("RunAsGroup", ctx, mock.Anything) - rag.RunFn = func(a mock.Arguments) { - var fn = a[1].(func(context.Context) error) - rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} - } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: dataID, Hash: fftypes.NewRandB32()}, - }, []*fftypes.DataAndBlob{ - { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", - }, - }, - }, nil) - mdx.On("DownloadBLOB", ctx, "blob/1").Return(ioutil.NopCloser(bytes.NewReader([]byte(`some data`))), nil) - mps.On("PublishData", ctx, mock.MatchedBy(func(reader io.ReadCloser) bool { - b, err := ioutil.ReadAll(reader) - assert.NoError(t, err) - assert.Equal(t, "some data", string(b)) - return true - })).Return("payload-ref", nil) - mdi.On("UpdateData", ctx, mock.Anything, mock.Anything).Return(nil) - mdi.On("UpsertMessage", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) - mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) - - msg, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ - Message: fftypes.Message{ - Header: fftypes.MessageHeader{ - SignerRef: fftypes.SignerRef{ - Author: "did:firefly:org/abcd", - Key: "0x12345", - }, - }, - }, - InlineData: fftypes.InlineData{ - {Blob: &fftypes.BlobRef{ - Hash: blobHash, - }}, - }, - }, false) - assert.NoError(t, err) - assert.NotNil(t, msg.Data[0].ID) - assert.NotNil(t, msg.Data[0].Hash) - assert.Equal(t, "ns1", msg.Header.Namespace) - - mdi.AssertExpectations(t) + msa.AssertExpectations(t) mdm.AssertExpectations(t) } @@ -205,19 +112,18 @@ func TestBroadcastMessageTooLarge(t *testing.T) { bm, cancel := newTestBroadcast(t) bm.maxBatchPayloadLength = 1000000 defer cancel() - mdi := bm.database.(*databasemocks.Plugin) mdm := 
bm.data.(*datamocks.Manager) mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - rag := mdi.On("RunAsGroup", ctx, mock.Anything) - rag.RunFn = func(a mock.Arguments) { - var fn = a[1].(func(context.Context) error) - rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} - } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), ValueSize: 1000001}, - }, []*fftypes.DataAndBlob{}, nil) + mdm.On("ResolveInlineData", ctx, mock.Anything).Run( + func(args mock.Arguments) { + newMsg := args[1].(*data.NewMessage) + newMsg.Message.Data = fftypes.DataRefs{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), ValueSize: 1000001}, + } + }). + Return(nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) _, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ @@ -235,24 +141,17 @@ func TestBroadcastMessageTooLarge(t *testing.T) { }, true) assert.Regexp(t, "FF10327", err) - mdi.AssertExpectations(t) mdm.AssertExpectations(t) } func TestBroadcastMessageBadInput(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) mdm := bm.data.(*datamocks.Manager) mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - rag := mdi.On("RunAsGroup", ctx, mock.Anything) - rag.RunFn = func(a mock.Arguments) { - var fn = a[1].(func(context.Context) error) - rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} - } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + mdm.On("ResolveInlineData", ctx, mock.Anything).Return(fmt.Errorf("pop")) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) _, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ @@ -262,7 +161,6 @@ func TestBroadcastMessageBadInput(t *testing.T) { }, false) assert.EqualError(t, err, "pop") - mdi.AssertExpectations(t) mdm.AssertExpectations(t) } @@ -284,81 +182,14 @@ func TestBroadcastMessageBadIdentity(t *testing.T) { mim.AssertExpectations(t) } -func TestPublishBlobsSendMessageFail(t *testing.T) { - bm, cancel := newTestBroadcast(t) - defer cancel() - mdi := bm.database.(*databasemocks.Plugin) - mdm := bm.data.(*datamocks.Manager) - mdx := bm.exchange.(*dataexchangemocks.Plugin) - mim := bm.identity.(*identitymanagermocks.Manager) - - blobHash := fftypes.NewRandB32() - dataID := fftypes.NewUUID() - - ctx := context.Background() - rag := mdi.On("RunAsGroup", ctx, mock.Anything) - rag.RunFn = func(a mock.Arguments) { - var fn = a[1].(func(context.Context) error) - rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} - } - mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: dataID, Hash: fftypes.NewRandB32()}, - }, []*fftypes.DataAndBlob{ - { - Data: &fftypes.Data{ - ID: dataID, - Blob: &fftypes.BlobRef{ - Hash: blobHash, - }, - }, - Blob: &fftypes.Blob{ - Hash: blobHash, - PayloadRef: "blob/1", - }, - }, - }, nil) - mdx.On("DownloadBLOB", ctx, "blob/1").Return(nil, fmt.Errorf("pop")) - - _, err := bm.BroadcastMessage(ctx, "ns1", &fftypes.MessageInOut{ - Message: fftypes.Message{ - Header: fftypes.MessageHeader{ - SignerRef: fftypes.SignerRef{ - Author: "did:firefly:org/abcd", - Key: "0x12345", - }, - }, - }, - InlineData: fftypes.InlineData{ - {Blob: &fftypes.BlobRef{ - Hash: blobHash, - }}, 
- }, - }, false) - assert.Regexp(t, "FF10240", err) - - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) - mdx.AssertExpectations(t) - mim.AssertExpectations(t) -} - func TestBroadcastPrepare(t *testing.T) { bm, cancel := newTestBroadcast(t) defer cancel() - mdi := bm.database.(*databasemocks.Plugin) mdm := bm.data.(*datamocks.Manager) mim := bm.identity.(*identitymanagermocks.Manager) ctx := context.Background() - rag := mdi.On("RunAsGroup", ctx, mock.Anything) - rag.RunFn = func(a mock.Arguments) { - var fn = a[1].(func(context.Context) error) - rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))} - } - mdm.On("ResolveInlineDataBroadcast", ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, - }, []*fftypes.DataAndBlob{}, nil) + mdm.On("ResolveInlineData", ctx, mock.Anything).Return(nil) mim.On("ResolveInputSigningIdentity", ctx, "ns1", mock.Anything).Return(nil) msg := &fftypes.MessageInOut{ @@ -378,10 +209,7 @@ func TestBroadcastPrepare(t *testing.T) { err := sender.Prepare(ctx) assert.NoError(t, err) - assert.NotNil(t, msg.Data[0].ID) - assert.NotNil(t, msg.Data[0].Hash) assert.Equal(t, "ns1", msg.Header.Namespace) - mdi.AssertExpectations(t) mdm.AssertExpectations(t) } diff --git a/internal/broadcast/namespace_test.go b/internal/broadcast/namespace_test.go index bcf5066b84..f3956db85a 100644 --- a/internal/broadcast/namespace_test.go +++ b/internal/broadcast/namespace_test.go @@ -24,7 +24,6 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" - "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -69,9 +68,9 @@ func TestBroadcastNamespaceBroadcastOk(t *testing.T) { mim.On("ResolveInputSigningIdentity", mock.Anything, fftypes.SystemNamespace, mock.Anything).Return(nil) mdi.On("GetNamespace", mock.Anything, mock.Anything).Return(&fftypes.Namespace{Name: "ns1"}, nil) - mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) mdm.On("CheckDatatype", mock.Anything, "ns1", mock.Anything).Return(nil) - mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() + mdm.On("WriteNewMessage", mock.Anything, mock.Anything).Return(nil) buff := strings.Builder{} buff.Grow(4097) for i := 0; i < 4097; i++ { diff --git a/internal/broadcast/operations.go b/internal/broadcast/operations.go new file mode 100644 index 0000000000..6d0ba25d6b --- /dev/null +++ b/internal/broadcast/operations.go @@ -0,0 +1,187 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
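// The new operations.go below turns broadcastManager into an operation handler. A minimal
// sketch of the contract being satisfied, inferred from the Manager interface in manager.go
// above; the authoritative definition lives in internal/operations:
type OperationHandler interface {
	fftypes.Named
	// Rebuilds an in-memory PreparedOperation from a persisted Operation's recorded inputs
	PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error)
	// Performs the action, returning outputs to record and whether the operation completed
	RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error)
}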
+ +package broadcast + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/fftypes" +) + +type uploadBatchData struct { + BatchPersisted *fftypes.BatchPersisted `json:"batchPersisted"` + Batch *fftypes.Batch `json:"batch"` +} + +type uploadBlobData struct { + Data *fftypes.Data `json:"data"` + Blob *fftypes.Blob `json:"blob"` +} + +func addUploadBatchInputs(op *fftypes.Operation, batchID *fftypes.UUID) { + op.Input = fftypes.JSONObject{ + "id": batchID.String(), + } +} + +func getUploadBatchOutputs(payloadRef string) fftypes.JSONObject { + return fftypes.JSONObject{ + "payloadRef": payloadRef, + } +} + +func addUploadBlobInputs(op *fftypes.Operation, dataID *fftypes.UUID) { + op.Input = fftypes.JSONObject{ + "dataId": dataID.String(), + } +} + +func getUploadBlobOutputs(payloadRef string) fftypes.JSONObject { + return fftypes.JSONObject{ + "payloadRef": payloadRef, + } +} + +func retrieveUploadBatchInputs(ctx context.Context, op *fftypes.Operation) (*fftypes.UUID, error) { + return fftypes.ParseUUID(ctx, op.Input.GetString("id")) +} + +func retrieveUploadBlobInputs(ctx context.Context, op *fftypes.Operation) (*fftypes.UUID, error) { + return fftypes.ParseUUID(ctx, op.Input.GetString("dataId")) +} + +func (bm *broadcastManager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + switch op.Type { + case fftypes.OpTypeSharedStorageUploadBatch: + id, err := retrieveUploadBatchInputs(ctx, op) + if err != nil { + return nil, err + } + bp, err := bm.database.GetBatchByID(ctx, id) + if err != nil { + return nil, err + } else if bp == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + batch, err := bm.data.HydrateBatch(ctx, bp) + if err != nil { + return nil, err + } + return opUploadBatch(op, batch, bp), nil + + case fftypes.OpTypeSharedStorageUploadBlob: + dataID, err := retrieveUploadBlobInputs(ctx, op) + if err != nil { + return nil, err + } + d, err := bm.database.GetDataByID(ctx, dataID, false) + if err != nil { + return nil, err + } else if d == nil || d.Blob == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + blob, err := bm.database.GetBlobMatchingHash(ctx, d.Blob.Hash) + if err != nil { + return nil, err + } else if blob == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + return opUploadBlob(op, d, blob), nil + + default: + return nil, i18n.NewError(ctx, i18n.MsgOperationNotSupported, op.Type) + } +} + +func (bm *broadcastManager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { + switch data := op.Data.(type) { + case uploadBatchData: + return bm.uploadBatch(ctx, data) + case uploadBlobData: + return bm.uploadBlob(ctx, data) + default: + return nil, false, i18n.NewError(ctx, i18n.MsgOperationDataIncorrect, op.Data) + } +} + +// uploadBatch uploads the serialized batch to public storage +func (bm *broadcastManager) uploadBatch(ctx context.Context, data uploadBatchData) (outputs fftypes.JSONObject, complete bool, err error) { + // Serialize the full payload, which has already been sealed for us by the BatchManager + payload, err := json.Marshal(data.Batch) + if err != nil { + return nil, false, i18n.WrapError(ctx, err, i18n.MsgSerializationFailed) + } + + // Write it to IPFS to get a payload reference + payloadRef, err := 
bm.sharedstorage.UploadData(ctx, bytes.NewReader(payload)) + if err != nil { + return nil, false, err + } + log.L(ctx).Infof("Published batch '%s' to shared storage: '%s'", data.Batch.ID, payloadRef) + + // Update the batch to store the payloadRef + data.BatchPersisted.PayloadRef = payloadRef + update := database.BatchQueryFactory.NewUpdate(ctx).Set("payloadref", payloadRef) + return getUploadBatchOutputs(payloadRef), true, bm.database.UpdateBatch(ctx, data.Batch.ID, update) +} + +// uploadBlob streams a blob from the local data exchange to public storage +func (bm *broadcastManager) uploadBlob(ctx context.Context, data uploadBlobData) (outputs fftypes.JSONObject, complete bool, err error) { + + // Stream from the local data exchange ... + reader, err := bm.exchange.DownloadBLOB(ctx, data.Blob.PayloadRef) + if err != nil { + return nil, false, i18n.WrapError(ctx, err, i18n.MsgDownloadBlobFailed, data.Blob.PayloadRef) + } + defer reader.Close() + + // ... to the shared storage + data.Data.Blob.Public, err = bm.sharedstorage.UploadData(ctx, reader) + if err != nil { + return nil, false, err + } + + // Update the data in the DB + err = bm.database.UpdateData(ctx, data.Data.ID, database.DataQueryFactory.NewUpdate(ctx).Set("blob.public", data.Data.Blob.Public)) + if err != nil { + return nil, false, err + } + + log.L(ctx).Infof("Published blob with hash '%s' for data '%s' to shared storage: '%s'", data.Data.Blob.Hash, data.Data.ID, data.Data.Blob.Public) + return getUploadBlobOutputs(data.Data.Blob.Public), true, nil +} + +func opUploadBatch(op *fftypes.Operation, batch *fftypes.Batch, batchPersisted *fftypes.BatchPersisted) *fftypes.PreparedOperation { + return &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + Data: uploadBatchData{Batch: batch, BatchPersisted: batchPersisted}, + } +} + +func opUploadBlob(op *fftypes.Operation, data *fftypes.Data, blob *fftypes.Blob) *fftypes.PreparedOperation { + return &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + Data: uploadBlobData{Data: data, Blob: blob}, + } +} diff --git a/internal/broadcast/operations_test.go b/internal/broadcast/operations_test.go new file mode 100644 index 0000000000..91d14f33a1 --- /dev/null +++ b/internal/broadcast/operations_test.go @@ -0,0 +1,516 @@ +// Copyright © 2021 Kaleido, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
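// The tests below exercise the prepare-then-run path end to end. A minimal sketch of the
// recovery flow they model, using the handlers from operations.go above (the retry loop
// itself is driven by the operations manager, and the helper name here is illustrative):
func retryUploadOperation(ctx context.Context, bm *broadcastManager, op *fftypes.Operation) error {
	// Rebuild in-memory state from the persisted inputs (e.g. op.Input["id"] for a batch upload)
	po, err := bm.PrepareOperation(ctx, op)
	if err != nil {
		return err
	}
	// Re-run the upload; the outputs (the shared storage payloadRef) are recorded by the caller
	_, _, err = bm.RunOperation(ctx, po)
	return err
}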
+ +package broadcast + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + "testing" + + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/dataexchangemocks" + "github.com/hyperledger/firefly/mocks/datamocks" + "github.com/hyperledger/firefly/mocks/sharedstoragemocks" + "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestPrepareAndRunBatchBroadcast(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageUploadBatch, + } + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + batch := &fftypes.Batch{ + BatchHeader: bp.BatchHeader, + } + addUploadBatchInputs(op, bp.ID) + + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mdi := bm.database.(*databasemocks.Plugin) + mdm := bm.data.(*datamocks.Manager) + mdm.On("HydrateBatch", context.Background(), bp).Return(batch, nil) + mdi.On("GetBatchByID", context.Background(), bp.ID).Return(bp, nil) + mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil) + mdi.On("UpdateBatch", context.Background(), bp.ID, mock.MatchedBy(func(update database.Update) bool { + info, _ := update.Finalize() + assert.Equal(t, 1, len(info.SetOperations)) + assert.Equal(t, "payloadref", info.SetOperations[0].Field) + val, _ := info.SetOperations[0].Value.Value() + assert.Equal(t, "123", val) + return true + })).Return(nil) + + po, err := bm.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, bp.ID, po.Data.(uploadBatchData).Batch.ID) + + _, complete, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch, bp)) + + assert.True(t, complete) + assert.NoError(t, err) + assert.Equal(t, "123", bp.PayloadRef) + + mps.AssertExpectations(t) + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestPrepareAndRunBatchBroadcastHydrateFail(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageUploadBatch, + } + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + addUploadBatchInputs(op, bp.ID) + + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mdi := bm.database.(*databasemocks.Plugin) + mdm := bm.data.(*datamocks.Manager) + mdm.On("HydrateBatch", context.Background(), bp).Return(nil, fmt.Errorf("pop")) + mdi.On("GetBatchByID", context.Background(), bp.ID).Return(bp, nil) + + _, err := bm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "pop", err) + + mps.AssertExpectations(t) + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestPrepareOperationNotSupported(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + _, err := bm.PrepareOperation(context.Background(), &fftypes.Operation{}) + + assert.Regexp(t, "FF10371", err) +} + +func TestPrepareOperationBatchBroadcastBadInput(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageUploadBatch, + Input: fftypes.JSONObject{"id": "bad"}, + } + + _, err := bm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10142", err) +} + +func TestPrepareOperationBatchBroadcastError(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + batchID := fftypes.NewUUID() + op := 
&fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageUploadBatch, + Input: fftypes.JSONObject{"id": batchID.String()}, + } + + mdi := bm.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", context.Background(), batchID).Return(nil, fmt.Errorf("pop")) + + _, err := bm.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") +} + +func TestPrepareOperationBatchBroadcastNotFound(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + batchID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageUploadBatch, + Input: fftypes.JSONObject{"id": batchID.String()}, + } + + mdi := bm.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", context.Background(), batchID).Return(nil, nil) + + _, err := bm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) +} + +func TestRunOperationNotSupported(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + _, complete, err := bm.RunOperation(context.Background(), &fftypes.PreparedOperation{}) + + assert.False(t, complete) + assert.Regexp(t, "FF10378", err) +} + +func TestRunOperationBatchBroadcastInvalidData(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{} + batch := &fftypes.Batch{ + Payload: fftypes.BatchPayload{ + Data: fftypes.DataArray{ + {Value: fftypes.JSONAnyPtr(`!json`)}, + }, + }, + } + + _, complete, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch, &fftypes.BatchPersisted{})) + + assert.False(t, complete) + assert.Regexp(t, "FF10137", err) +} + +func TestRunOperationBatchBroadcastPublishFail(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{} + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mps.On("UploadData", context.Background(), mock.Anything).Return("", fmt.Errorf("pop")) + + _, complete, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch, &fftypes.BatchPersisted{})) + + assert.False(t, complete) + assert.EqualError(t, err, "pop") + + mps.AssertExpectations(t) +} + +func TestRunOperationBatchBroadcast(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{} + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mdi := bm.database.(*databasemocks.Plugin) + mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil) + mdi.On("UpdateBatch", context.Background(), batch.ID, mock.MatchedBy(func(update database.Update) bool { + info, _ := update.Finalize() + assert.Equal(t, 1, len(info.SetOperations)) + assert.Equal(t, "payloadref", info.SetOperations[0].Field) + val, _ := info.SetOperations[0].Value.Value() + assert.Equal(t, "123", val) + return true + })).Return(nil) + + bp := &fftypes.BatchPersisted{} + outputs, complete, err := bm.RunOperation(context.Background(), opUploadBatch(op, batch, bp)) + assert.Equal(t, "123", outputs["payloadRef"]) + + assert.True(t, complete) + assert.NoError(t, err) + assert.Equal(t, "123", bp.PayloadRef) + + mps.AssertExpectations(t) + mdi.AssertExpectations(t) +} + +func TestPrepareAndRunUploadBlob(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageUploadBlob, + } + blob := &fftypes.Blob{ + Hash: 
fftypes.NewRandB32(),
+	}
+	data := &fftypes.Data{
+		ID: fftypes.NewUUID(),
+		Blob: &fftypes.BlobRef{
+			Hash: blob.Hash,
+		},
+	}
+	addUploadBlobInputs(op, data.ID)
+
+	mps := bm.sharedstorage.(*sharedstoragemocks.Plugin)
+	mdx := bm.exchange.(*dataexchangemocks.Plugin)
+	mdi := bm.database.(*databasemocks.Plugin)
+
+	reader := ioutil.NopCloser(strings.NewReader("some data"))
+	mdi.On("GetDataByID", mock.Anything, data.ID, false).Return(data, nil)
+	mdi.On("GetBlobMatchingHash", mock.Anything, blob.Hash).Return(blob, nil)
+	mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil)
+	mdx.On("DownloadBLOB", context.Background(), mock.Anything).Return(reader, nil)
+	mdi.On("UpdateData", context.Background(), data.ID, mock.MatchedBy(func(update database.Update) bool {
+		info, _ := update.Finalize()
+		assert.Equal(t, 1, len(info.SetOperations))
+		assert.Equal(t, "blob.public", info.SetOperations[0].Field)
+		val, _ := info.SetOperations[0].Value.Value()
+		assert.Equal(t, "123", val)
+		return true
+	})).Return(nil)
+
+	po, err := bm.PrepareOperation(context.Background(), op)
+	assert.NoError(t, err)
+	assert.Equal(t, data, po.Data.(uploadBlobData).Data)
+	assert.Equal(t, blob, po.Data.(uploadBlobData).Blob)
+
+	outputs, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob))
+	assert.Equal(t, "123", outputs["payloadRef"])
+
+	assert.True(t, complete)
+	assert.NoError(t, err)
+
+	mps.AssertExpectations(t)
+	mdx.AssertExpectations(t)
+	mdi.AssertExpectations(t)
+
+}
+
+func TestPrepareUploadBlobGetBlobMissing(t *testing.T) {
+	bm, cancel := newTestBroadcast(t)
+	defer cancel()
+
+	op := &fftypes.Operation{
+		Type: fftypes.OpTypeSharedStorageUploadBlob,
+	}
+	blob := &fftypes.Blob{
+		Hash: fftypes.NewRandB32(),
+	}
+	data := &fftypes.Data{
+		ID: fftypes.NewUUID(),
+		Blob: &fftypes.BlobRef{
+			Hash: blob.Hash,
+		},
+	}
+	addUploadBlobInputs(op, data.ID)
+
+	mps := bm.sharedstorage.(*sharedstoragemocks.Plugin)
+	mdx := bm.exchange.(*dataexchangemocks.Plugin)
+	mdi := bm.database.(*databasemocks.Plugin)
+
+	mdi.On("GetDataByID", mock.Anything, data.ID, false).Return(data, nil)
+	mdi.On("GetBlobMatchingHash", mock.Anything, blob.Hash).Return(nil, nil)
+
+	_, err := bm.PrepareOperation(context.Background(), op)
+	assert.Regexp(t, "FF10109", err)
+
+	mps.AssertExpectations(t)
+	mdx.AssertExpectations(t)
+	mdi.AssertExpectations(t)
+
+}
+
+func TestPrepareUploadBlobGetBlobFail(t *testing.T) {
+	bm, cancel := newTestBroadcast(t)
+	defer cancel()
+
+	op := &fftypes.Operation{
+		Type: fftypes.OpTypeSharedStorageUploadBlob,
+	}
+	blob := &fftypes.Blob{
+		Hash: fftypes.NewRandB32(),
+	}
+	data := &fftypes.Data{
+		ID: fftypes.NewUUID(),
+		Blob: &fftypes.BlobRef{
+			Hash: blob.Hash,
+		},
+	}
+	addUploadBlobInputs(op, data.ID)
+
+	mdi := bm.database.(*databasemocks.Plugin)
+
+	mdi.On("GetDataByID", mock.Anything, data.ID, false).Return(data, nil)
+	mdi.On("GetBlobMatchingHash", mock.Anything, blob.Hash).Return(nil, fmt.Errorf("pop"))
+
+	_, err := bm.PrepareOperation(context.Background(), op)
+	assert.Regexp(t, "pop", err)
+
+	mdi.AssertExpectations(t)
+
+}
+
+func TestPrepareUploadBlobGetDataMissing(t *testing.T) {
+	bm, cancel := newTestBroadcast(t)
+	defer cancel()
+
+	op := &fftypes.Operation{
+		Type: fftypes.OpTypeSharedStorageUploadBlob,
+	}
+	dataID := fftypes.NewUUID()
+	addUploadBlobInputs(op, dataID)
+
+	mdi := bm.database.(*databasemocks.Plugin)
+
+	mdi.On("GetDataByID", mock.Anything, dataID, false).Return(nil, nil)
+
+	_, err := 
bm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) + +} + +func TestPrepareUploadBlobGetDataFail(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageUploadBlob, + } + dataID := fftypes.NewUUID() + addUploadBlobInputs(op, dataID) + + mdi := bm.database.(*databasemocks.Plugin) + + mdi.On("GetDataByID", mock.Anything, dataID, false).Return(nil, fmt.Errorf("pop")) + + _, err := bm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) + +} + +func TestPrepareUploadBlobGetDataBadID(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeSharedStorageUploadBlob, + } + + _, err := bm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10142", err) + +} + +func TestRunOperationUploadBlobUpdateFail(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{} + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + } + data := &fftypes.Data{ + ID: fftypes.NewUUID(), + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + }, + } + + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mdx := bm.exchange.(*dataexchangemocks.Plugin) + mdi := bm.database.(*databasemocks.Plugin) + + reader := ioutil.NopCloser(strings.NewReader("some data")) + mdx.On("DownloadBLOB", context.Background(), mock.Anything).Return(reader, nil) + mps.On("UploadData", context.Background(), mock.Anything).Return("123", nil) + mdi.On("UpdateData", context.Background(), data.ID, mock.Anything).Return(fmt.Errorf("pop")) + + _, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) + + assert.False(t, complete) + assert.Regexp(t, "pop", err) + + mps.AssertExpectations(t) + mdx.AssertExpectations(t) + mdi.AssertExpectations(t) +} + +func TestRunOperationUploadBlobUploadFail(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{} + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + } + data := &fftypes.Data{ + ID: fftypes.NewUUID(), + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + }, + } + + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mdx := bm.exchange.(*dataexchangemocks.Plugin) + + reader := ioutil.NopCloser(strings.NewReader("some data")) + mdx.On("DownloadBLOB", context.Background(), mock.Anything).Return(reader, nil) + mps.On("UploadData", context.Background(), mock.Anything).Return("", fmt.Errorf("pop")) + + _, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) + + assert.False(t, complete) + assert.Regexp(t, "pop", err) + + mps.AssertExpectations(t) + mdx.AssertExpectations(t) +} + +func TestRunOperationUploadBlobDownloadFail(t *testing.T) { + bm, cancel := newTestBroadcast(t) + defer cancel() + + op := &fftypes.Operation{} + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + } + data := &fftypes.Data{ + ID: fftypes.NewUUID(), + Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + }, + } + + mps := bm.sharedstorage.(*sharedstoragemocks.Plugin) + mdx := bm.exchange.(*dataexchangemocks.Plugin) + + reader := ioutil.NopCloser(strings.NewReader("some data")) + mdx.On("DownloadBLOB", context.Background(), mock.Anything).Return(reader, fmt.Errorf("pop")) + + _, complete, err := bm.RunOperation(context.Background(), opUploadBlob(op, data, blob)) + + assert.False(t, complete) + assert.Regexp(t, "pop", err) + + 
mps.AssertExpectations(t)
+	mdx.AssertExpectations(t)
+}
diff --git a/internal/broadcast/tokenpool_test.go b/internal/broadcast/tokenpool_test.go
index 1b81ef3f88..0cb4316951 100644
--- a/internal/broadcast/tokenpool_test.go
+++ b/internal/broadcast/tokenpool_test.go
@@ -24,7 +24,6 @@ import (
 	"github.com/hyperledger/firefly/mocks/databasemocks"
 	"github.com/hyperledger/firefly/mocks/datamocks"
 	"github.com/hyperledger/firefly/mocks/identitymanagermocks"
-	"github.com/hyperledger/firefly/pkg/database"
 	"github.com/hyperledger/firefly/pkg/fftypes"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
@@ -81,7 +80,6 @@ func TestBroadcastTokenPoolInvalid(t *testing.T) {
 func TestBroadcastTokenPoolBroadcastFail(t *testing.T) {
 	bm, cancel := newTestBroadcast(t)
 	defer cancel()
-	mdi := bm.database.(*databasemocks.Plugin)
 	mdm := bm.data.(*datamocks.Manager)
 	mim := bm.identity.(*identitymanagermocks.Manager)
 
@@ -98,13 +96,11 @@ func TestBroadcastTokenPoolBroadcastFail(t *testing.T) {
 
 	mim.On("ResolveInputSigningIdentity", mock.Anything, "ns1", mock.Anything).Return(nil)
 	mdm.On("VerifyNamespaceExists", mock.Anything, "ns1").Return(nil)
-	mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil)
-	mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop"))
+	mdm.On("WriteNewMessage", mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))
 
 	_, err := bm.BroadcastTokenPool(context.Background(), "ns1", pool, false)
 	assert.EqualError(t, err, "pop")
 
-	mdi.AssertExpectations(t)
 	mdm.AssertExpectations(t)
 	mim.AssertExpectations(t)
 }
@@ -112,7 +108,6 @@ func TestBroadcastTokenPoolBroadcastFail(t *testing.T) {
 func TestBroadcastTokenPoolOk(t *testing.T) {
 	bm, cancel := newTestBroadcast(t)
 	defer cancel()
-	mdi := bm.database.(*databasemocks.Plugin)
 	mdm := bm.data.(*datamocks.Manager)
 	mim := bm.identity.(*identitymanagermocks.Manager)
 
@@ -129,13 +124,11 @@ func TestBroadcastTokenPoolOk(t *testing.T) {
 
 	mim.On("ResolveInputSigningIdentity", mock.Anything, "ns1", mock.Anything).Return(nil)
 	mdm.On("VerifyNamespaceExists", mock.Anything, "ns1").Return(nil)
-	mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil)
-	mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(nil)
+	mdm.On("WriteNewMessage", mock.Anything, mock.Anything).Return(nil)
 
 	_, err := bm.BroadcastTokenPool(context.Background(), "ns1", pool, false)
 	assert.NoError(t, err)
 
-	mdi.AssertExpectations(t)
 	mdm.AssertExpectations(t)
 	mim.AssertExpectations(t)
 }
diff --git a/internal/config/config.go b/internal/config/config.go
index 03161b4273..adfd598a78 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -54,6 +54,10 @@ var (
 	APIRequestMaxTimeout = rootKey("api.requestMaxTimeout")
 	// APIShutdownTimeout is the amount of time to wait for any in-flight requests to finish before killing the HTTP server
 	APIShutdownTimeout = rootKey("api.shutdownTimeout")
+	// BatchCacheSize is the size of the batch cache
+	BatchCacheSize = rootKey("batch.cache.size")
+	// BatchCacheTTL is the time-to-live of entries in the batch cache
+	BatchCacheTTL = rootKey("batch.cache.ttl")
 	// BatchManagerReadPageSize is the size of each page of messages read from the database into memory when assembling batches
 	BatchManagerReadPageSize = rootKey("batch.manager.readPageSize")
 	// BatchManagerReadPollTimeout is how long without any notifications of new messages to wait, before doing a page query
@@ -74,6 +78,18 @@ var (
 	BroadcastBatchPayloadLimit = 
rootKey("broadcast.batch.payloadLimit") // BroadcastBatchTimeout is the timeout to wait for a batch to fill, before sending BroadcastBatchTimeout = rootKey("broadcast.batch.timeout") + // DownloadWorkerCount is the number of download workers created to pull data from shared storage to the local DX + DownloadWorkerCount = rootKey("download.worker.count") + // DownloadWorkerQueueLength is the length of the work queue in the channel to the workers - defaults to 2x the worker count + DownloadWorkerQueueLength = rootKey("download.worker.queueLength") + // DownloadRetryMaxAttempts is the maximum number of automatic attempts to make for each shared storage download before failing the operation + DownloadRetryMaxAttempts = rootKey("download.retry.maxAttempts") + // DownloadRetryInitDelay is the initial retry delay + DownloadRetryInitDelay = rootKey("download.retry.initialDelay") + // DownloadRetryMaxDelay is the maximum retry delay + DownloadRetryMaxDelay = rootKey("download.retry.maxDelay") + // DownloadRetryFactor is the backoff factor to use for retries + DownloadRetryFactor = rootKey("download.retry.factor") // PrivateMessagingBatchAgentTimeout how long to keep around a batching agent for a sending identity before disposal PrivateMessagingBatchAgentTimeout = rootKey("privatemessaging.batch.agentTimeout") // PrivateMessagingBatchSize is the maximum size of a batch for broadcast messages @@ -148,6 +164,10 @@ var ( EventDispatcherRetryMaxDelay = rootKey("event.dispatcher.retry.maxDelay") // EventDBEventsBufferSize the size of the buffer of change events EventDBEventsBufferSize = rootKey("event.dbevents.bufferSize") + // EventListenerTopicCacheSize cache size for blockchain listeners addresses + EventListenerTopicCacheSize = rootKey("event.listenerToipc.cache.size") + // EventListenerTopicCacheTTL cache time-to-live for private group addresses + EventListenerTopicCacheTTL = rootKey("event.listenerToipc.cache.ttl") // GroupCacheSize cache size for private group addresses GroupCacheSize = rootKey("group.cache.size") // GroupCacheTTL cache time-to-live for private group addresses @@ -184,6 +204,16 @@ var ( LogMaxAge = rootKey("log.maxAge") // LogCompress sets whether to compress backups LogCompress = rootKey("log.compress") + // MessageCacheSize + MessageCacheSize = rootKey("message.cache.size") + // MessageCacheTTL + MessageCacheTTL = rootKey("message.cache.ttl") + // MessageWriterCount + MessageWriterCount = rootKey("message.writer.count") + // MessageWriterBatchTimeout + MessageWriterBatchTimeout = rootKey("message.writer.batchTimeout") + // MessageWriterBatchMaxInserts + MessageWriterBatchMaxInserts = rootKey("message.writer.batchMaxInserts") // MetricsEnabled determines whether metrics will be instrumented and if the metrics server will be enabled or not MetricsEnabled = rootKey("metrics.enabled") // MetricsPath determines what path to serve the Prometheus metrics from @@ -220,12 +250,10 @@ var ( SubscriptionsRetryMaxDelay = rootKey("subscription.retry.maxDelay") // SubscriptionsRetryFactor the backoff factor to use for retry of database operations SubscriptionsRetryFactor = rootKey("subscription.retry.factor") - // AssetManagerRetryInitialDelay is the initial retry delay - AssetManagerRetryInitialDelay = rootKey("asset.manager.retry.initDelay") - // AssetManagerRetryMaxDelay is the initial retry delay - AssetManagerRetryMaxDelay = rootKey("asset.manager.retry.maxDelay") - // AssetManagerRetryFactor the backoff factor to use for retry of database operations - AssetManagerRetryFactor = 
rootKey("asset.manager.retry.factor") + // TransactionCacheSize + TransactionCacheSize = rootKey("transaction.cache.size") + // TransactionCacheTTL + TransactionCacheTTL = rootKey("transaction.cache.ttl") // AssetManagerKeyNormalization mechanism to normalize keys before using them. Valid options: "blockchain_plugin" - use blockchain plugin (default), "none" - do not attempt normalization AssetManagerKeyNormalization = rootKey("asset.manager.keyNormalization") // UIEnabled set to false to disable the UI (default is true, so UI will be enabled if ui.path is valid) @@ -295,6 +323,8 @@ func Reset() { viper.SetDefault(string(APIRequestTimeout), "120s") viper.SetDefault(string(APIShutdownTimeout), "10s") viper.SetDefault(string(AssetManagerKeyNormalization), "blockchain_plugin") + viper.SetDefault(string(BatchCacheSize), "1Mb") + viper.SetDefault(string(BatchCacheTTL), "5m") viper.SetDefault(string(BatchManagerReadPageSize), 100) viper.SetDefault(string(BatchManagerReadPollTimeout), "30s") viper.SetDefault(string(BatchRetryFactor), 2.0) @@ -315,8 +345,13 @@ func Reset() { viper.SetDefault(string(CorsMaxAge), 600) viper.SetDefault(string(DataexchangeType), "https") viper.SetDefault(string(DebugPort), -1) + viper.SetDefault(string(DownloadWorkerCount), 10) + viper.SetDefault(string(DownloadRetryMaxAttempts), 100) + viper.SetDefault(string(DownloadRetryInitDelay), "100ms") + viper.SetDefault(string(DownloadRetryMaxDelay), "1m") + viper.SetDefault(string(DownloadRetryFactor), 2.0) viper.SetDefault(string(EventAggregatorFirstEvent), fftypes.SubOptsFirstEventOldest) - viper.SetDefault(string(EventAggregatorBatchSize), 50) + viper.SetDefault(string(EventAggregatorBatchSize), 200) viper.SetDefault(string(EventAggregatorBatchTimeout), "250ms") viper.SetDefault(string(EventAggregatorPollTimeout), "30s") viper.SetDefault(string(EventAggregatorRetryFactor), 2.0) @@ -325,10 +360,12 @@ func Reset() { viper.SetDefault(string(EventAggregatorOpCorrelationRetries), 3) viper.SetDefault(string(EventDBEventsBufferSize), 100) viper.SetDefault(string(EventDispatcherBufferLength), 5) - viper.SetDefault(string(EventDispatcherBatchTimeout), "0") + viper.SetDefault(string(EventDispatcherBatchTimeout), "250ms") viper.SetDefault(string(EventDispatcherPollTimeout), "30s") viper.SetDefault(string(EventTransportsEnabled), []string{"websockets", "webhooks"}) viper.SetDefault(string(EventTransportsDefault), "websockets") + viper.SetDefault(string(EventListenerTopicCacheSize), "100Kb") + viper.SetDefault(string(EventListenerTopicCacheTTL), "5m") viper.SetDefault(string(GroupCacheSize), "1Mb") viper.SetDefault(string(GroupCacheTTL), "1h") viper.SetDefault(string(AdminEnabled), false) @@ -340,6 +377,11 @@ func Reset() { viper.SetDefault(string(LogFilesize), "100m") viper.SetDefault(string(LogMaxAge), "24h") viper.SetDefault(string(LogMaxBackups), 2) + viper.SetDefault(string(MessageCacheSize), "50Mb") + viper.SetDefault(string(MessageCacheTTL), "5m") + viper.SetDefault(string(MessageWriterBatchMaxInserts), 200) + viper.SetDefault(string(MessageWriterBatchTimeout), "10ms") + viper.SetDefault(string(MessageWriterCount), 5) viper.SetDefault(string(NamespacesDefault), "default") viper.SetDefault(string(NamespacesPredefined), fftypes.JSONObjectArray{{"name": "default", "description": "Default predefined namespace"}}) viper.SetDefault(string(OrchestratorStartupAttempts), 5) @@ -356,9 +398,8 @@ func Reset() { viper.SetDefault(string(SubscriptionsRetryInitialDelay), "250ms") viper.SetDefault(string(SubscriptionsRetryMaxDelay), "30s") 
viper.SetDefault(string(SubscriptionsRetryFactor), 2.0) - viper.SetDefault(string(AssetManagerRetryInitialDelay), "250ms") - viper.SetDefault(string(AssetManagerRetryMaxDelay), "30s") - viper.SetDefault(string(AssetManagerRetryFactor), 2.0) + viper.SetDefault(string(TransactionCacheSize), "1Mb") + viper.SetDefault(string(TransactionCacheTTL), "5m") viper.SetDefault(string(UIEnabled), true) viper.SetDefault(string(ValidatorCacheSize), "1Mb") viper.SetDefault(string(ValidatorCacheTTL), "1h") diff --git a/internal/contracts/manager.go b/internal/contracts/manager.go index 5710d9761d..729555659a 100644 --- a/internal/contracts/manager.go +++ b/internal/contracts/manager.go @@ -24,6 +24,7 @@ import ( "github.com/hyperledger/firefly/internal/broadcast" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/identity" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/database" @@ -32,6 +33,8 @@ import ( ) type Manager interface { + fftypes.Named + BroadcastFFI(ctx context.Context, ns string, ffi *fftypes.FFI, waitConfirm bool) (output *fftypes.FFI, err error) GetFFI(ctx context.Context, ns, name, version string) (*fftypes.FFI, error) GetFFIByID(ctx context.Context, id *fftypes.UUID) (*fftypes.FFI, error) @@ -51,6 +54,10 @@ type Manager interface { GetContractListeners(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.ContractListener, *database.FilterResult, error) DeleteContractListenerByNameOrID(ctx context.Context, ns, nameOrID string) error GenerateFFI(ctx context.Context, ns string, generationRequest *fftypes.FFIGenerationRequest) (*fftypes.FFI, error) + + // From operations.OperationHandler + PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) + RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) } type contractManager struct { @@ -60,24 +67,37 @@ type contractManager struct { identity identity.Manager blockchain blockchain.Plugin ffiParamValidator fftypes.FFIParamValidator + operations operations.Manager } -func NewContractManager(ctx context.Context, database database.Plugin, broadcast broadcast.Manager, identity identity.Manager, blockchain blockchain.Plugin) (Manager, error) { - if database == nil || broadcast == nil || identity == nil || blockchain == nil { +func NewContractManager(ctx context.Context, di database.Plugin, bm broadcast.Manager, im identity.Manager, bi blockchain.Plugin, om operations.Manager, txHelper txcommon.Helper) (Manager, error) { + if di == nil || bm == nil || im == nil || bi == nil || om == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } - v, err := blockchain.GetFFIParamValidator(ctx) + v, err := bi.GetFFIParamValidator(ctx) if err != nil { return nil, i18n.WrapError(ctx, err, i18n.MsgPluginInitializationFailed) } - return &contractManager{ - database: database, - txHelper: txcommon.NewTransactionHelper(database), - broadcast: broadcast, - identity: identity, - blockchain: blockchain, + + cm := &contractManager{ + database: di, + txHelper: txHelper, + broadcast: bm, + identity: im, + blockchain: bi, ffiParamValidator: v, - }, nil + operations: om, + } + + om.RegisterHandler(ctx, cm, []fftypes.OpType{ + fftypes.OpTypeBlockchainInvoke, + }) + + return cm, nil +} + +func (cm *contractManager) Name() string { + return "ContractManager" } 
func (cm *contractManager) newFFISchemaCompiler() *jsonschema.Compiler { @@ -156,7 +176,7 @@ func (cm *contractManager) GetFFIs(ctx context.Context, ns string, filter databa return cm.database.GetFFIs(ctx, ns, filter) } -func (cm *contractManager) writeInvokeTransaction(ctx context.Context, ns string, input fftypes.JSONObject) (*fftypes.Operation, error) { +func (cm *contractManager) writeInvokeTransaction(ctx context.Context, ns string, req *fftypes.ContractCallRequest) (*fftypes.Operation, error) { txid, err := cm.txHelper.SubmitNewTransaction(ctx, ns, fftypes.TransactionTypeContractInvoke) if err != nil { return nil, err @@ -167,8 +187,10 @@ func (cm *contractManager) writeInvokeTransaction(ctx context.Context, ns string ns, txid, fftypes.OpTypeBlockchainInvoke) - op.Input = input - return op, cm.database.InsertOperation(ctx, op) + if err = addBlockchainInvokeInputs(op, req); err == nil { + err = cm.database.InsertOperation(ctx, op) + } + return op, err } func (cm *contractManager) InvokeContract(ctx context.Context, ns string, req *fftypes.ContractCallRequest) (res interface{}, err error) { @@ -186,7 +208,7 @@ func (cm *contractManager) InvokeContract(ctx context.Context, ns string, req *f return err } if req.Type == fftypes.CallTypeInvoke { - op, err = cm.writeInvokeTransaction(ctx, ns, req.Input) + op, err = cm.writeInvokeTransaction(ctx, ns, req) if err != nil { return err } @@ -199,18 +221,13 @@ func (cm *contractManager) InvokeContract(ctx context.Context, ns string, req *f switch req.Type { case fftypes.CallTypeInvoke: - err = cm.blockchain.InvokeContract(ctx, op.ID, req.Key, req.Location, req.Method, req.Input) res = &fftypes.ContractCallResponse{ID: op.ID} + return res, cm.operations.RunOperation(ctx, opBlockchainInvoke(op, req)) case fftypes.CallTypeQuery: - res, err = cm.blockchain.QueryContract(ctx, req.Location, req.Method, req.Input) + return cm.blockchain.QueryContract(ctx, req.Location, req.Method, req.Input) default: panic(fmt.Sprintf("unknown call type: %s", req.Type)) } - - if op != nil && err != nil { - cm.txHelper.WriteOperationFailure(ctx, op.ID, err) - } - return res, err } func (cm *contractManager) InvokeContractAPI(ctx context.Context, ns, apiName, methodPath string, req *fftypes.ContractCallRequest) (interface{}, error) { @@ -441,6 +458,12 @@ func (cm *contractManager) AddContractListener(ctx context.Context, ns string, l return nil, err } + if listener.Options == nil { + listener.Options = cm.getDefaultContractListenerOptions() + } else if listener.Options.FirstEvent == "" { + listener.Options.FirstEvent = cm.getDefaultContractListenerOptions().FirstEvent + } + err = cm.database.RunAsGroup(ctx, func(ctx context.Context) (err error) { if listener.Name != "" { if err := fftypes.ValidateFFNameField(ctx, listener.Name, "name"); err != nil { @@ -496,7 +519,7 @@ func (cm *contractManager) AddContractListener(ctx context.Context, ns string, l if err := cm.validateFFIEvent(ctx, &listener.Event.FFIEventDefinition); err != nil { return nil, err } - if err = cm.blockchain.AddSubscription(ctx, listener); err != nil { + if err = cm.blockchain.AddContractListener(ctx, listener); err != nil { return nil, err } if listener.Name == "" { @@ -537,7 +560,7 @@ func (cm *contractManager) DeleteContractListenerByNameOrID(ctx context.Context, if err != nil { return err } - if err = cm.blockchain.DeleteSubscription(ctx, listener); err != nil { + if err = cm.blockchain.DeleteContractListener(ctx, listener); err != nil { return err } return cm.database.DeleteContractListenerByID(ctx, 
listener.ID) @@ -565,3 +588,9 @@ func (cm *contractManager) GenerateFFI(ctx context.Context, ns string, generatio generationRequest.Namespace = ns return cm.blockchain.GenerateFFI(ctx, generationRequest) } + +func (cm *contractManager) getDefaultContractListenerOptions() *fftypes.ContractListenerOptions { + return &fftypes.ContractListenerOptions{ + FirstEvent: string(fftypes.SubOptsFirstEventNewest), + } +} diff --git a/internal/contracts/manager_test.go b/internal/contracts/manager_test.go index f67997f588..1b4837b8bc 100644 --- a/internal/contracts/manager_test.go +++ b/internal/contracts/manager_test.go @@ -23,10 +23,13 @@ import ( "github.com/hyperledger/firefly/internal/blockchain/ethereum" "github.com/hyperledger/firefly/internal/identity" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/broadcastmocks" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" + "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" @@ -36,47 +39,63 @@ import ( ) func newTestContractManager() *contractManager { - mdb := &databasemocks.Plugin{} + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} mbm := &broadcastmocks.Manager{} mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} + mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mbi.On("GetFFIParamValidator", mock.Anything).Return(nil, nil) + mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) mbi.On("Name").Return("mockblockchain").Maybe() - rag := mdb.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() + rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() rag.RunFn = func(a mock.Arguments) { rag.ReturnArguments = mock.Arguments{ a[1].(func(context.Context) error)(a[0].(context.Context)), } } - cm, _ := NewContractManager(context.Background(), mdb, mbm, mim, mbi) + cm, _ := NewContractManager(context.Background(), mdi, mbm, mim, mbi, mom, txHelper) cm.(*contractManager).txHelper = &txcommonmocks.Helper{} return cm.(*contractManager) } func TestNewContractManagerFail(t *testing.T) { - _, err := NewContractManager(context.Background(), nil, nil, nil, nil) + _, err := NewContractManager(context.Background(), nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } +func TestName(t *testing.T) { + cm := newTestContractManager() + assert.Equal(t, "ContractManager", cm.Name()) +} + func TestNewContractManagerFFISchemaLoaderFail(t *testing.T) { - mdb := &databasemocks.Plugin{} + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} mbm := &broadcastmocks.Manager{} mim := &identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} + mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mbi.On("GetFFIParamValidator", mock.Anything).Return(nil, fmt.Errorf("pop")) - _, err := NewContractManager(context.Background(), mdb, mbm, mim, mbi) + _, err := NewContractManager(context.Background(), mdi, mbm, mim, mbi, mom, txHelper) assert.Regexp(t, "pop", err) } func TestNewContractManagerFFISchemaLoader(t *testing.T) { - mdb := &databasemocks.Plugin{} + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} mbm := &broadcastmocks.Manager{} mim := 
&identitymanagermocks.Manager{} mbi := &blockchainmocks.Plugin{} + mom := &operationmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mbi.On("GetFFIParamValidator", mock.Anything).Return(ðereum.FFIParamValidator{}, nil) - _, err := NewContractManager(context.Background(), mdb, mbm, mim, mbi) + mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) + _, err := NewContractManager(context.Background(), mdi, mbm, mim, mbi, mom, txHelper) assert.NoError(t, err) } @@ -557,10 +576,11 @@ func TestAddContractListenerInline(t *testing.T) { }, }, }, + Options: &fftypes.ContractListenerOptions{}, }, } - mbi.On("AddSubscription", context.Background(), sub).Return(nil) + mbi.On("AddContractListener", context.Background(), sub).Return(nil) mdi.On("UpsertContractListener", context.Background(), &sub.ContractListener).Return(nil) result, err := cm.AddContractListener(context.Background(), "ns", sub) @@ -609,7 +629,7 @@ func TestAddContractListenerByRef(t *testing.T) { }, } - mbi.On("AddSubscription", context.Background(), sub).Return(nil) + mbi.On("AddContractListener", context.Background(), sub).Return(nil) mdi.On("GetFFIByID", context.Background(), interfaceID).Return(&fftypes.FFI{}, nil) mdi.On("GetFFIEvent", context.Background(), "ns1", mock.Anything, sub.Event.Name).Return(event, nil) mdi.On("UpsertContractListener", context.Background(), &sub.ContractListener).Return(nil) @@ -653,7 +673,7 @@ func TestAddContractListenerByEventID(t *testing.T) { EventID: eventID, } - mbi.On("AddSubscription", context.Background(), sub).Return(nil) + mbi.On("AddContractListener", context.Background(), sub).Return(nil) mdi.On("GetFFIEventByID", context.Background(), sub.EventID).Return(event, nil) mdi.On("UpsertContractListener", context.Background(), &sub.ContractListener).Return(nil) @@ -959,7 +979,7 @@ func TestAddContractListenerBlockchainFail(t *testing.T) { }, } - mbi.On("AddSubscription", context.Background(), sub).Return(fmt.Errorf("pop")) + mbi.On("AddContractListener", context.Background(), sub).Return(fmt.Errorf("pop")) _, err := cm.AddContractListener(context.Background(), "ns", sub) assert.EqualError(t, err, "pop") @@ -992,7 +1012,7 @@ func TestAddContractListenerUpsertSubFail(t *testing.T) { }, } - mbi.On("AddSubscription", context.Background(), sub).Return(nil) + mbi.On("AddContractListener", context.Background(), sub).Return(nil) mdi.On("UpsertContractListener", context.Background(), &sub.ContractListener).Return(fmt.Errorf("pop")) _, err := cm.AddContractListener(context.Background(), "ns", sub) @@ -1116,10 +1136,10 @@ func TestGetFFIs(t *testing.T) { func TestInvokeContract(t *testing.T) { cm := newTestContractManager() - mbi := cm.blockchain.(*blockchainmocks.Plugin) mim := cm.identity.(*identitymanagermocks.Manager) mdi := cm.database.(*databasemocks.Plugin) mth := cm.txHelper.(*txcommonmocks.Helper) + mom := cm.operations.(*operationmocks.Manager) req := &fftypes.ContractCallRequest{ Type: fftypes.CallTypeInvoke, @@ -1135,25 +1155,31 @@ func TestInvokeContract(t *testing.T) { } mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeContractInvoke).Return(fftypes.NewUUID(), nil) - mim.On("NormalizeSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) mdi.On("InsertOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.Operation) bool { return op.Namespace == "ns1" && op.Type == fftypes.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" })).Return(nil) - mbi.On("InvokeContract", 
mock.Anything, mock.AnythingOfType("*fftypes.UUID"), "key-resolved", req.Location, req.Method, req.Input).Return(nil) + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(blockchainInvokeData) + return op.Type == fftypes.OpTypeBlockchainInvoke && data.Request == req + })).Return(nil) _, err := cm.InvokeContract(context.Background(), "ns1", req) assert.NoError(t, err) + mth.AssertExpectations(t) + mim.AssertExpectations(t) + mdi.AssertExpectations(t) + mom.AssertExpectations(t) } func TestInvokeContractFail(t *testing.T) { cm := newTestContractManager() - mbi := cm.blockchain.(*blockchainmocks.Plugin) mim := cm.identity.(*identitymanagermocks.Manager) mdi := cm.database.(*databasemocks.Plugin) mth := cm.txHelper.(*txcommonmocks.Helper) + mom := cm.operations.(*operationmocks.Manager) req := &fftypes.ContractCallRequest{ Type: fftypes.CallTypeInvoke, @@ -1169,18 +1195,23 @@ func TestInvokeContractFail(t *testing.T) { } mth.On("SubmitNewTransaction", mock.Anything, "ns1", fftypes.TransactionTypeContractInvoke).Return(fftypes.NewUUID(), nil) - mim.On("NormalizeSigningKey", mock.Anything, "", identity.KeyNormalizationBlockchainPlugin).Return("key-resolved", nil) mdi.On("InsertOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.Operation) bool { return op.Namespace == "ns1" && op.Type == fftypes.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" })).Return(nil) - mbi.On("InvokeContract", mock.Anything, mock.AnythingOfType("*fftypes.UUID"), "key-resolved", req.Location, req.Method, req.Input).Return(fmt.Errorf("pop")) - mth.On("WriteOperationFailure", mock.Anything, mock.Anything, fmt.Errorf("pop")) + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(blockchainInvokeData) + return op.Type == fftypes.OpTypeBlockchainInvoke && data.Request == req + })).Return(fmt.Errorf("pop")) _, err := cm.InvokeContract(context.Background(), "ns1", req) assert.EqualError(t, err, "pop") + + mim.AssertExpectations(t) + mdi.AssertExpectations(t) mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestInvokeContractFailNormalizeSigningKey(t *testing.T) { @@ -1467,7 +1498,7 @@ func TestDeleteContractListener(t *testing.T) { } mdi.On("GetContractListener", context.Background(), "ns", "sub1").Return(sub, nil) - mbi.On("DeleteSubscription", context.Background(), sub).Return(nil) + mbi.On("DeleteContractListener", context.Background(), sub).Return(nil) mdi.On("DeleteContractListenerByID", context.Background(), sub.ID).Return(nil) err := cm.DeleteContractListenerByNameOrID(context.Background(), "ns", "sub1") @@ -1484,7 +1515,7 @@ func TestDeleteContractListenerBlockchainFail(t *testing.T) { } mdi.On("GetContractListener", context.Background(), "ns", "sub1").Return(sub, nil) - mbi.On("DeleteSubscription", context.Background(), sub).Return(fmt.Errorf("pop")) + mbi.On("DeleteContractListener", context.Background(), sub).Return(fmt.Errorf("pop")) mdi.On("DeleteContractListenerByID", context.Background(), sub.ID).Return(nil) err := cm.DeleteContractListenerByNameOrID(context.Background(), "ns", "sub1") @@ -1505,9 +1536,9 @@ func TestInvokeContractAPI(t *testing.T) { cm := newTestContractManager() mdb := cm.database.(*databasemocks.Plugin) mim := cm.identity.(*identitymanagermocks.Manager) - mbi := cm.blockchain.(*blockchainmocks.Plugin) mdi := cm.database.(*databasemocks.Plugin) mth := cm.txHelper.(*txcommonmocks.Helper) + mom := cm.operations.(*operationmocks.Manager) req := 
&fftypes.ContractCallRequest{ Type: fftypes.CallTypeInvoke, @@ -1533,11 +1564,20 @@ func TestInvokeContractAPI(t *testing.T) { mdi.On("InsertOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.Operation) bool { return op.Namespace == "ns1" && op.Type == fftypes.OpTypeBlockchainInvoke && op.Plugin == "mockblockchain" })).Return(nil) - mbi.On("InvokeContract", mock.Anything, mock.AnythingOfType("*fftypes.UUID"), "key-resolved", req.Location, mock.AnythingOfType("*fftypes.FFIMethod"), req.Input).Return(nil) + mom.On("RunOperation", mock.Anything, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(blockchainInvokeData) + return op.Type == fftypes.OpTypeBlockchainInvoke && data.Request == req + })).Return(nil) _, err := cm.InvokeContractAPI(context.Background(), "ns1", "banana", "peel", req) assert.NoError(t, err) + + mdb.AssertExpectations(t) + mim.AssertExpectations(t) + mdi.AssertExpectations(t) + mth.AssertExpectations(t) + mom.AssertExpectations(t) } func TestInvokeContractAPIFailContractLookup(t *testing.T) { diff --git a/internal/contracts/operations.go b/internal/contracts/operations.go new file mode 100644 index 0000000000..f7ba46e5b3 --- /dev/null +++ b/internal/contracts/operations.go @@ -0,0 +1,79 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package contracts
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/hyperledger/firefly/internal/i18n"
+	"github.com/hyperledger/firefly/pkg/fftypes"
+)
+
+type blockchainInvokeData struct {
+	Request *fftypes.ContractCallRequest `json:"request"`
+}
+
+func addBlockchainInvokeInputs(op *fftypes.Operation, req *fftypes.ContractCallRequest) (err error) {
+	var reqJSON []byte
+	if reqJSON, err = json.Marshal(req); err == nil {
+		err = json.Unmarshal(reqJSON, &op.Input)
+	}
+	return err
+}
+
+func retrieveBlockchainInvokeInputs(ctx context.Context, op *fftypes.Operation) (*fftypes.ContractCallRequest, error) {
+	var req fftypes.ContractCallRequest
+	s := op.Input.String()
+	if err := json.Unmarshal([]byte(s), &req); err != nil {
+		return nil, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, s)
+	}
+	return &req, nil
+}
+
+func (cm *contractManager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) {
+	switch op.Type {
+	case fftypes.OpTypeBlockchainInvoke:
+		req, err := retrieveBlockchainInvokeInputs(ctx, op)
+		if err != nil {
+			return nil, err
+		}
+		return opBlockchainInvoke(op, req), nil
+
+	default:
+		return nil, i18n.NewError(ctx, i18n.MsgOperationNotSupported, op.Type)
+	}
+}
+
+func (cm *contractManager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) {
+	switch data := op.Data.(type) {
+	case blockchainInvokeData:
+		req := data.Request
+		return nil, false, cm.blockchain.InvokeContract(ctx, op.ID, req.Key, req.Location, req.Method, req.Input)
+
+	default:
+		return nil, false, i18n.NewError(ctx, i18n.MsgOperationDataIncorrect, op.Data)
+	}
+}
+
+func opBlockchainInvoke(op *fftypes.Operation, req *fftypes.ContractCallRequest) *fftypes.PreparedOperation {
+	return &fftypes.PreparedOperation{
+		ID:   op.ID,
+		Type: op.Type,
+		Data: blockchainInvokeData{Request: req},
+	}
+}
diff --git a/internal/contracts/operations_test.go b/internal/contracts/operations_test.go
new file mode 100644
index 0000000000..fee5fd8d0b
--- /dev/null
+++ b/internal/contracts/operations_test.go
@@ -0,0 +1,94 @@
+// Copyright © 2021 Kaleido, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package contracts
+
+import (
+	"context"
+	"testing"
+
+	"github.com/hyperledger/firefly/mocks/blockchainmocks"
+	"github.com/hyperledger/firefly/pkg/fftypes"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+)
+
+func TestPrepareAndRunBlockchainInvoke(t *testing.T) {
+	cm := newTestContractManager()
+
+	op := &fftypes.Operation{
+		Type: fftypes.OpTypeBlockchainInvoke,
+		ID:   fftypes.NewUUID(),
+	}
+	req := &fftypes.ContractCallRequest{
+		Key:      "0x123",
+		Location: fftypes.JSONAnyPtr(`{"address":"0x1111"}`),
+		Method: &fftypes.FFIMethod{
+			Name: "set",
+		},
+		Input: map[string]interface{}{
+			"value": "1",
+		},
+	}
+	err := addBlockchainInvokeInputs(op, req)
+	assert.NoError(t, err)
+
+	mbi := cm.blockchain.(*blockchainmocks.Plugin)
+	mbi.On("InvokeContract", context.Background(), op.ID, "0x123", mock.MatchedBy(func(loc *fftypes.JSONAny) bool {
+		return loc.String() == req.Location.String()
+	}), mock.MatchedBy(func(method *fftypes.FFIMethod) bool {
+		return method.Name == req.Method.Name
+	}), req.Input).Return(nil)
+
+	po, err := cm.PrepareOperation(context.Background(), op)
+	assert.NoError(t, err)
+	assert.Equal(t, req, po.Data.(blockchainInvokeData).Request)
+
+	_, complete, err := cm.RunOperation(context.Background(), po)
+
+	assert.False(t, complete)
+	assert.NoError(t, err)
+
+	mbi.AssertExpectations(t)
+}
+
+func TestPrepareOperationNotSupported(t *testing.T) {
+	cm := newTestContractManager()
+
+	po, err := cm.PrepareOperation(context.Background(), &fftypes.Operation{})
+
+	assert.Nil(t, po)
+	assert.Regexp(t, "FF10371", err)
+}
+
+func TestPrepareOperationBlockchainInvokeBadInput(t *testing.T) {
+	cm := newTestContractManager()
+
+	op := &fftypes.Operation{
+		Type:  fftypes.OpTypeBlockchainInvoke,
+		Input: fftypes.JSONObject{"interface": "bad"},
+	}
+
+	_, err := cm.PrepareOperation(context.Background(), op)
+	assert.Regexp(t, "FF10151", err)
+}
+
+func TestRunOperationNotSupported(t *testing.T) {
+	cm := newTestContractManager()
+
+	_, complete, err := cm.RunOperation(context.Background(), &fftypes.PreparedOperation{})
+
+	assert.False(t, complete)
+	assert.Regexp(t, "FF10378", err)
+}
diff --git a/internal/data/blobstore.go b/internal/data/blobstore.go
index a13931f7a3..e85da59457 100644
--- a/internal/data/blobstore.go
+++ b/internal/data/blobstore.go
@@ -38,7 +38,7 @@ type blobStore struct {
 	exchange      dataexchange.Plugin
 }
 
-func (bs *blobStore) uploadVerifyBLOB(ctx context.Context, ns string, id *fftypes.UUID, expectedHash *fftypes.Bytes32, reader io.Reader) (hash *fftypes.Bytes32, written int64, payloadRef string, err error) {
+func (bs *blobStore) uploadVerifyBLOB(ctx context.Context, ns string, id *fftypes.UUID, reader io.Reader) (hash *fftypes.Bytes32, written int64, payloadRef string, err error) {
 	hashCalc := sha256.New()
 	dxReader, dx := io.Pipe()
 	storeAndHash := io.MultiWriter(hashCalc, dx)
@@ -63,15 +63,11 @@ func (bs *blobStore) uploadVerifyBLOB(ctx context.Context, ns string, id *fftype
 	}
 
 	hash = fftypes.HashResult(hashCalc)
-	log.L(ctx).Debugf("Upload BLOB size=%d hashes: calculated=%s upload=%s (expected=%v) size=%d (expected=%d)", written, hash, uploadHash, expectedHash, uploadSize, written)
+	log.L(ctx).Debugf("Upload BLOB hashes: calculated=%s upload=%s sizes: calculated=%d upload=%d", hash, uploadHash, written, uploadSize)
 	if !uploadHash.Equals(hash) {
 		return nil, -1, "", i18n.NewError(ctx, i18n.MsgDXBadHash, uploadHash, hash)
 	}
-
-	if expectedHash != nil && !uploadHash.Equals(expectedHash) {
-		return nil, -1, "", 
i18n.NewError(ctx, i18n.MsgDXBadHash, uploadHash, expectedHash) - } if uploadSize > 0 && uploadSize != written { return nil, -1, "", i18n.NewError(ctx, i18n.MsgDXBadSize, uploadSize, written) } @@ -95,7 +91,7 @@ func (bs *blobStore) UploadBLOB(ctx context.Context, ns string, inData *fftypes. data.Namespace = ns data.Created = fftypes.Now() - hash, blobSize, payloadRef, err := bs.uploadVerifyBLOB(ctx, ns, data.ID, nil /* we don't have an expected hash for a new upload */, mpart.Data) + hash, blobSize, payloadRef, err := bs.uploadVerifyBLOB(ctx, ns, data.ID, mpart.Data) if err != nil { return nil, err } @@ -143,37 +139,6 @@ func (bs *blobStore) UploadBLOB(ctx context.Context, ns string, inData *fftypes. return data, nil } -func (bs *blobStore) CopyBlobPStoDX(ctx context.Context, data *fftypes.Data) (blob *fftypes.Blob, err error) { - - reader, err := bs.sharedstorage.RetrieveData(ctx, data.Blob.Public) - if err != nil { - return nil, err - } - if reader == nil { - log.L(ctx).Infof("Blob '%s' not found in shared storage", data.Blob.Public) - return nil, nil - } - defer reader.Close() - - hash, blobSize, payloadRef, err := bs.uploadVerifyBLOB(ctx, data.Namespace, data.ID, data.Blob.Hash, reader) - if err != nil { - return nil, err - } - log.L(ctx).Infof("Transferred blob '%s' (%s) from shared storage '%s' to local data exchange '%s'", hash, units.HumanSizeWithPrecision(float64(blobSize), 2), data.Blob.Public, payloadRef) - - blob = &fftypes.Blob{ - Hash: hash, - Size: blobSize, - PayloadRef: payloadRef, - Created: fftypes.Now(), - } - err = bs.database.InsertBlob(ctx, blob) - if err != nil { - return nil, err - } - return blob, nil -} - func (bs *blobStore) DownloadBLOB(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error) { if err := fftypes.ValidateFFNameField(ctx, ns, "namespace"); err != nil { diff --git a/internal/data/blobstore_test.go b/internal/data/blobstore_test.go index b2fd389c16..7a9e2e6786 100644 --- a/internal/data/blobstore_test.go +++ b/internal/data/blobstore_test.go @@ -29,7 +29,6 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/dataexchangemocks" - "github.com/hyperledger/firefly/mocks/sharedstoragemocks" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" @@ -189,7 +188,7 @@ func TestUploadBlobWriteFailDoesNotRead(t *testing.T) { } -func TestUploadBlobHashMismatch(t *testing.T) { +func TestUploadBlobHashMismatchCalculated(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() @@ -247,217 +246,6 @@ func TestUploadBlobUpsertFail(t *testing.T) { } -func TestCopyBlobPStoDXOk(t *testing.T) { - - dm, ctx, cancel := newTestDataManager(t) - defer cancel() - - payload := []byte(`some data`) - var hash fftypes.Bytes32 = sha256.Sum256(payload) - - mpi := dm.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", ctx, "public-ref").Return(io.NopCloser(bytes.NewReader(payload)), nil) - - mdx := dm.exchange.(*dataexchangemocks.Plugin) - dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("/private/loc", &hash, int64(len(payload)), nil) - dxUpload.RunFn = func(a mock.Arguments) { - _, err := ioutil.ReadAll(a[3].(io.Reader)) - assert.Nil(t, err) - } - - mdi := dm.database.(*databasemocks.Plugin) - mdi.On("InsertBlob", ctx, mock.Anything).Return(nil) - - blob, err := dm.CopyBlobPStoDX(ctx, &fftypes.Data{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Blob: &fftypes.BlobRef{ - 
Hash: &hash, - Public: "public-ref", - }, - }) - assert.NoError(t, err) - assert.Equal(t, "/private/loc", blob.PayloadRef) - assert.Equal(t, hash, *blob.Hash) - -} - -func TestCopyBlobPStoDXHashMismatch(t *testing.T) { - - dm, ctx, cancel := newTestDataManager(t) - defer cancel() - - payload := []byte(`some data`) - var hash fftypes.Bytes32 = sha256.Sum256(payload) - - mpi := dm.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", ctx, "public-ref").Return(io.NopCloser(bytes.NewReader(payload)), nil) - - mdx := dm.exchange.(*dataexchangemocks.Plugin) - dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("", fftypes.NewRandB32(), int64(len(payload)), nil) - dxUpload.RunFn = func(a mock.Arguments) { - _, err := ioutil.ReadAll(a[3].(io.Reader)) - assert.Nil(t, err) - } - - mdi := dm.database.(*databasemocks.Plugin) - mdi.On("InsertBlob", ctx, mock.Anything).Return(nil) - - _, err := dm.CopyBlobPStoDX(ctx, &fftypes.Data{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Blob: &fftypes.BlobRef{ - Hash: &hash, - Public: "public-ref", - }, - }) - assert.Regexp(t, "FF10238", err) - -} - -func TestCopyBlobPStoPublicHashMismatch(t *testing.T) { - - dm, ctx, cancel := newTestDataManager(t) - defer cancel() - - payload := []byte(`some data`) - var correctHash fftypes.Bytes32 = sha256.Sum256(payload) - - mpi := dm.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", ctx, "public-ref").Return(io.NopCloser(bytes.NewReader(payload)), nil) - - mdx := dm.exchange.(*dataexchangemocks.Plugin) - dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("", &correctHash, int64(len(payload)), nil) - dxUpload.RunFn = func(a mock.Arguments) { - _, err := ioutil.ReadAll(a[3].(io.Reader)) - assert.Nil(t, err) - } - - mdi := dm.database.(*databasemocks.Plugin) - mdi.On("InsertBlob", ctx, mock.Anything).Return(nil) - - _, err := dm.CopyBlobPStoDX(ctx, &fftypes.Data{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Blob: &fftypes.BlobRef{ - Hash: fftypes.NewRandB32(), - Public: "public-ref", - }, - }) - assert.Regexp(t, "FF10238", err) - -} - -func TestCopyBlobPStoDXInsertFail(t *testing.T) { - - dm, ctx, cancel := newTestDataManager(t) - defer cancel() - - payload := []byte(`some data`) - var hash fftypes.Bytes32 = sha256.Sum256(payload) - - mpi := dm.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", ctx, "public-ref").Return(io.NopCloser(bytes.NewReader(payload)), nil) - - mdx := dm.exchange.(*dataexchangemocks.Plugin) - dxUpload := mdx.On("UploadBLOB", ctx, "ns1", mock.Anything, mock.Anything).Return("", &hash, int64(len(payload)), nil) - dxUpload.RunFn = func(a mock.Arguments) { - _, err := ioutil.ReadAll(a[3].(io.Reader)) - assert.Nil(t, err) - } - - mdi := dm.database.(*databasemocks.Plugin) - mdi.On("InsertBlob", ctx, mock.Anything).Return(fmt.Errorf("pop")) - - _, err := dm.CopyBlobPStoDX(ctx, &fftypes.Data{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Blob: &fftypes.BlobRef{ - Hash: &hash, - Public: "public-ref", - }, - }) - assert.EqualError(t, err, "pop") - -} - -func TestCopyBlobPStoUploadFail(t *testing.T) { - - dm, ctx, cancel := newTestDataManager(t) - defer cancel() - - payload := []byte(`some data`) - var hash fftypes.Bytes32 = sha256.Sum256(payload) - - mpi := dm.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", ctx, "public-ref").Return(io.NopCloser(bytes.NewReader(payload)), nil) - - mdx := dm.exchange.(*dataexchangemocks.Plugin) - dxUpload := mdx.On("UploadBLOB", ctx, "ns1", 
mock.Anything, mock.Anything).Return("", nil, int64(len(payload)), fmt.Errorf("pop")) - dxUpload.RunFn = func(a mock.Arguments) { - _, err := ioutil.ReadAll(a[3].(io.Reader)) - assert.Nil(t, err) - } - - _, err := dm.CopyBlobPStoDX(ctx, &fftypes.Data{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Blob: &fftypes.BlobRef{ - Hash: &hash, - Public: "public-ref", - }, - }) - assert.EqualError(t, err, "pop") - -} - -func TestCopyBlobPStoDownloadFail(t *testing.T) { - - dm, ctx, cancel := newTestDataManager(t) - defer cancel() - - payload := []byte(`some data`) - var hash fftypes.Bytes32 = sha256.Sum256(payload) - - mpi := dm.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", ctx, "public-ref").Return(nil, fmt.Errorf("pop")) - - _, err := dm.CopyBlobPStoDX(ctx, &fftypes.Data{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Blob: &fftypes.BlobRef{ - Hash: &hash, - Public: "public-ref", - }, - }) - assert.EqualError(t, err, "pop") - -} - -func TestCopyBlobPStoDownloadNotFound(t *testing.T) { - - dm, ctx, cancel := newTestDataManager(t) - defer cancel() - - payload := []byte(`some data`) - var hash fftypes.Bytes32 = sha256.Sum256(payload) - - mpi := dm.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", ctx, "public-ref").Return(nil, nil) - - blob, err := dm.CopyBlobPStoDX(ctx, &fftypes.Data{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Blob: &fftypes.BlobRef{ - Hash: &hash, - Public: "public-ref", - }, - }) - assert.NoError(t, err) - assert.Nil(t, blob) - -} - func TestDownloadBlobOk(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) diff --git a/internal/data/data_manager.go b/internal/data/data_manager.go index 7d2ac40b2a..c0bdfac0ef 100644 --- a/internal/data/data_manager.go +++ b/internal/data/data_manager.go @@ -34,16 +34,21 @@ import ( type Manager interface { CheckDatatype(ctx context.Context, ns string, datatype *fftypes.Datatype) error - ValidateAll(ctx context.Context, data []*fftypes.Data) (valid bool, err error) - GetMessageData(ctx context.Context, msg *fftypes.Message, withValue bool) (data []*fftypes.Data, foundAll bool, err error) - ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, error) - ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, []*fftypes.DataAndBlob, error) + ValidateAll(ctx context.Context, data fftypes.DataArray) (valid bool, err error) + GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data fftypes.DataArray, foundAllData bool, err error) + GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data fftypes.DataArray, foundAll bool, err error) + PeekMessageCache(ctx context.Context, id *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data fftypes.DataArray) + UpdateMessageCache(msg *fftypes.Message, data fftypes.DataArray) + UpdateMessageIfCached(ctx context.Context, msg *fftypes.Message) + ResolveInlineData(ctx context.Context, msg *NewMessage) error + WriteNewMessage(ctx context.Context, newMsg *NewMessage) error VerifyNamespaceExists(ctx context.Context, ns string) error UploadJSON(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (*fftypes.Data, error) UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) - CopyBlobPStoDX(ctx context.Context, data *fftypes.Data) (blob *fftypes.Blob, err error) 
DownloadBLOB(ctx context.Context, ns, dataID string) (*fftypes.Blob, io.ReadCloser, error) + HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) + WaitStop() } type dataManager struct { @@ -52,8 +57,39 @@ type dataManager struct { exchange dataexchange.Plugin validatorCache *ccache.Cache validatorCacheTTL time.Duration + messageCache *ccache.Cache + messageCacheTTL time.Duration + messageWriter *messageWriter } +type messageCacheEntry struct { + msg *fftypes.Message + data []*fftypes.Data + size int64 +} + +func (mce *messageCacheEntry) Size() int64 { + return mce.size +} + +// Messages have fields that are mutable, in two categories +// +// 1) Can change multiple times like state - you cannot rely on the cache for these +// 2) Can go from being un-set, to being set, and once set are immutable. +// For (2) the cache provides a set of CacheReadOption modifiers that makes it safe to query the cache, +// even if the cache we slow to update asynchronously (active/active cluster being the ultimate example here, +// but from code inspection this is possible in the current cache). +type CacheReadOption int + +const ( + // If you use CRORequirePublicBlobRefs then the cache will return a miss, if all data blobs do not have a `public` reference string + CRORequirePublicBlobRefs CacheReadOption = iota + // If you use CRORequirePins then the cache will return a miss, if the number of pins does not match the number of topics in the message. + CRORequirePins + // If you use CRORequestBatchID then the cache will return a miss, if there is no BatchID set. + CRORequireBatchID +) + func NewDataManager(ctx context.Context, di database.Plugin, pi sharedstorage.Plugin, dx dataexchange.Plugin) (Manager, error) { if di == nil || pi == nil || dx == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) @@ -62,6 +98,7 @@ func NewDataManager(ctx context.Context, di database.Plugin, pi sharedstorage.Pl database: di, exchange: dx, validatorCacheTTL: config.GetDuration(config.ValidatorCacheTTL), + messageCacheTTL: config.GetDuration(config.MessageCacheTTL), } dm.blobStore = blobStore{ dm: dm, @@ -74,6 +111,17 @@ func NewDataManager(ctx context.Context, di database.Plugin, pi sharedstorage.Pl ccache.Configure(). MaxSize(config.GetByteSize(config.ValidatorCacheSize)), ) + dm.messageCache = ccache.New( + // We use a LRU cache with a size-aware max + ccache.Configure(). + MaxSize(config.GetByteSize(config.MessageCacheSize)), + ) + dm.messageWriter = newMessageWriter(ctx, di, &messageWriterConf{ + workerCount: config.GetInt(config.MessageWriterCount), + batchTimeout: config.GetDuration(config.MessageWriterBatchTimeout), + maxInserts: config.GetInt(config.MessageWriterBatchMaxInserts), + }) + dm.messageWriter.start() return dm, nil } @@ -131,15 +179,113 @@ func (dm *dataManager) getValidatorForDatatype(ctx context.Context, ns string, v return v, err } -// GetMessageData looks for all the data attached to the message. +// GetMessageWithData performs a cached lookup of a message with all of the associated data. +// - Use this in performance sensitive code, but note mutable fields like the status of the +// message CANNOT be relied upon (due to the caching). 
+func (dm *dataManager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data fftypes.DataArray, foundAllData bool, err error) {
+	if mce := dm.queryMessageCache(ctx, msgID, options...); mce != nil {
+		return mce.msg, mce.data, true, nil
+	}
+	msg, err = dm.database.GetMessageByID(ctx, msgID)
+	if err != nil || msg == nil {
+		return nil, nil, false, err
+	}
+	data, foundAllData, err = dm.dataLookupAndCache(ctx, msg)
+	return msg, data, foundAllData, err
+}
+
+// GetMessageDataCached looks for all the data attached to the message, with caching.
 // It only returns persistence errors.
 // For all cases where the data is not found (or the hashes mismatch)
-func (dm *dataManager) GetMessageData(ctx context.Context, msg *fftypes.Message, withValue bool) (data []*fftypes.Data, foundAll bool, err error) {
+func (dm *dataManager) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...CacheReadOption) (data fftypes.DataArray, foundAll bool, err error) {
+	if mce := dm.queryMessageCache(ctx, msg.Header.ID, options...); mce != nil {
+		return mce.data, true, nil
+	}
+	return dm.dataLookupAndCache(ctx, msg)
+}
+
+// dataLookupAndCache is the common function that can look up and cache a message with its data
+func (dm *dataManager) dataLookupAndCache(ctx context.Context, msg *fftypes.Message) (data fftypes.DataArray, foundAllData bool, err error) {
+	data, foundAllData, err = dm.getMessageData(ctx, msg)
+	if err != nil {
+		return nil, false, err
+	}
+	if !foundAllData {
+		return data, false, err
+	}
+	dm.UpdateMessageCache(msg, data)
+	return data, true, nil
+}
+
+func (dm *dataManager) PeekMessageCache(ctx context.Context, id *fftypes.UUID, options ...CacheReadOption) (msg *fftypes.Message, data fftypes.DataArray) {
+	mce := dm.queryMessageCache(ctx, id, options...)
+	if mce != nil {
+		return mce.msg, mce.data
+	}
+	return nil, nil
+}
+
+func (dm *dataManager) queryMessageCache(ctx context.Context, id *fftypes.UUID, options ...CacheReadOption) *messageCacheEntry {
+	cached := dm.messageCache.Get(id.String())
+	if cached == nil {
+		log.L(ctx).Debugf("Cache miss for message %s", id)
+		return nil
+	}
+	mce := cached.Value().(*messageCacheEntry)
+	for _, opt := range options {
+		switch opt {
+		case CRORequirePublicBlobRefs:
+			for idx, d := range mce.data {
+				if d.Blob != nil && d.Blob.Public == "" {
+					log.L(ctx).Debugf("Cache miss for message %s - data %d (%s) is missing public blob ref", mce.msg.Header.ID, idx, d.ID)
+					return nil
+				}
+			}
+		case CRORequirePins:
+			if len(mce.msg.Header.Topics) != len(mce.msg.Pins) {
+				log.L(ctx).Debugf("Cache miss for message %s - missing pins (topics=%d,pins=%d)", mce.msg.Header.ID, len(mce.msg.Header.Topics), len(mce.msg.Pins))
+				return nil
+			}
+		case CRORequireBatchID:
+			if mce.msg.BatchID == nil {
+				log.L(ctx).Debugf("Cache miss for message %s - missing batch ID", mce.msg.Header.ID)
+				return nil
+			}
+		}
+	}
+	log.L(ctx).Debugf("Cache hit for message %s", id)
+	cached.Extend(dm.messageCacheTTL)
+	return mce
+}
+
+// UpdateMessageCache pushes an entry to the message cache. It is exposed out of the package, so that
+// code which generates (or augments) message/data can populate the cache.
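+//
+// A minimal sketch of the intended call pattern (the pin-assignment step shown here is
+// an assumed example of "augmenting" a message): once later-set fields are populated,
+// re-push the entry so readers using CRORequirePins can get a cache hit:
+//
+//	msg.Pins = pins                  // message now carries one pin per topic
+//	dm.UpdateMessageCache(msg, data)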
+func (dm *dataManager) UpdateMessageCache(msg *fftypes.Message, data fftypes.DataArray) {
+	cacheEntry := &messageCacheEntry{
+		msg:  msg,
+		data: data,
+		size: msg.EstimateSize(true),
+	}
+	dm.messageCache.Set(msg.Header.ID.String(), cacheEntry, dm.messageCacheTTL)
+	log.L(context.Background()).Debugf("Added to message cache: %s (topics=%d,pins=%d)", msg.Header.ID.String(), len(msg.Header.Topics), len(msg.Pins))
+}
+
+// UpdateMessageIfCached is used to notify the cache that fields of a message, which were not initially filled in, have now been filled in.
+// It does not guarantee the cache is up to date - the CacheReadOptions should still be used to check you have the updated data -
+// but calling it should reduce the possibility of the CacheReadOptions reporting a miss.
+func (dm *dataManager) UpdateMessageIfCached(ctx context.Context, msg *fftypes.Message) {
+	mce := dm.queryMessageCache(ctx, msg.Header.ID)
+	if mce != nil {
+		dm.UpdateMessageCache(msg, mce.data)
+	}
+}
+
+func (dm *dataManager) getMessageData(ctx context.Context, msg *fftypes.Message) (data fftypes.DataArray, foundAll bool, err error) {
 	// Load all the data - must all be present for us to send
-	data = make([]*fftypes.Data, 0, len(msg.Data))
+	data = make(fftypes.DataArray, 0, len(msg.Data))
 	foundAll = true
 	for i, dataRef := range msg.Data {
-		d, err := dm.resolveRef(ctx, msg.Header.Namespace, dataRef, withValue)
+		d, err := dm.resolveRef(ctx, msg.Header.Namespace, dataRef)
 		if err != nil {
 			return nil, false, err
 		}
@@ -153,7 +299,7 @@ func (dm *dataManager) GetMessageData(ctx context.Context, msg *fftypes.Message,
 	return data, foundAll, nil
 }
 
-func (dm *dataManager) ValidateAll(ctx context.Context, data []*fftypes.Data) (valid bool, err error) {
+func (dm *dataManager) ValidateAll(ctx context.Context, data fftypes.DataArray) (valid bool, err error) {
 	for _, d := range data {
 		if d.Datatype != nil && d.Validator != fftypes.ValidatorTypeNone {
 			v, err := dm.getValidatorForDatatype(ctx, d.Namespace, d.Validator, d.Datatype)
@@ -173,12 +319,12 @@ func (dm *dataManager) ValidateAll(ctx context.Context, data []*fftypes.Data) (v
 	return true, nil
 }
 
-func (dm *dataManager) resolveRef(ctx context.Context, ns string, dataRef *fftypes.DataRef, withValue bool) (*fftypes.Data, error) {
+func (dm *dataManager) resolveRef(ctx context.Context, ns string, dataRef *fftypes.DataRef) (*fftypes.Data, error) {
 	if dataRef == nil || dataRef.ID == nil {
 		log.L(ctx).Warnf("data is nil")
 		return nil, nil
 	}
-	d, err := dm.database.GetDataByID(ctx, dataRef.ID, withValue)
+	d, err := dm.database.GetDataByID(ctx, dataRef.ID, true)
 	if err != nil {
 		return nil, err
 	}
@@ -237,14 +383,20 @@ func (dm *dataManager) checkValidation(ctx context.Context, ns string, validator
 	return nil
 }
 
-func (dm *dataManager) validateAndStore(ctx context.Context, ns string, validator fftypes.ValidatorType, datatype *fftypes.DatatypeRef, value *fftypes.JSONAny, blobRef *fftypes.BlobRef) (data *fftypes.Data, blob *fftypes.Blob, err error) {
+func (dm *dataManager) validateInputData(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (data *fftypes.Data, err error) {
+
+	validator := inData.Validator
+	datatype := inData.Datatype
+	value := inData.Value
+	blobRef := inData.Blob
 	if err := dm.checkValidation(ctx, ns, validator, datatype, value); err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
-	if blob, err = dm.resolveBlob(ctx, blobRef); err != nil {
-		return nil, nil, err
+	blob, err := dm.resolveBlob(ctx, blobRef)
+	if err != nil {
+		return nil, err
 	}
 
 	// Ok, we're good to generate the full
data payload and save it @@ -256,92 +408,121 @@ func (dm *dataManager) validateAndStore(ctx context.Context, ns string, validato Blob: blobRef, } err = data.Seal(ctx, blob) - if err == nil { - err = dm.database.UpsertData(ctx, data, database.UpsertOptimizationNew) - } if err != nil { - return nil, nil, err + return nil, err } - return data, blob, nil + return data, nil } -func (dm *dataManager) validateAndStoreInlined(ctx context.Context, ns string, value *fftypes.DataRefOrValue) (*fftypes.Data, *fftypes.Blob, *fftypes.DataRef, error) { - data, blob, err := dm.validateAndStore(ctx, ns, value.Validator, value.Datatype, value.Value, value.Blob) +func (dm *dataManager) UploadJSON(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (*fftypes.Data, error) { + data, err := dm.validateInputData(ctx, ns, inData) if err != nil { - return nil, nil, nil, err + return nil, err + } + if err = dm.messageWriter.WriteData(ctx, data); err != nil { + return nil, err } - - // Return a ref to the newly saved data - return data, blob, &fftypes.DataRef{ - ID: data.ID, - Hash: data.Hash, - ValueSize: data.ValueSize, - }, nil -} - -func (dm *dataManager) UploadJSON(ctx context.Context, ns string, inData *fftypes.DataRefOrValue) (*fftypes.Data, error) { - data, _, err := dm.validateAndStore(ctx, ns, inData.Validator, inData.Datatype, inData.Value, inData.Blob) return data, err } -func (dm *dataManager) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (refs fftypes.DataRefs, err error) { - refs, _, err = dm.resolveInlineData(ctx, ns, inData, false) - return refs, err -} - -// ResolveInlineDataBroadcast ensures the data object are stored, and returns a list of any data that does not currently -// have a shared storage reference, and hence must be published to sharedstorage before a broadcast message can be sent. -// We deliberately do NOT perform those publishes inside of this action, as we expect to be in a RunAsGroup (trnasaction) -// at this point, and hence expensive things like a multi-megabyte upload should be decoupled by our caller. -func (dm *dataManager) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (refs fftypes.DataRefs, dataToPublish []*fftypes.DataAndBlob, err error) { - return dm.resolveInlineData(ctx, ns, inData, true) -} +// ResolveInlineData processes an input message that is going to be stored, to see which of the data +// elements are new, and which are existing. It verifies everything that points to an existing +// reference, and returns a list of what data is new separately - so that it can be stored by the +// message writer when the sending code is ready. 
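+//
+// An illustrative sketch (existingID/existingHash are assumed values): given a message
+// whose inline data mixes an existing reference with a new inline value,
+//
+//	newMsg.Message.InlineData = fftypes.InlineData{
+//		{DataRef: fftypes.DataRef{ID: existingID, Hash: existingHash}}, // verified, not re-stored
+//		{Value: fftypes.JSONAnyPtr(`"new inline value"`)},              // validated, added to NewData
+//	}
+//
+// after a successful call, AllData holds both items, NewData holds only the second, and
+// Message.Data holds the refs for both.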
+func (dm *dataManager) ResolveInlineData(ctx context.Context, newMessage *NewMessage) (err error) { -func (dm *dataManager) resolveInlineData(ctx context.Context, ns string, inData fftypes.InlineData, broadcast bool) (refs fftypes.DataRefs, dataToPublish []*fftypes.DataAndBlob, err error) { - - refs = make(fftypes.DataRefs, len(inData)) - if broadcast { - dataToPublish = make([]*fftypes.DataAndBlob, 0, len(inData)) + if newMessage.Message == nil { + return i18n.NewError(ctx, i18n.MsgNilOrNullObject) } + + inData := newMessage.Message.InlineData + msg := newMessage.Message + newMessage.AllData = make(fftypes.DataArray, len(newMessage.Message.InlineData)) for i, dataOrValue := range inData { - var data *fftypes.Data - var blob *fftypes.Blob + var d *fftypes.Data switch { case dataOrValue.ID != nil: // If an ID is supplied, then it must be a reference to existing data - data, err = dm.resolveRef(ctx, ns, &dataOrValue.DataRef, false /* do not need the value */) + d, err = dm.resolveRef(ctx, msg.Header.Namespace, &dataOrValue.DataRef) if err != nil { - return nil, nil, err - } - if data == nil { - return nil, nil, i18n.NewError(ctx, i18n.MsgDataReferenceUnresolvable, i) + return err } - refs[i] = &fftypes.DataRef{ - ID: data.ID, - Hash: data.Hash, - ValueSize: data.ValueSize, + if d == nil { + return i18n.NewError(ctx, i18n.MsgDataReferenceUnresolvable, i) } - if blob, err = dm.resolveBlob(ctx, data.Blob); err != nil { - return nil, nil, err + if _, err = dm.resolveBlob(ctx, d.Blob); err != nil { + return err } case dataOrValue.Value != nil || dataOrValue.Blob != nil: // We've got a Value, so we can validate + store it - if data, blob, refs[i], err = dm.validateAndStoreInlined(ctx, ns, dataOrValue); err != nil { - return nil, nil, err + if d, err = dm.validateInputData(ctx, msg.Header.Namespace, dataOrValue); err != nil { + return err } + newMessage.NewData = append(newMessage.NewData, d) default: // We have nothing - this must be a mistake - return nil, nil, i18n.NewError(ctx, i18n.MsgDataMissing, i) + return i18n.NewError(ctx, i18n.MsgDataMissing, i) } + newMessage.AllData[i] = d + + } + newMessage.Message.Data = newMessage.AllData.Refs() + return nil +} + +// HydrateBatch fetches the full messages for a persisted batch, ready for transmission +func (dm *dataManager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { + + var manifest fftypes.BatchManifest + err := persistedBatch.Manifest.Unmarshal(ctx, &manifest) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, fmt.Sprintf("batch %s manifest", persistedBatch.ID)) + } - // If the data is being resolved for public broadcast, and there is a blob attachment, that blob - // needs to be published by our calller - if broadcast && blob != nil && data.Blob.Public == "" { - dataToPublish = append(dataToPublish, &fftypes.DataAndBlob{ - Data: data, - Blob: blob, - }) + batch := persistedBatch.GenInflight(make([]*fftypes.Message, len(manifest.Messages)), make(fftypes.DataArray, len(manifest.Data))) + + for i, mr := range manifest.Messages { + m, err := dm.database.GetMessageByID(ctx, mr.ID) + if err != nil || m == nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgFailedToRetrieve, "message", mr.ID) + } + // BatchMessage removes any fields that could change after the batch was first assembled on the sender + batch.Payload.Messages[i] = m.BatchMessage() + } + for i, dr := range manifest.Data { + d, err := dm.database.GetDataByID(ctx, dr.ID, true) + if err != nil || d 
== nil {
+			return nil, i18n.WrapError(ctx, err, i18n.MsgFailedToRetrieve, "data", dr.ID)
 		}
+		// BatchData removes any fields that could change after the batch was first assembled on the sender
+		batch.Payload.Data[i] = d.BatchData(persistedBatch.Type)
+	}
+
+	return batch, nil
+}
+
+// WriteNewMessage dispatches the writing of the message and associated data, then blocks until the background
+// worker (or foreground if no DB concurrency) has written. The caller MUST NOT call this inside of a
+// DB RunAsGroup - because if a large number of routines enter the same function they could starve the background
+// worker of the spare connection required to execute (and thus deadlock).
+func (dm *dataManager) WriteNewMessage(ctx context.Context, newMsg *NewMessage) error {
+
+	if newMsg.Message == nil {
+		return i18n.NewError(ctx, i18n.MsgNilOrNullObject)
 	}
-	return refs, dataToPublish, nil
+
+	// We add the message to the cache before we write it, because the batch aggregator might
+	// pick up our message from the message-writer before we return. The batch processor
+	// writes a more authoritative cache entry, with pins/batchID etc.
+	dm.UpdateMessageCache(&newMsg.Message.Message, newMsg.AllData)
+
+	return dm.messageWriter.WriteNewMessage(ctx, newMsg)
+}
+
+func (dm *dataManager) WaitStop() {
+	dm.messageWriter.close()
 }
diff --git a/internal/data/data_manager_test.go b/internal/data/data_manager_test.go
index 6a88691775..fedc9bd687 100644
--- a/internal/data/data_manager_test.go
+++ b/internal/data/data_manager_test.go
@@ -32,13 +32,39 @@ import (
 )
 
 func newTestDataManager(t *testing.T) (*dataManager, context.Context, func()) {
+	config.Reset()
+	config.Set(config.MessageWriterCount, 1)
 	ctx, cancel := context.WithCancel(context.Background())
 	mdi := &databasemocks.Plugin{}
+	mdi.On("Capabilities").Return(&database.Capabilities{
+		Concurrency: true,
+	})
 	mdx := &dataexchangemocks.Plugin{}
 	mps := &sharedstoragemocks.Plugin{}
 	dm, err := NewDataManager(ctx, mdi, mps, mdx)
 	assert.NoError(t, err)
-	return dm.(*dataManager), ctx, cancel
+	return dm.(*dataManager), ctx, func() {
+		cancel()
+		dm.WaitStop()
+	}
+}
+
+func testNewMessage() (*fftypes.UUID, *fftypes.Bytes32, *NewMessage) {
+	dataID := fftypes.NewUUID()
+	dataHash := fftypes.NewRandB32()
+	return dataID, dataHash, &NewMessage{
+		Message: &fftypes.MessageInOut{
+			Message: fftypes.Message{
+				Header: fftypes.MessageHeader{
+					ID:        fftypes.NewUUID(),
+					Namespace: "ns1",
+				},
+			},
+			InlineData: fftypes.InlineData{
+				{DataRef: fftypes.DataRef{ID: dataID, Hash: dataHash}},
+			},
+		},
+	}
 }
 
 func TestValidateE2E(t *testing.T) {
@@ -73,7 +99,7 @@ func TestValidateE2E(t *testing.T) {
 		Version: "0.0.1",
 	}
 	mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(dt, nil)
-	isValid, err := dm.ValidateAll(ctx, []*fftypes.Data{data})
+	isValid, err := dm.ValidateAll(ctx, fftypes.DataArray{data})
 	assert.Regexp(t, "FF10198", err)
 	assert.False(t, isValid)
 
@@ -86,12 +112,104 @@ func TestValidateE2E(t *testing.T) {
 	err = v.Validate(ctx, data)
 	assert.NoError(t, err)
 
-	isValid, err = dm.ValidateAll(ctx, []*fftypes.Data{data})
+	isValid, err = dm.ValidateAll(ctx, fftypes.DataArray{data})
 	assert.NoError(t, err)
 	assert.True(t, isValid)
 
 }
 
+func TestWriteNewMessageE2E(t *testing.T) {
+
+	dm, ctx, cancel := newTestDataManager(t)
+	defer cancel()
+
+	dt := &fftypes.Datatype{
+		ID:        fftypes.NewUUID(),
+		Validator: fftypes.ValidatorTypeJSON,
+		Value:     fftypes.JSONAnyPtr(`{}`),
+		Name:      "customer",
+		
Namespace: "0.0.1", + } + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(dt, nil).Once() + mdi.On("RunAsGroup", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + err := args[1].(func(context.Context) error)(ctx) + assert.NoError(t, err) + }).Return(nil) + mdi.On("InsertDataArray", mock.Anything, mock.Anything).Return(nil).Once() + + data1, err := dm.UploadJSON(ctx, "ns1", &fftypes.DataRefOrValue{ + Value: fftypes.JSONAnyPtr(`"message 1 - data A"`), + Validator: fftypes.ValidatorTypeJSON, + Datatype: &fftypes.DatatypeRef{ + Name: "customer", + Version: "0.0.1", + }, + }) + assert.NoError(t, err) + + mdi.On("GetDataByID", mock.Anything, data1.ID, true).Return(data1, nil).Once() + + _, _, newMsg1 := testNewMessage() + newMsg1.Message.InlineData = fftypes.InlineData{ + {DataRef: fftypes.DataRef{ + ID: data1.ID, + Hash: data1.Hash, + }}, + {Value: fftypes.JSONAnyPtr(`"message 1 - data B"`)}, + {Value: fftypes.JSONAnyPtr(`"message 1 - data C"`)}, + } + _, _, newMsg2 := testNewMessage() + newMsg2.Message.InlineData = fftypes.InlineData{ + {Value: fftypes.JSONAnyPtr(`"message 2 - data B"`)}, + {Value: fftypes.JSONAnyPtr(`"message 2 - data C"`)}, + } + + err = dm.ResolveInlineData(ctx, newMsg1) + assert.NoError(t, err) + err = dm.ResolveInlineData(ctx, newMsg2) + assert.NoError(t, err) + + allData := append(append(fftypes.DataArray{}, newMsg1.NewData...), newMsg2.NewData...) + assert.Len(t, allData, 4) + + mdi.On("InsertMessages", mock.Anything, mock.MatchedBy(func(msgs []*fftypes.Message) bool { + msgsByID := make(map[fftypes.UUID]bool) + for _, msg := range msgs { + msgsByID[*msg.Header.ID] = true + } + return len(msgs) == 2 && + msgsByID[*newMsg1.Message.Header.ID] && + msgsByID[*newMsg2.Message.Header.ID] + })).Return(nil).Once() + mdi.On("InsertDataArray", mock.Anything, mock.MatchedBy(func(dataArray fftypes.DataArray) bool { + dataByID := make(map[fftypes.UUID]bool) + for _, data := range dataArray { + dataByID[*data.ID] = true + } + return len(dataArray) == 4 && + dataByID[*newMsg1.AllData[1].ID] && + dataByID[*newMsg1.AllData[2].ID] && + dataByID[*newMsg2.AllData[0].ID] && + dataByID[*newMsg2.AllData[1].ID] + })).Return(nil).Once() + + results := make(chan error) + + go func() { + results <- dm.WriteNewMessage(ctx, newMsg1) + }() + go func() { + results <- dm.WriteNewMessage(ctx, newMsg2) + }() + + assert.NoError(t, <-results) + assert.NoError(t, <-results) + + mdi.AssertExpectations(t) +} + func TestInitBadDeps(t *testing.T) { _, err := NewDataManager(context.Background(), nil, nil, nil) assert.Regexp(t, "FF10128", err) @@ -149,7 +267,7 @@ func TestValidateBadHash(t *testing.T) { Namespace: "0.0.1", } mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(dt, nil).Once() - _, err := dm.ValidateAll(ctx, []*fftypes.Data{data}) + _, err := dm.ValidateAll(ctx, fftypes.DataArray{data}) assert.Regexp(t, "FF10201", err) } @@ -160,10 +278,10 @@ func TestGetMessageDataDBError(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(nil, fmt.Errorf("pop")) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ + data, foundAll, err := dm.GetMessageDataCached(ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Data: fftypes.DataRefs{{ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}}, - }, true) + }) assert.Nil(t, data) assert.False(t, foundAll) assert.EqualError(t, err, 
"pop") @@ -176,10 +294,10 @@ func TestGetMessageDataNilEntry(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(nil, nil) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ + data, foundAll, err := dm.GetMessageDataCached(ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Data: fftypes.DataRefs{nil}, - }, true) + }) assert.Empty(t, data) assert.False(t, foundAll) assert.NoError(t, err) @@ -192,10 +310,10 @@ func TestGetMessageDataNotFound(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(nil, nil) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ + data, foundAll, err := dm.GetMessageDataCached(ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Data: fftypes.DataRefs{{ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}}, - }, true) + }) assert.Empty(t, data) assert.False(t, foundAll) assert.NoError(t, err) @@ -212,10 +330,10 @@ func TestGetMessageDataHashMismatch(t *testing.T) { ID: dataID, Hash: fftypes.NewRandB32(), }, nil) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ + data, foundAll, err := dm.GetMessageDataCached(ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, Data: fftypes.DataRefs{{ID: dataID, Hash: fftypes.NewRandB32()}}, - }, true) + }) assert.Empty(t, data) assert.False(t, foundAll) assert.NoError(t, err) @@ -229,19 +347,29 @@ func TestGetMessageDataOk(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) dataID := fftypes.NewUUID() hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(&fftypes.Data{ ID: dataID, Hash: hash, - }, nil) - data, foundAll, err := dm.GetMessageData(ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, - Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, - }, true) + }, nil).Once() + data, foundAll, err := dm.GetMessageDataCached(ctx, msg) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + // Check cache kicks in for second call + data, foundAll, err = dm.GetMessageDataCached(ctx, msg) assert.NotEmpty(t, data) assert.Equal(t, *dataID, *data[0].ID) assert.True(t, foundAll) assert.NoError(t, err) + mdi.AssertExpectations(t) } func TestCheckDatatypeVerifiesTheSchema(t *testing.T) { @@ -256,9 +384,23 @@ func TestResolveInlineDataEmpty(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{}) + + newMsg := &NewMessage{ + Message: &fftypes.MessageInOut{ + Message: fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Namespace: "ns1", + }, + }, + InlineData: fftypes.InlineData{}, + }, + } + + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Empty(t, refs) + assert.Empty(t, newMsg.AllData) + assert.Empty(t, newMsg.Message.Data) } @@ -267,34 +409,32 @@ func TestResolveInlineDataRefIDOnlyOK(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) - dataID := fftypes.NewUUID() - dataHash := fftypes.NewRandB32() + dataID, dataHash, newMsg := testNewMessage() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + 
mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns1", Hash: dataHash, }, nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ - {DataRef: fftypes.DataRef{ID: dataID}}, - }) + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Len(t, refs, 1) - assert.Equal(t, dataID, refs[0].ID) - assert.Equal(t, dataHash, refs[0].Hash) + assert.Len(t, newMsg.AllData, 1) + assert.Len(t, newMsg.Message.Data, 1) + assert.Equal(t, dataID, newMsg.AllData[0].ID) + assert.Equal(t, dataHash, newMsg.AllData[0].Hash) + assert.Empty(t, newMsg.NewData) } -func TestResolveInlineDataBroadcastDataToPublish(t *testing.T) { +func TestResolveInlineDataDataToPublish(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() mdi := dm.database.(*databasemocks.Plugin) - dataID := fftypes.NewUUID() - dataHash := fftypes.NewRandB32() + dataID, dataHash, newMsg := testNewMessage() blobHash := fftypes.NewRandB32() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns1", Hash: dataHash, @@ -307,28 +447,24 @@ func TestResolveInlineDataBroadcastDataToPublish(t *testing.T) { PayloadRef: "blob/1", }, nil) - refs, dtp, err := dm.ResolveInlineDataBroadcast(ctx, "ns1", fftypes.InlineData{ - {DataRef: fftypes.DataRef{ID: dataID}}, - }) + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Len(t, refs, 1) - assert.Equal(t, dataID, refs[0].ID) - assert.Equal(t, dataHash, refs[0].Hash) - assert.Len(t, dtp, 1) - assert.Equal(t, refs[0].ID, dtp[0].Data.ID) - assert.Equal(t, "blob/1", dtp[0].Blob.PayloadRef) + assert.Len(t, newMsg.AllData, 1) + assert.Len(t, newMsg.Message.Data, 1) + assert.Empty(t, newMsg.NewData) + assert.Equal(t, dataID, newMsg.AllData[0].ID) + assert.Equal(t, dataHash, newMsg.AllData[0].Hash) } -func TestResolveInlineDataBroadcastResolveBlobFail(t *testing.T) { +func TestResolveInlineDataResolveBlobFail(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() mdi := dm.database.(*databasemocks.Plugin) - dataID := fftypes.NewUUID() - dataHash := fftypes.NewRandB32() + dataID, dataHash, newMsg := testNewMessage() blobHash := fftypes.NewRandB32() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns1", Hash: dataHash, @@ -338,9 +474,7 @@ func TestResolveInlineDataBroadcastResolveBlobFail(t *testing.T) { }, nil) mdi.On("GetBlobMatchingHash", ctx, blobHash).Return(nil, fmt.Errorf("pop")) - _, _, err := dm.ResolveInlineDataBroadcast(ctx, "ns1", fftypes.InlineData{ - {DataRef: fftypes.DataRef{ID: dataID}}, - }) + err := dm.ResolveInlineData(ctx, newMsg) assert.EqualError(t, err, "pop") } @@ -349,20 +483,16 @@ func TestResolveInlineDataRefBadNamespace(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) - dataID := fftypes.NewUUID() - dataHash := fftypes.NewRandB32() + dataID, dataHash, newMsg := testNewMessage() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns2", Hash: dataHash, }, nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ - {DataRef: fftypes.DataRef{ID: dataID, Hash: dataHash}}, - }) + err := dm.ResolveInlineData(ctx, newMsg) assert.Regexp(t, "FF10204", err) - assert.Empty(t, refs) } func TestResolveInlineDataRefBadHash(t 
*testing.T) { @@ -370,20 +500,24 @@ func TestResolveInlineDataRefBadHash(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) - dataID := fftypes.NewUUID() - dataHash := fftypes.NewRandB32() + dataID, dataHash, newMsg := testNewMessage() - mdi.On("GetDataByID", ctx, dataID, false).Return(&fftypes.Data{ + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ ID: dataID, Namespace: "ns2", Hash: dataHash, }, nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ - {DataRef: fftypes.DataRef{ID: dataID, Hash: fftypes.NewRandB32()}}, - }) + err := dm.ResolveInlineData(ctx, newMsg) assert.Regexp(t, "FF10204", err) - assert.Empty(t, refs) +} + +func TestResolveInlineDataNilMsg(t *testing.T) { + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + err := dm.ResolveInlineData(ctx, &NewMessage{}) + assert.Regexp(t, "FF10368", err) } func TestResolveInlineDataRefLookkupFail(t *testing.T) { @@ -391,13 +525,11 @@ func TestResolveInlineDataRefLookkupFail(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) - dataID := fftypes.NewUUID() + dataID, _, newMsg := testNewMessage() - mdi.On("GetDataByID", ctx, dataID, false).Return(nil, fmt.Errorf("pop")) + mdi.On("GetDataByID", ctx, dataID, true).Return(nil, fmt.Errorf("pop")) - _, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ - {DataRef: fftypes.DataRef{ID: dataID, Hash: fftypes.NewRandB32()}}, - }) + err := dm.ResolveInlineData(ctx, newMsg) assert.EqualError(t, err, "pop") } @@ -408,26 +540,18 @@ func TestResolveInlineDataValueNoValidatorOK(t *testing.T) { mdi.On("UpsertData", ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + _, _, newMsg := testNewMessage() + newMsg.Message.InlineData = fftypes.InlineData{ {Value: fftypes.JSONAnyPtr(`{"some":"json"}`)}, - }) - assert.NoError(t, err) - assert.Len(t, refs, 1) - assert.NotNil(t, refs[0].ID) - assert.NotNil(t, refs[0].Hash) -} - -func TestResolveInlineDataValueNoValidatorStoreFail(t *testing.T) { - dm, ctx, cancel := newTestDataManager(t) - defer cancel() - mdi := dm.database.(*databasemocks.Plugin) - - mdi.On("UpsertData", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) + } - _, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ - {Value: fftypes.JSONAnyPtr(`{"some":"json"}`)}, - }) - assert.EqualError(t, err, "pop") + err := dm.ResolveInlineData(ctx, newMsg) + assert.NoError(t, err) + assert.Len(t, newMsg.AllData, 1) + assert.Len(t, newMsg.NewData, 1) + assert.Len(t, newMsg.Message.Data, 1) + assert.NotNil(t, newMsg.AllData[0].ID) + assert.NotNil(t, newMsg.AllData[0].Hash) } func TestResolveInlineDataValueWithValidation(t *testing.T) { @@ -452,7 +576,8 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { }`), }, nil) - refs, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + _, _, newMsg := testNewMessage() + newMsg.Message.InlineData = fftypes.InlineData{ { Datatype: &fftypes.DatatypeRef{ Name: "customer", @@ -460,13 +585,16 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { }, Value: fftypes.JSONAnyPtr(`{"field1":"value1"}`), }, - }) + } + + err := dm.ResolveInlineData(ctx, newMsg) assert.NoError(t, err) - assert.Len(t, refs, 1) - assert.NotNil(t, refs[0].ID) - assert.NotNil(t, refs[0].Hash) + assert.Len(t, newMsg.AllData, 1) + assert.Len(t, newMsg.NewData, 1) + assert.NotNil(t, newMsg.AllData[0].ID) + assert.NotNil(t, 
newMsg.AllData[0].Hash) - _, err = dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + newMsg.Message.InlineData = fftypes.InlineData{ { Datatype: &fftypes.DatatypeRef{ Name: "customer", @@ -474,7 +602,8 @@ func TestResolveInlineDataValueWithValidation(t *testing.T) { }, Value: fftypes.JSONAnyPtr(`{"not_allowed":"value"}`), }, - }) + } + err = dm.ResolveInlineData(ctx, newMsg) assert.Regexp(t, "FF10198", err) } @@ -482,9 +611,12 @@ func TestResolveInlineDataNoRefOrValue(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() - _, err := dm.ResolveInlineDataPrivate(ctx, "ns1", fftypes.InlineData{ + _, _, newMsg := testNewMessage() + newMsg.Message.InlineData = fftypes.InlineData{ { /* missing */ }, - }) + } + + err := dm.ResolveInlineData(ctx, newMsg) assert.Regexp(t, "FF10205", err) } @@ -503,11 +635,21 @@ func TestUploadJSONLoadDatatypeFail(t *testing.T) { assert.EqualError(t, err, "pop") } +func TestUploadJSONLoadInsertDataFail(t *testing.T) { + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + dm.messageWriter.close() + _, err := dm.UploadJSON(ctx, "ns1", &fftypes.DataRefOrValue{ + Value: fftypes.JSONAnyPtr(`{}`), + }) + assert.Regexp(t, "FF10158", err) +} + func TestValidateAndStoreLoadNilRef(t *testing.T) { dm, ctx, cancel := newTestDataManager(t) defer cancel() - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Validator: fftypes.ValidatorTypeJSON, Datatype: nil, }) @@ -520,7 +662,7 @@ func TestValidateAndStoreLoadValidatorUnknown(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Validator: "wrong!", Datatype: &fftypes.DatatypeRef{ Name: "customer", @@ -537,7 +679,7 @@ func TestValidateAndStoreLoadBadRef(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Datatype: &fftypes.DatatypeRef{ // Missing name }, @@ -551,7 +693,7 @@ func TestValidateAndStoreNotFound(t *testing.T) { defer cancel() mdi := dm.database.(*databasemocks.Plugin) mdi.On("GetDatatypeByName", mock.Anything, "ns1", "customer", "0.0.1").Return(nil, nil) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Datatype: &fftypes.DatatypeRef{ Name: "customer", Version: "0.0.1", @@ -567,7 +709,7 @@ func TestValidateAndStoreBlobError(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) blobHash := fftypes.NewRandB32() mdi.On("GetBlobMatchingHash", mock.Anything, blobHash).Return(nil, fmt.Errorf("pop")) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", &fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Blob: &fftypes.BlobRef{ Hash: blobHash, }, @@ -582,7 +724,7 @@ func TestValidateAndStoreBlobNotFound(t *testing.T) { mdi := dm.database.(*databasemocks.Plugin) blobHash := fftypes.NewRandB32() mdi.On("GetBlobMatchingHash", mock.Anything, blobHash).Return(nil, nil) - _, _, _, err := dm.validateAndStoreInlined(ctx, "ns1", 
&fftypes.DataRefOrValue{ + _, err := dm.validateInputData(ctx, "ns1", &fftypes.DataRefOrValue{ Blob: &fftypes.BlobRef{ Hash: blobHash, }, @@ -606,7 +748,7 @@ func TestValidateAllLookupError(t *testing.T) { Value: fftypes.JSONAnyPtr(`anything`), } data.Seal(ctx, nil) - _, err := dm.ValidateAll(ctx, []*fftypes.Data{data}) + _, err := dm.ValidateAll(ctx, fftypes.DataArray{data}) assert.Regexp(t, "pop", err) } @@ -636,7 +778,7 @@ func TestValidateAllStoredValidatorInvalid(t *testing.T) { Version: "0.0.1", }, } - isValid, err := dm.ValidateAll(ctx, []*fftypes.Data{data}) + isValid, err := dm.ValidateAll(ctx, fftypes.DataArray{data}) assert.False(t, isValid) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -675,3 +817,339 @@ func TestVerifyNamespaceExistsOk(t *testing.T) { err := dm.VerifyNamespaceExists(ctx, "ns1") assert.NoError(t, err) } + +func TestHydrateBatchOK(t *testing.T) { + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + batchID := fftypes.NewUUID() + msgID := fftypes.NewUUID() + msgHash := fftypes.NewRandB32() + dataID := fftypes.NewUUID() + dataHash := fftypes.NewRandB32() + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Type: fftypes.BatchTypeBroadcast, + ID: batchID, + Namespace: "ns1", + }, + Manifest: fftypes.JSONAnyPtr(fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + batchID, msgID, msgHash, dataID, dataHash, + )), + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, + } + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("GetMessageByID", ctx, msgID).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: msgID}, + Hash: msgHash, + Confirmed: fftypes.Now(), + }, nil) + mdi.On("GetDataByID", ctx, dataID, true).Return(&fftypes.Data{ + ID: dataID, + Hash: dataHash, + Created: fftypes.Now(), + }, nil) + + batch, err := dm.HydrateBatch(ctx, bp) + assert.NoError(t, err) + assert.Equal(t, bp.BatchHeader, batch.BatchHeader) + assert.Equal(t, bp.TX, batch.Payload.TX) + assert.Equal(t, msgID, batch.Payload.Messages[0].Header.ID) + assert.Equal(t, msgHash, batch.Payload.Messages[0].Hash) + assert.Nil(t, batch.Payload.Messages[0].Confirmed) + assert.Equal(t, dataID, batch.Payload.Data[0].ID) + assert.Equal(t, dataHash, batch.Payload.Data[0].Hash) + assert.Equal(t, dataHash, batch.Payload.Data[0].Hash) + assert.NotNil(t, batch.Payload.Data[0].Created) + + mdi.AssertExpectations(t) +} + +func TestHydrateBatchDataFail(t *testing.T) { + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + batchID := fftypes.NewUUID() + msgID := fftypes.NewUUID() + msgHash := fftypes.NewRandB32() + dataID := fftypes.NewUUID() + dataHash := fftypes.NewRandB32() + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Type: fftypes.BatchTypeBroadcast, + ID: batchID, + Namespace: "ns1", + }, + Manifest: fftypes.JSONAnyPtr(fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + batchID, msgID, msgHash, dataID, dataHash, + )), + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, + } + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("GetMessageByID", ctx, msgID).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: msgID}, + Hash: msgHash, + Confirmed: fftypes.Now(), + }, nil) + mdi.On("GetDataByID", ctx, dataID, true).Return(nil, fmt.Errorf("pop")) + + _, err := dm.HydrateBatch(ctx, bp) + assert.Regexp(t, "FF10372.*pop", err) + + mdi.AssertExpectations(t) +} + +func TestHydrateBatchMsgNotFound(t *testing.T) { + dm, ctx, cancel 
:= newTestDataManager(t) + defer cancel() + + batchID := fftypes.NewUUID() + msgID := fftypes.NewUUID() + msgHash := fftypes.NewRandB32() + dataID := fftypes.NewUUID() + dataHash := fftypes.NewRandB32() + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Type: fftypes.BatchTypeBroadcast, + ID: batchID, + Namespace: "ns1", + }, + Manifest: fftypes.JSONAnyPtr(fmt.Sprintf(`{"id":"%s","messages":[{"id":"%s","hash":"%s"}],"data":[{"id":"%s","hash":"%s"}]}`, + batchID, msgID, msgHash, dataID, dataHash, + )), + TX: fftypes.TransactionRef{ + ID: fftypes.NewUUID(), + }, + } + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("GetMessageByID", ctx, msgID).Return(nil, nil) + + _, err := dm.HydrateBatch(ctx, bp) + assert.Regexp(t, "FF10372", err) + + mdi.AssertExpectations(t) +} + +func TestHydrateBatchMsgBadManifest(t *testing.T) { + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + bp := &fftypes.BatchPersisted{ + Manifest: fftypes.JSONAnyPtr(`!json`), + } + + _, err := dm.HydrateBatch(ctx, bp) + assert.Regexp(t, "FF10151", err) +} + +func TestGetMessageWithDataOk(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + mdi := dm.database.(*databasemocks.Plugin) + dataID := fftypes.NewUUID() + hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + + mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil).Once() + mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(&fftypes.Data{ + ID: dataID, + Hash: hash, + }, nil).Once() + msgRet, data, foundAll, err := dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Equal(t, msg, msgRet) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + // Check cache kicks in for second call + msgRet, data, foundAll, err = dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Equal(t, msg, msgRet) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestGetMessageWithDataCRORequirePublicBlobRefs(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + mdi := dm.database.(*databasemocks.Plugin) + dataID := fftypes.NewUUID() + hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + + mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil).Twice() + mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(&fftypes.Data{ + ID: dataID, + Hash: hash, + Blob: &fftypes.BlobRef{ + Hash: fftypes.NewRandB32(), + }, + }, nil).Twice() + msgRet, data, foundAll, err := dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Equal(t, msg, msgRet) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + // Check cache does not kick in as we have missing blob ref + msgRet, data, foundAll, err = dm.GetMessageWithDataCached(ctx, msg.Header.ID, CRORequirePublicBlobRefs) + assert.Equal(t, msg, msgRet) + assert.NotEmpty(t, data) + assert.Equal(t, *dataID, *data[0].ID) + assert.True(t, foundAll) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestGetMessageWithDataReadDataFail(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + mdi := 
dm.database.(*databasemocks.Plugin) + dataID := fftypes.NewUUID() + hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + + mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil) + mdi.On("GetDataByID", mock.Anything, mock.Anything, true).Return(nil, fmt.Errorf("pop")) + _, _, _, err := dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) +} + +func TestGetMessageWithDataReadMessageFail(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + mdi := dm.database.(*databasemocks.Plugin) + dataID := fftypes.NewUUID() + hash := fftypes.NewRandB32() + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, + Data: fftypes.DataRefs{{ID: dataID, Hash: hash}}, + } + + mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, fmt.Errorf("pop")) + _, _, _, err := dm.GetMessageWithDataCached(ctx, msg.Header.ID) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) +} + +func TestUpdateMessageCacheCRORequirePins(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + data := fftypes.DataArray{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, + } + msgNoPins := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Topics: fftypes.FFStringArray{"topic1"}, + }, + Data: data.Refs(), + } + msgWithPins := &fftypes.Message{ + Header: msgNoPins.Header, + Data: data.Refs(), + Pins: fftypes.FFStringArray{"pin1"}, + } + + msg, _ := dm.PeekMessageCache(ctx, msgWithPins.Header.ID) + assert.Nil(t, msg) + + dm.UpdateMessageCache(msgNoPins, data) + + msg, _ = dm.PeekMessageCache(ctx, msgWithPins.Header.ID) + assert.NotNil(t, msg) + + mce := dm.queryMessageCache(ctx, msgNoPins.Header.ID, CRORequirePins) + assert.Nil(t, mce) + + dm.UpdateMessageIfCached(ctx, msgWithPins) + for mce == nil { + mce = dm.queryMessageCache(ctx, msgNoPins.Header.ID, CRORequirePins) + } + +} + +func TestUpdateMessageCacheCRORequireBatchID(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + data := fftypes.DataArray{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, + } + msgNoPins := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Topics: fftypes.FFStringArray{"topic1"}, + }, + Data: data.Refs(), + } + msgWithBatch := &fftypes.Message{ + Header: msgNoPins.Header, + Data: data.Refs(), + BatchID: fftypes.NewUUID(), + } + + dm.UpdateMessageCache(msgNoPins, data) + + mce := dm.queryMessageCache(ctx, msgNoPins.Header.ID, CRORequireBatchID) + assert.Nil(t, mce) + + dm.UpdateMessageIfCached(ctx, msgWithBatch) + for mce == nil { + mce = dm.queryMessageCache(ctx, msgNoPins.Header.ID, CRORequireBatchID) + } + +} + +func TestWriteNewMessageFailNil(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + + err := dm.WriteNewMessage(ctx, &NewMessage{}) + assert.Regexp(t, "FF10368", err) +} + +func TestWriteNewMessageFailClosed(t *testing.T) { + + dm, ctx, cancel := newTestDataManager(t) + defer cancel() + dm.messageWriter.close() + + err := dm.WriteNewMessage(ctx, &NewMessage{ + Message: &fftypes.MessageInOut{}, + }) + assert.Regexp(t, "FF10158", err) +} diff --git a/internal/data/message_writer.go b/internal/data/message_writer.go new file mode 100644 index 0000000000..3c0c2cc8fc --- /dev/null +++ b/internal/data/message_writer.go @@ -0,0 +1,203 @@ +// Copyright © 
2022 Kaleido, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package data
+
+import (
+	"context"
+	"time"
+
+	"github.com/hyperledger/firefly/internal/i18n"
+	"github.com/hyperledger/firefly/internal/log"
+	"github.com/hyperledger/firefly/pkg/database"
+	"github.com/hyperledger/firefly/pkg/fftypes"
+)
+
+type NewMessage struct {
+	Message *fftypes.MessageInOut
+	AllData fftypes.DataArray
+	NewData fftypes.DataArray
+}
+
+// writeRequest is a combination of a message and a list of data that is new and needs to be
+// inserted into the database.
+type writeRequest struct {
+	newMessage *fftypes.Message
+	newData    fftypes.DataArray
+	result     chan error
+}
+
+type messageWriterBatch struct {
+	messages       []*fftypes.Message
+	data           fftypes.DataArray
+	listeners      []chan error
+	timeoutContext context.Context
+	timeoutCancel  func()
+}
+
+// messageWriter manages writing messages to the database.
+//
+// Where supported, it starts background workers to perform batch commits against the database,
+// to allow high-throughput insertion of messages + data.
+//
+// Multiple message writers can be started, to combine
+// concurrency with batching and tune for maximum throughput.
+type messageWriter struct {
+	ctx         context.Context
+	cancelFunc  func()
+	database    database.Plugin
+	workQueue   chan *writeRequest
+	workersDone []chan struct{}
+	conf        *messageWriterConf
+	closed      bool
+}
+
+type messageWriterConf struct {
+	workerCount  int
+	batchTimeout time.Duration
+	maxInserts   int
+}
+
+func newMessageWriter(ctx context.Context, di database.Plugin, conf *messageWriterConf) *messageWriter {
+	if !di.Capabilities().Concurrency {
+		log.L(ctx).Infof("Database plugin not configured for concurrency. Batched message writing disabled")
+		conf.workerCount = 0
+	}
+	mw := &messageWriter{
+		conf:     conf,
+		database: di,
+	}
+	mw.ctx, mw.cancelFunc = context.WithCancel(ctx)
+	return mw
+}
+
+// WriteNewMessage is the external interface which, depending on whether we have a non-zero
+// worker count, will dispatch the work to the pool and wait for it to complete on a background
+// transaction, or just run it in-line on the context passed in.
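+//
+// A usage sketch (the NewMessage construction is abbreviated, and the values are assumed):
+//
+//	nm := &NewMessage{Message: msgInOut, NewData: newData}
+//	if err := mw.WriteNewMessage(ctx, nm); err != nil {
+//		return err // with workers enabled, every request in the same internal batch gets the same error
+//	}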
+func (mw *messageWriter) WriteNewMessage(ctx context.Context, newMsg *NewMessage) error { + if mw.conf.workerCount > 0 { + // Dispatch to background worker + nmi := &writeRequest{ + newMessage: &newMsg.Message.Message, + newData: newMsg.NewData, + result: make(chan error), + } + select { + case mw.workQueue <- nmi: + case <-mw.ctx.Done(): + return i18n.NewError(ctx, i18n.MsgContextCanceled) + } + return <-nmi.result + } + // Otherwise do it in-line on this context + return mw.database.RunAsGroup(ctx, func(ctx context.Context) error { + return mw.writeMessages(ctx, []*fftypes.Message{&newMsg.Message.Message}, newMsg.NewData) + }) +} + +// WriteData writes a piece of data independently of a message +func (mw *messageWriter) WriteData(ctx context.Context, data *fftypes.Data) error { + if mw.conf.workerCount > 0 { + // Dispatch to background worker + nmi := &writeRequest{ + newData: fftypes.DataArray{data}, + result: make(chan error), + } + select { + case mw.workQueue <- nmi: + case <-mw.ctx.Done(): + return i18n.NewError(ctx, i18n.MsgContextCanceled) + } + return <-nmi.result + } + // Otherwise do it in-line on this context + return mw.database.UpsertData(ctx, data, database.UpsertOptimizationNew) +} + +func (mw *messageWriter) start() { + if mw.conf.workerCount > 0 { + mw.workQueue = make(chan *writeRequest) + mw.workersDone = make([]chan struct{}, mw.conf.workerCount) + for i := 0; i < mw.conf.workerCount; i++ { + mw.workersDone[i] = make(chan struct{}) + go mw.writerLoop(i) + } + } +} + +func (mw *messageWriter) writerLoop(index int) { + defer close(mw.workersDone[index]) + + var batch *messageWriterBatch + for !mw.closed { + var timeoutContext context.Context + var timedOut bool + if batch != nil { + timeoutContext = batch.timeoutContext + } else { + timeoutContext = mw.ctx + } + select { + case work := <-mw.workQueue: + if batch == nil { + batch = &messageWriterBatch{} + batch.timeoutContext, batch.timeoutCancel = context.WithTimeout(mw.ctx, mw.conf.batchTimeout) + } + if work.newMessage != nil { + batch.messages = append(batch.messages, work.newMessage) + } + batch.data = append(batch.data, work.newData...) + batch.listeners = append(batch.listeners, work.result) + case <-timeoutContext.Done(): + timedOut = true + } + + if batch != nil && (timedOut || (len(batch.messages)+len(batch.data) >= mw.conf.maxInserts)) { + batch.timeoutCancel() + err := mw.database.RunAsGroup(mw.ctx, func(ctx context.Context) error { + return mw.writeMessages(ctx, batch.messages, batch.data) + }) + for _, l := range batch.listeners { + l <- err + } + batch = nil + } + } +} + +func (mw *messageWriter) writeMessages(ctx context.Context, msgs []*fftypes.Message, data fftypes.DataArray) error { + if len(data) > 0 { + if err := mw.database.InsertDataArray(ctx, data); err != nil { + return err + } + } + if len(msgs) > 0 { + if err := mw.database.InsertMessages(ctx, msgs); err != nil { + return err + } + } + return nil +} + +func (mw *messageWriter) close() { + if !mw.closed { + mw.closed = true + mw.cancelFunc() + for _, workerDone := range mw.workersDone { + <-workerDone + } + } +} diff --git a/internal/data/message_writer_test.go b/internal/data/message_writer_test.go new file mode 100644 index 0000000000..59ffb007b7 --- /dev/null +++ b/internal/data/message_writer_test.go @@ -0,0 +1,142 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func newTestMessageWriter(t *testing.T) *messageWriter { + return newTestMessageWriterConf(t, &messageWriterConf{ + workerCount: 1, + batchTimeout: 100 * time.Millisecond, + maxInserts: 200, + }, &database.Capabilities{Concurrency: true}) +} + +func newTestMessageWriterNoConcrrency(t *testing.T) *messageWriter { + return newTestMessageWriterConf(t, &messageWriterConf{workerCount: 1}, &database.Capabilities{Concurrency: false}) +} + +func newTestMessageWriterConf(t *testing.T, conf *messageWriterConf, dbCapabilities *database.Capabilities) *messageWriter { + mdi := &databasemocks.Plugin{} + mdi.On("Capabilities").Return(dbCapabilities) + return newMessageWriter(context.Background(), mdi, conf) +} + +func TestNewMessageWriterNoConcurrency(t *testing.T) { + mw := newTestMessageWriterNoConcrrency(t) + assert.Zero(t, mw.conf.workerCount) +} + +func TestWriteNewMessageClosed(t *testing.T) { + mw := newTestMessageWriter(t) + mw.close() + err := mw.WriteNewMessage(mw.ctx, &NewMessage{ + Message: &fftypes.MessageInOut{}, + }) + assert.Regexp(t, "FF10158", err) +} + +func TestWriteDataClosed(t *testing.T) { + mw := newTestMessageWriter(t) + mw.close() + err := mw.WriteData(mw.ctx, &fftypes.Data{}) + assert.Regexp(t, "FF10158", err) +} + +func TestWriteNewMessageSyncFallback(t *testing.T) { + mw := newTestMessageWriterNoConcrrency(t) + customCtx := context.WithValue(context.Background(), "dbtx", "on this context") + + msg1 := &fftypes.MessageInOut{ + Message: fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + }, + }, + } + data1 := &fftypes.Data{ID: fftypes.NewUUID()} + + mdi := mw.database.(*databasemocks.Plugin) + mdi.On("RunAsGroup", customCtx, mock.Anything).Run(func(args mock.Arguments) { + err := args[1].(func(context.Context) error)(customCtx) + assert.NoError(t, err) + }).Return(nil) + mdi.On("InsertMessages", customCtx, []*fftypes.Message{&msg1.Message}).Return(nil) + mdi.On("InsertDataArray", customCtx, fftypes.DataArray{data1}).Return(nil) + + err := mw.WriteNewMessage(customCtx, &NewMessage{ + Message: msg1, + NewData: fftypes.DataArray{data1}, + }) + + assert.NoError(t, err) +} + +func TestWriteDataSyncFallback(t *testing.T) { + mw := newTestMessageWriterNoConcrrency(t) + customCtx := context.WithValue(context.Background(), "dbtx", "on this context") + + data1 := &fftypes.Data{ID: fftypes.NewUUID()} + + mdi := mw.database.(*databasemocks.Plugin) + mdi.On("UpsertData", customCtx, data1, database.UpsertOptimizationNew).Return(nil) + + err := mw.WriteData(customCtx, data1) + + assert.NoError(t, err) +} + +func TestWriteMessagesInsertMessagesFail(t *testing.T) { + mw := newTestMessageWriterNoConcrrency(t) + + msg1 := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + }, + } + + mdi := mw.database.(*databasemocks.Plugin) + 
mdi.On("InsertMessages", mw.ctx, []*fftypes.Message{msg1}).Return(fmt.Errorf("pop")) + + err := mw.writeMessages(mw.ctx, []*fftypes.Message{msg1}, fftypes.DataArray{}) + + assert.Regexp(t, "pop", err) +} + +func TestWriteMessagesInsertDataArrayFail(t *testing.T) { + mw := newTestMessageWriterNoConcrrency(t) + + data1 := &fftypes.Data{ID: fftypes.NewUUID()} + + mdi := mw.database.(*databasemocks.Plugin) + mdi.On("InsertDataArray", mw.ctx, fftypes.DataArray{data1}).Return(fmt.Errorf("pop")) + + err := mw.writeMessages(mw.ctx, []*fftypes.Message{}, fftypes.DataArray{data1}) + + assert.Regexp(t, "pop", err) +} diff --git a/internal/database/postgres/postgres.go b/internal/database/postgres/postgres.go index 115f4c5795..6cb99e5e1f 100644 --- a/internal/database/postgres/postgres.go +++ b/internal/database/postgres/postgres.go @@ -57,6 +57,7 @@ func (psql *Postgres) Features() sqlcommon.SQLFeatures { features.ExclusiveTableLockSQL = func(table string) string { return fmt.Sprintf(`LOCK TABLE "%s" IN EXCLUSIVE MODE;`, table) } + features.MultiRowInsert = true return features } diff --git a/internal/database/sqlcommon/batch_sql.go b/internal/database/sqlcommon/batch_sql.go index d4f72d4caf..3b1f059dbc 100644 --- a/internal/database/sqlcommon/batch_sql.go +++ b/internal/database/sqlcommon/batch_sql.go @@ -37,7 +37,7 @@ var ( "group_hash", "created", "hash", - "payload", + "manifest", "payload_ref", "confirmed", "tx_type", @@ -54,7 +54,7 @@ var ( } ) -func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.Batch) (err error) { +func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.BatchPersisted) (err error) { ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) if err != nil { return err @@ -95,11 +95,11 @@ func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.Batch) (err Set("group_hash", batch.Group). Set("created", batch.Created). Set("hash", batch.Hash). - Set("payload", batch.Payload). + Set("manifest", batch.Manifest). Set("payload_ref", batch.PayloadRef). Set("confirmed", batch.Confirmed). - Set("tx_type", batch.Payload.TX.Type). - Set("tx_id", batch.Payload.TX.ID). + Set("tx_type", batch.TX.Type). + Set("tx_id", batch.TX.ID). Set("node_id", batch.Node). 
Where(sq.Eq{"id": batch.ID}), func() { @@ -122,11 +122,11 @@ func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.Batch) (err batch.Group, batch.Created, batch.Hash, - batch.Payload, + batch.Manifest, batch.PayloadRef, batch.Confirmed, - batch.Payload.TX.Type, - batch.Payload.TX.ID, + batch.TX.Type, + batch.TX.ID, batch.Node, ), func() { @@ -140,8 +140,8 @@ func (s *SQLCommon) UpsertBatch(ctx context.Context, batch *fftypes.Batch) (err return s.commitTx(ctx, tx, autoCommit) } -func (s *SQLCommon) batchResult(ctx context.Context, row *sql.Rows) (*fftypes.Batch, error) { - var batch fftypes.Batch +func (s *SQLCommon) batchResult(ctx context.Context, row *sql.Rows) (*fftypes.BatchPersisted, error) { + var batch fftypes.BatchPersisted err := row.Scan( &batch.ID, &batch.Type, @@ -151,11 +151,11 @@ func (s *SQLCommon) batchResult(ctx context.Context, row *sql.Rows) (*fftypes.Ba &batch.Group, &batch.Created, &batch.Hash, - &batch.Payload, + &batch.Manifest, &batch.PayloadRef, &batch.Confirmed, - &batch.Payload.TX.Type, - &batch.Payload.TX.ID, + &batch.TX.Type, + &batch.TX.ID, &batch.Node, ) if err != nil { @@ -164,7 +164,7 @@ func (s *SQLCommon) batchResult(ctx context.Context, row *sql.Rows) (*fftypes.Ba return &batch, nil } -func (s *SQLCommon) GetBatchByID(ctx context.Context, id *fftypes.UUID) (message *fftypes.Batch, err error) { +func (s *SQLCommon) GetBatchByID(ctx context.Context, id *fftypes.UUID) (message *fftypes.BatchPersisted, err error) { rows, _, err := s.query(ctx, sq.Select(batchColumns...). @@ -189,7 +189,7 @@ func (s *SQLCommon) GetBatchByID(ctx context.Context, id *fftypes.UUID) (message return batch, nil } -func (s *SQLCommon) GetBatches(ctx context.Context, filter database.Filter) (message []*fftypes.Batch, res *database.FilterResult, err error) { +func (s *SQLCommon) GetBatches(ctx context.Context, filter database.Filter) (message []*fftypes.BatchPersisted, res *database.FilterResult, err error) { query, fop, fi, err := s.filterSelect(ctx, "", sq.Select(batchColumns...).From("batches"), filter, batchFilterFieldMap, []interface{}{"sequence"}) if err != nil { @@ -202,7 +202,7 @@ func (s *SQLCommon) GetBatches(ctx context.Context, filter database.Filter) (mes } defer rows.Close() - batches := []*fftypes.Batch{} + batches := []*fftypes.BatchPersisted{} for rows.Next() { batch, err := s.batchResult(ctx, rows) if err != nil { diff --git a/internal/database/sqlcommon/batch_sql_test.go b/internal/database/sqlcommon/batch_sql_test.go index c814f5d758..48ae249967 100644 --- a/internal/database/sqlcommon/batch_sql_test.go +++ b/internal/database/sqlcommon/batch_sql_test.go @@ -38,25 +38,27 @@ func TestBatch2EWithDB(t *testing.T) { // Create a new batch entry batchID := fftypes.NewUUID() msgID1 := fftypes.NewUUID() - batch := &fftypes.Batch{ - ID: batchID, - Type: fftypes.MessageTypeBroadcast, - SignerRef: fftypes.SignerRef{ - Key: "0x12345", - Author: "did:firefly:org/abcd", - }, - Namespace: "ns1", - Hash: fftypes.NewRandB32(), - Created: fftypes.Now(), - Node: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{ - {Header: fftypes.MessageHeader{ID: msgID1}}, - }, - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeUnpinned, + batch := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + Type: fftypes.BatchTypeBroadcast, + SignerRef: fftypes.SignerRef{ + Key: "0x12345", + Author: "did:firefly:org/abcd", }, + Namespace: "ns1", + Node: fftypes.NewUUID(), + Created: fftypes.Now(), }, + Hash: 
fftypes.NewRandB32(), + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeUnpinned, + }, + Manifest: fftypes.JSONAnyPtr((&fftypes.BatchManifest{ + Messages: []*fftypes.MessageManifestEntry{ + {MessageRef: fftypes.MessageRef{ID: msgID1}}, + }, + }).String()), } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionBatches, fftypes.ChangeEventTypeCreated, "ns1", batchID, mock.Anything).Return() @@ -78,27 +80,29 @@ func TestBatch2EWithDB(t *testing.T) { txid := fftypes.NewUUID() msgID2 := fftypes.NewUUID() payloadRef := "" - batchUpdated := &fftypes.Batch{ - ID: batchID, - Type: fftypes.MessageTypeBroadcast, - SignerRef: fftypes.SignerRef{ - Key: "0x12345", - Author: "did:firefly:org/abcd", - }, - Namespace: "ns1", - Hash: fftypes.NewRandB32(), - Created: fftypes.Now(), - Node: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: txid, - Type: fftypes.TransactionTypeBatchPin, - }, - Messages: []*fftypes.Message{ - {Header: fftypes.MessageHeader{ID: msgID1}}, - {Header: fftypes.MessageHeader{ID: msgID2}}, + batchUpdated := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + Type: fftypes.BatchTypePrivate, + SignerRef: fftypes.SignerRef{ + Key: "0x12345", + Author: "did:firefly:org/abcd", }, + Namespace: "ns1", + Node: fftypes.NewUUID(), + Created: fftypes.Now(), }, + Hash: fftypes.NewRandB32(), + TX: fftypes.TransactionRef{ + ID: txid, + Type: fftypes.TransactionTypeBatchPin, + }, + Manifest: fftypes.JSONAnyPtr((&fftypes.BatchManifest{ + Messages: []*fftypes.MessageManifestEntry{ + {MessageRef: fftypes.MessageRef{ID: msgID1}}, + {MessageRef: fftypes.MessageRef{ID: msgID2}}, + }, + }).String()), PayloadRef: payloadRef, Confirmed: fftypes.Now(), } @@ -164,7 +168,7 @@ func TestBatch2EWithDB(t *testing.T) { func TestUpsertBatchFailBegin(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) - err := s.UpsertBatch(context.Background(), &fftypes.Batch{}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{}) assert.Regexp(t, "FF10114", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -175,7 +179,7 @@ func TestUpsertBatchFailSelect(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() batchID := fftypes.NewUUID() - err := s.UpsertBatch(context.Background(), &fftypes.Batch{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{BatchHeader: fftypes.BatchHeader{ID: batchID}}) assert.Regexp(t, "FF10115", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -187,7 +191,7 @@ func TestUpsertBatchFailInsert(t *testing.T) { mock.ExpectExec("INSERT .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() batchID := fftypes.NewUUID() - err := s.UpsertBatch(context.Background(), &fftypes.Batch{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{BatchHeader: fftypes.BatchHeader{ID: batchID}}) assert.Regexp(t, "FF10116", err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -200,7 +204,7 @@ func TestUpsertBatchFailUpdate(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"hash"}).AddRow(hash)) mock.ExpectExec("UPDATE .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() - err := s.UpsertBatch(context.Background(), &fftypes.Batch{ID: batchID, Hash: hash}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{BatchHeader: fftypes.BatchHeader{ID: batchID}, Hash: hash}) assert.Regexp(t, "FF10117", 
err) assert.NoError(t, mock.ExpectationsWereMet()) } @@ -212,7 +216,7 @@ func TestUpsertBatchFailCommit(t *testing.T) { mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id"})) mock.ExpectExec("INSERT .*").WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectCommit().WillReturnError(fmt.Errorf("pop")) - err := s.UpsertBatch(context.Background(), &fftypes.Batch{ID: batchID}) + err := s.UpsertBatch(context.Background(), &fftypes.BatchPersisted{BatchHeader: fftypes.BatchHeader{ID: batchID}}) assert.Regexp(t, "FF10119", err) assert.NoError(t, mock.ExpectationsWereMet()) } diff --git a/internal/database/sqlcommon/chart_sql.go b/internal/database/sqlcommon/chart_sql.go index a93f946552..c61c3c0f55 100644 --- a/internal/database/sqlcommon/chart_sql.go +++ b/internal/database/sqlcommon/chart_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -20,6 +20,7 @@ import ( "context" "database/sql" "fmt" + "strconv" sq "github.com/Masterminds/squirrel" "github.com/hyperledger/firefly/internal/i18n" @@ -27,13 +28,35 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (s *SQLCommon) getCaseQueries(ns string, intervals []fftypes.ChartHistogramInterval) (caseQueries []sq.CaseBuilder) { +func (s *SQLCommon) getCaseQueriesByInterval(ns string, intervals []fftypes.ChartHistogramInterval) (caseQueries []sq.CaseBuilder) { for _, interval := range intervals { + caseQueries = append(caseQueries, sq.Case(). + When( + sq.And{ + // Querying by 'timestamp' field for blockchain events + // If more tables are supported that have no "type" field, + // and a different date field name, + // this method will need to be refactored + sq.GtOrEq{"timestamp": interval.StartTime}, + sq.Lt{"timestamp": interval.EndTime}, + sq.Eq{"namespace": ns}, + }, + "1", + ). + Else("0")) + } + + return caseQueries +} + +func (s *SQLCommon) getCaseQueriesByType(ns string, dataTypes []string, interval fftypes.ChartHistogramInterval, typeColName string) (caseQueries []sq.CaseBuilder) { + for _, dataType := range dataTypes { caseQueries = append(caseQueries, sq.Case(). 
When( sq.And{ sq.GtOrEq{"created": interval.StartTime}, sq.Lt{"created": interval.EndTime}, + sq.Eq{typeColName: dataType}, sq.Eq{"namespace": ns}, }, "1", @@ -44,22 +67,64 @@ func (s *SQLCommon) getCaseQueries(ns string, intervals []fftypes.ChartHistogram return caseQueries } -func (s *SQLCommon) getTableNameFromCollection(ctx context.Context, collection database.CollectionName) (tableName string, err error) { +func (s *SQLCommon) getTableNameFromCollection(ctx context.Context, collection database.CollectionName) (tableName string, fieldMap map[string]string, err error) { switch collection { case database.CollectionName(database.CollectionMessages): - return "messages", nil + return "messages", msgFilterFieldMap, nil case database.CollectionName(database.CollectionTransactions): - return "transactions", nil + return "transactions", transactionFilterFieldMap, nil case database.CollectionName(database.CollectionOperations): - return "operations", nil + return "operations", opFilterFieldMap, nil case database.CollectionName(database.CollectionEvents): - return "events", nil + return "events", eventFilterFieldMap, nil + case database.CollectionName(database.CollectionTokenTransfers): + return "tokentransfer", tokenTransferFilterFieldMap, nil + case database.CollectionName(database.CollectionBlockchainEvents): + return "blockchainevents", blockchainEventFilterFieldMap, nil default: - return "", i18n.NewError(ctx, i18n.MsgUnsupportedCollection, collection) + return "", nil, i18n.NewError(ctx, i18n.MsgUnsupportedCollection, collection) + } +} + +func (s *SQLCommon) getDistinctTypesFromTable(ctx context.Context, tableName string, fieldMap map[string]string) ([]string, error) { + if _, ok := fieldMap["type"]; !ok { + return []string{}, nil + } + qb := sq.Select(fieldMap["type"]).Distinct().From(tableName) + + rows, _, err := s.query(ctx, qb.From(tableName)) + if err != nil { + return nil, err } + defer rows.Close() + + var dataTypes []string + for rows.Next() { + var dataType string + err := rows.Scan(&dataType) + if err != nil { + return []string{}, i18n.WrapError(ctx, err, i18n.MsgDBReadErr, tableName) + } + dataTypes = append(dataTypes, dataType) + } + rows.Close() + + return dataTypes, nil } -func (s *SQLCommon) histogramResult(ctx context.Context, rows *sql.Rows, cols []*fftypes.ChartHistogram) ([]*fftypes.ChartHistogram, error) { +func (s *SQLCommon) getBucketTotal(typeBuckets []*fftypes.ChartHistogramType) (string, error) { + total := 0 + for _, typeBucket := range typeBuckets { + typeBucketInt, err := strconv.Atoi(typeBucket.Count) + if err != nil { + return "", err + } + total += typeBucketInt + } + return strconv.Itoa(total), nil +} + +func (s *SQLCommon) histogramResultWithTypes(ctx context.Context, rows *sql.Rows, cols []*fftypes.ChartHistogramType, tableName string) ([]*fftypes.ChartHistogramType, error) { results := []interface{}{} for i := range cols { @@ -67,29 +132,40 @@ func (s *SQLCommon) histogramResult(ctx context.Context, rows *sql.Rows, cols [] } err := rows.Scan(results...) 
if err != nil { - return nil, i18n.NewError(ctx, i18n.MsgDBReadErr, "histogram") + return nil, i18n.NewError(ctx, i18n.MsgDBReadErr, tableName) } return cols, nil } -func (s *SQLCommon) GetChartHistogram(ctx context.Context, ns string, intervals []fftypes.ChartHistogramInterval, collection database.CollectionName) (histogram []*fftypes.ChartHistogram, err error) { - tableName, err := s.getTableNameFromCollection(ctx, collection) +func (s *SQLCommon) histogramResultNoType(ctx context.Context, rows *sql.Rows, cols []*fftypes.ChartHistogram, tableName string) ([]*fftypes.ChartHistogram, error) { + results := []interface{}{} + + for i := range cols { + results = append(results, &cols[i].Count) + } + err := rows.Scan(results...) if err != nil { - return nil, err + return nil, i18n.NewError(ctx, i18n.MsgDBReadErr, tableName) } + return cols, nil +} + +func (s *SQLCommon) getHistogramNoTypes(ctx context.Context, ns string, intervals []fftypes.ChartHistogramInterval, tableName string) (histogramList []*fftypes.ChartHistogram, err error) { qb := sq.Select() - for i, caseQuery := range s.getCaseQueries(ns, intervals) { + for i, caseQuery := range s.getCaseQueriesByInterval(ns, intervals) { query, args, _ := caseQuery.ToSql() - histogram = append(histogram, &fftypes.ChartHistogram{ - Count: "", + histogramList = append(histogramList, &fftypes.ChartHistogram{ + Count: "0", Timestamp: intervals[i].StartTime, + Types: make([]*fftypes.ChartHistogramType, 0), }) qb = qb.Column(sq.Alias(sq.Expr("SUM("+query+")", args...), fmt.Sprintf("case_%d", i))) + } rows, _, err := s.query(ctx, qb.From(tableName)) @@ -99,8 +175,86 @@ func (s *SQLCommon) GetChartHistogram(ctx context.Context, ns string, intervals defer rows.Close() if !rows.Next() { - return []*fftypes.ChartHistogram{}, nil + return histogramList, nil + } + + return s.histogramResultNoType(ctx, rows, histogramList, tableName) +} + +func (s *SQLCommon) getHistogramWithTypes(ctx context.Context, ns string, intervals []fftypes.ChartHistogramInterval, dataTypes []string, fieldMap map[string]string, tableName string) (histogramList []*fftypes.ChartHistogram, err error) { + for _, interval := range intervals { + qb := sq.Select() + histogramTypes := make([]*fftypes.ChartHistogramType, 0) + + for i, caseQuery := range s.getCaseQueriesByType(ns, dataTypes, interval, fieldMap["type"]) { + query, args, _ := caseQuery.ToSql() + histogramTypes = append(histogramTypes, &fftypes.ChartHistogramType{ + Count: "", + Type: dataTypes[i], + }) + + qb = qb.Column(sq.Alias(sq.Expr("SUM("+query+")", args...), fmt.Sprintf("case_%d", i))) + } + + rows, _, err := s.query(ctx, qb.From(tableName)) + if err != nil { + return nil, err + } + defer rows.Close() + + if rows.Next() { + hist, err := s.histogramResultWithTypes(ctx, rows, histogramTypes, tableName) + rows.Close() + if err != nil { + return nil, err + } + + total, err := s.getBucketTotal(hist) + if err != nil { + return nil, err + } + + histogramList = append(histogramList, &fftypes.ChartHistogram{ + Count: total, + Timestamp: interval.StartTime, + Types: hist, + }) + } else { + histogramList = append(histogramList, &fftypes.ChartHistogram{ + Count: "0", + Timestamp: interval.StartTime, + Types: make([]*fftypes.ChartHistogramType, 0), + }) + } + } + + return histogramList, nil +} + +func (s *SQLCommon) GetChartHistogram(ctx context.Context, ns string, intervals []fftypes.ChartHistogramInterval, collection database.CollectionName) (histogramList []*fftypes.ChartHistogram, err error) { + tableName, fieldMap, err := 
s.getTableNameFromCollection(ctx, collection) + if err != nil { + return nil, err + } + + dataTypes, err := s.getDistinctTypesFromTable(ctx, tableName, fieldMap) + if err != nil { + return nil, err + } + + if len(dataTypes) > 0 { + histogramList, err = s.getHistogramWithTypes(ctx, ns, intervals, dataTypes, fieldMap, tableName) + if err != nil { + return nil, err + } + + return histogramList, nil + } + + histogramList, err = s.getHistogramNoTypes(ctx, ns, intervals, tableName) + if err != nil { + return nil, err } - return s.histogramResult(ctx, rows, histogram) + return histogramList, nil } diff --git a/internal/database/sqlcommon/chart_sql_test.go b/internal/database/sqlcommon/chart_sql_test.go index e598d91a5f..38639b3c9a 100644 --- a/internal/database/sqlcommon/chart_sql_test.go +++ b/internal/database/sqlcommon/chart_sql_test.go @@ -28,24 +28,52 @@ import ( ) var ( - emptyHistogramResult = make([]*fftypes.ChartHistogram, 0) + emptyHistogramResult = []*fftypes.ChartHistogram{ + { + Count: "0", + Timestamp: fftypes.UnixTime(1000000000), + Types: make([]*fftypes.ChartHistogramType, 0), + }, + } expectedHistogramResult = []*fftypes.ChartHistogram{ { - Count: "123", + Count: "10", + Timestamp: fftypes.UnixTime(1000000000), + Types: []*fftypes.ChartHistogramType{ + { + Count: "5", + Type: "typeA", + }, + { + Count: "5", + Type: "typeB", + }, + }, + }, + } + expectedHistogramResultNoTypes = []*fftypes.ChartHistogram{ + { + Count: "10", Timestamp: fftypes.UnixTime(1000000000), + Types: make([]*fftypes.ChartHistogramType, 0), }, } + mockHistogramInterval = []fftypes.ChartHistogramInterval{ { StartTime: fftypes.UnixTime(1000000000), EndTime: fftypes.UnixTime(1000000001), }, } - validCollections = []string{ + validCollectionsWithTypes = []string{ "events", "messages", "operations", "transactions", + "tokentransfers", + } + validCollectionsNoTypes = []string{ + "blockchainevents", } ) @@ -56,12 +84,13 @@ func TestGetChartHistogramInvalidCollectionName(t *testing.T) { assert.Regexp(t, "FF10301", err) } -func TestGetChartHistogramValidCollectionName(t *testing.T) { - for i := range validCollections { +func TestGetChartHistogramValidCollectionNameWithTypes(t *testing.T) { + for i := range validCollectionsWithTypes { s, mock := newMockProvider().init() - mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0"}).AddRow("123")) + mock.ExpectQuery("SELECT DISTINCT .*").WillReturnRows(sqlmock.NewRows([]string{"type"}).AddRow("typeA").AddRow("typeB")) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0", "case_1"}).AddRow("5", "5")) - histogram, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName(validCollections[i])) + histogram, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName(validCollectionsWithTypes[i])) assert.NoError(t, err) assert.Equal(t, histogram, expectedHistogramResult) @@ -69,27 +98,88 @@ func TestGetChartHistogramValidCollectionName(t *testing.T) { } } +func TestGetChartHistogramValidCollectionNameNoTypes(t *testing.T) { + for i := range validCollectionsNoTypes { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0"}).AddRow("10")) + + histogram, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName(validCollectionsNoTypes[i])) + assert.NoError(t, err) + assert.Equal(t, expectedHistogramResultNoTypes, histogram) + 
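+		// No SELECT DISTINCT expectation is needed here: collections without
+		// a "type" column skip the distinct-types lookup entirely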
assert.NoError(t, mock.ExpectationsWereMet()) + } +} + func TestGetChartHistogramsQueryFail(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT DISTINCT .*").WillReturnRows(sqlmock.NewRows([]string{"type"}).AddRow("typeA").AddRow("typeB")) + mock.ExpectQuery("SELECT *").WillReturnError(fmt.Errorf("pop")) + + _, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("messages")) + assert.Regexp(t, "FF10115", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestGetChartHistogramsQueryFailNoTypes(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectQuery("SELECT *").WillReturnError(fmt.Errorf("pop")) + _, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("blockchainevents")) + assert.Regexp(t, "FF10115", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestGetChartHistogramQueryFailBadDistinctTypes(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT DISTINCT .*").WillReturnError(fmt.Errorf("pop")) + _, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("messages")) assert.Regexp(t, "FF10115", err) assert.NoError(t, mock.ExpectationsWereMet()) } +func TestGetChartHistogramScanFailInvalidRowType(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT DISTINCT .*").WillReturnRows(sqlmock.NewRows([]string{"type"}).AddRow(nil).AddRow("typeB")) + + _, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("messages")) + assert.Regexp(t, "FF10121", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + func TestGetChartHistogramScanFailTooManyCols(t *testing.T) { s, mock := newMockProvider().init() - mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0", "unexpected_column"}).AddRow("one", "two")) + mock.ExpectQuery("SELECT DISTINCT .*").WillReturnRows(sqlmock.NewRows([]string{"type"}).AddRow("typeA").AddRow("typeB")) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0", "case_1", "unexpected_col"}).AddRow("one", "two", "three")) _, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("messages")) assert.Regexp(t, "FF10121", err) assert.NoError(t, mock.ExpectationsWereMet()) } +func TestGetChartHistogramScanFailTooManyColsNoTypes(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0", "unexpected"}).AddRow("10", "abc")) + + _, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("blockchainevents")) + assert.Regexp(t, "FF10121", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestGetChartHistogramFailStringToIntConversion(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT DISTINCT .*").WillReturnRows(sqlmock.NewRows([]string{"type"}).AddRow("typeA").AddRow("typeB")) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0", "case_1"}).AddRow("5", "NotInt")) + + _, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("messages")) + assert.Error(t, err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + func TestGetChartHistogramSuccessNoRows(t *testing.T) { s, mock := newMockProvider().init() - mock.ExpectQuery("SELECT 
.*").WillReturnRows(sqlmock.NewRows([]string{"case_0"})) + mock.ExpectQuery("SELECT DISTINCT .*").WillReturnRows(sqlmock.NewRows([]string{"type"}).AddRow("typeA").AddRow("typeB")) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0", "case_1"})) histogram, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("messages")) assert.NoError(t, err) @@ -97,9 +187,31 @@ func TestGetChartHistogramSuccessNoRows(t *testing.T) { assert.NoError(t, mock.ExpectationsWereMet()) } +func TestGetChartHistogramSuccessNoRowsNoTypes(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0", "case_1"})) + + histogram, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("blockchainevents")) + assert.NoError(t, err) + + assert.Equal(t, emptyHistogramResult, histogram) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestGetChartHistogramSuccessNoTypes(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0"}).AddRow("10")) + + histogram, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("blockchainevents")) + assert.NoError(t, err) + assert.Equal(t, expectedHistogramResultNoTypes, histogram) + assert.NoError(t, mock.ExpectationsWereMet()) +} + func TestGetChartHistogramSuccess(t *testing.T) { s, mock := newMockProvider().init() - mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0"}).AddRow("123")) + mock.ExpectQuery("SELECT DISTINCT .*").WillReturnRows(sqlmock.NewRows([]string{"type"}).AddRow("typeA").AddRow("typeB")) + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"case_0", "case_1"}).AddRow("5", "5")) histogram, err := s.GetChartHistogram(context.Background(), "ns1", mockHistogramInterval, database.CollectionName("messages")) diff --git a/internal/database/sqlcommon/config.go b/internal/database/sqlcommon/config.go index c27a46244b..700ba03337 100644 --- a/internal/database/sqlcommon/config.go +++ b/internal/database/sqlcommon/config.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -31,6 +31,12 @@ const (
 	SQLConfDatasourceURL = "url"
 	// SQLConfMaxConnections maximum connections to the database
 	SQLConfMaxConnections = "maxConns"
+	// SQLConfMaxConnIdleTime maximum time a connection may sit idle before being closed
+	SQLConfMaxConnIdleTime = "maxConnIdleTime"
+	// SQLConfMaxIdleConns maximum number of idle connections to retain in the pool
+	SQLConfMaxIdleConns = "maxIdleConns"
+	// SQLConfMaxConnLifetime maximum time a connection may be reused before being closed
+	SQLConfMaxConnLifetime = "maxConnLifetime"
 )
 
 const (
@@ -41,5 +47,8 @@ func (s *SQLCommon) InitPrefix(provider Provider, prefix config.Prefix) {
 	prefix.AddKnownKey(SQLConfMigrationsAuto, false)
 	prefix.AddKnownKey(SQLConfDatasourceURL)
 	prefix.AddKnownKey(SQLConfMigrationsDirectory, fmt.Sprintf(defaultMigrationsDirectoryTemplate, provider.MigrationsDir()))
-	prefix.AddKnownKey(SQLConfMaxConnections) // some providers may set a default
+	prefix.AddKnownKey(SQLConfMaxConnections) // some providers set a default
+	prefix.AddKnownKey(SQLConfMaxConnIdleTime, "1m")
+	prefix.AddKnownKey(SQLConfMaxIdleConns) // defaults to the max connections
+	prefix.AddKnownKey(SQLConfMaxConnLifetime)
 }
diff --git a/internal/database/sqlcommon/contractlisteners_sql.go b/internal/database/sqlcommon/contractlisteners_sql.go
index 88b7a1f7ee..40bc980719 100644
--- a/internal/database/sqlcommon/contractlisteners_sql.go
+++ b/internal/database/sqlcommon/contractlisteners_sql.go
@@ -37,6 +37,8 @@ var (
 		"name",
 		"protocol_id",
 		"location",
+		"topic",
+		"options",
 		"created",
 	}
 	contractListenerFilterFieldMap = map[string]string{
@@ -77,6 +79,8 @@ func (s *SQLCommon) UpsertContractListener(ctx context.Context, sub *fftypes.Con
 			Set("namespace", sub.Namespace).
 			Set("name", sub.Name).
 			Set("location", sub.Location).
+			Set("topic", sub.Topic).
+			Set("options", sub.Options).
Where(sq.Eq{"protocol_id": sub.ProtocolID}), func() { s.callbacks.UUIDCollectionNSEvent(database.CollectionContractListeners, fftypes.ChangeEventTypeUpdated, sub.Namespace, sub.ID) @@ -97,6 +101,8 @@ func (s *SQLCommon) UpsertContractListener(ctx context.Context, sub *fftypes.Con sub.Name, sub.ProtocolID, sub.Location, + sub.Topic, + sub.Options, sub.Created, ), func() { @@ -122,6 +128,8 @@ func (s *SQLCommon) contractListenerResult(ctx context.Context, row *sql.Rows) ( &sub.Name, &sub.ProtocolID, &sub.Location, + &sub.Topic, + &sub.Options, &sub.Created, ) if err != nil { diff --git a/internal/database/sqlcommon/contractlisteners_sql_test.go b/internal/database/sqlcommon/contractlisteners_sql_test.go index 382d739afb..94a34dc95f 100644 --- a/internal/database/sqlcommon/contractlisteners_sql_test.go +++ b/internal/database/sqlcommon/contractlisteners_sql_test.go @@ -50,6 +50,10 @@ func TestContractListenerE2EWithDB(t *testing.T) { Name: "sub1", ProtocolID: "sb-123", Location: fftypes.JSONAnyPtrBytes(locationJson), + Topic: "topic1", + Options: &fftypes.ContractListenerOptions{ + FirstEvent: "0", + }, } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionContractListeners, fftypes.ChangeEventTypeCreated, "ns", sub.ID).Return() @@ -227,9 +231,35 @@ func TestContractListenerDeleteFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows(contractListenerColumns).AddRow( - fftypes.NewUUID(), nil, []byte("{}"), "ns1", "sub1", "123", "{}", fftypes.Now()), + fftypes.NewUUID(), nil, []byte("{}"), "ns1", "sub1", "123", "{}", "topic1", nil, fftypes.Now()), ) mock.ExpectExec("DELETE .*").WillReturnError(fmt.Errorf("pop")) err := s.DeleteContractListenerByID(context.Background(), fftypes.NewUUID()) assert.Regexp(t, "FF10118", err) } + +func TestContractListenerOptions(t *testing.T) { + s, cleanup := newSQLiteTestProvider(t) + defer cleanup() + ctx := context.Background() + + l := &fftypes.ContractListener{ + ID: fftypes.NewUUID(), + Namespace: "ns", + Event: &fftypes.FFISerializedEvent{}, + Location: fftypes.JSONAnyPtr("{}"), + Options: &fftypes.ContractListenerOptions{ + FirstEvent: string(fftypes.SubOptsFirstEventOldest), + }, + } + + s.callbacks.On("UUIDCollectionNSEvent", database.CollectionContractListeners, fftypes.ChangeEventTypeCreated, "ns", l.ID).Return() + + err := s.UpsertContractListener(ctx, l) + assert.NoError(t, err) + + li, err := s.GetContractListenerByID(ctx, l.ID) + assert.NoError(t, err) + + assert.Equal(t, l.Options, li.Options) +} diff --git a/internal/database/sqlcommon/data_sql.go b/internal/database/sqlcommon/data_sql.go index 79c05f7dce..60a53f0229 100644 --- a/internal/database/sqlcommon/data_sql.go +++ b/internal/database/sqlcommon/data_sql.go @@ -54,8 +54,15 @@ var ( } ) -func (s *SQLCommon) attemptDataUpdate(ctx context.Context, tx *txWrapper, data *fftypes.Data, datatype *fftypes.DatatypeRef, blob *fftypes.BlobRef) (int64, error) { - data.ValueSize = data.Value.Length() +func (s *SQLCommon) attemptDataUpdate(ctx context.Context, tx *txWrapper, data *fftypes.Data) (int64, error) { + datatype := data.Datatype + if datatype == nil { + datatype = &fftypes.DatatypeRef{} + } + blob := data.Blob + if blob == nil { + blob = &fftypes.BlobRef{} + } return s.updateTx(ctx, tx, sq.Update("data"). Set("validator", string(data.Validator)). 
@@ -79,26 +86,35 @@ func (s *SQLCommon) attemptDataUpdate(ctx context.Context, tx *txWrapper, data * }) } -func (s *SQLCommon) attemptDataInsert(ctx context.Context, tx *txWrapper, data *fftypes.Data, datatype *fftypes.DatatypeRef, blob *fftypes.BlobRef, requestConflictEmptyResult bool) (int64, error) { - data.ValueSize = data.Value.Length() +func (s *SQLCommon) setDataInsertValues(query sq.InsertBuilder, data *fftypes.Data) sq.InsertBuilder { + datatype := data.Datatype + if datatype == nil { + datatype = &fftypes.DatatypeRef{} + } + blob := data.Blob + if blob == nil { + blob = &fftypes.BlobRef{} + } + return query.Values( + data.ID, + string(data.Validator), + data.Namespace, + datatype.Name, + datatype.Version, + data.Hash, + data.Created, + blob.Hash, + blob.Public, + blob.Name, + blob.Size, + data.ValueSize, + data.Value, + ) +} + +func (s *SQLCommon) attemptDataInsert(ctx context.Context, tx *txWrapper, data *fftypes.Data, requestConflictEmptyResult bool) (int64, error) { return s.insertTxExt(ctx, tx, - sq.Insert("data"). - Columns(dataColumnsWithValue...). - Values( - data.ID, - string(data.Validator), - data.Namespace, - datatype.Name, - datatype.Version, - data.Hash, - data.Created, - blob.Hash, - blob.Public, - blob.Name, - blob.Size, - data.ValueSize, - data.Value, - ), + s.setDataInsertValues(sq.Insert("data").Columns(dataColumnsWithValue...), data), func() { s.callbacks.UUIDCollectionNSEvent(database.CollectionData, fftypes.ChangeEventTypeCreated, data.Namespace, data.ID) }, requestConflictEmptyResult) @@ -111,15 +127,6 @@ func (s *SQLCommon) UpsertData(ctx context.Context, data *fftypes.Data, optimiza } defer s.rollbackTx(ctx, tx, autoCommit) - datatype := data.Datatype - if datatype == nil { - datatype = &fftypes.DatatypeRef{} - } - blob := data.Blob - if blob == nil { - blob = &fftypes.BlobRef{} - } - // This is a performance critical function, as we stream data into the database for every message, in every batch. // // First attempt the operation based on the optimization passed in. @@ -127,10 +134,10 @@ func (s *SQLCommon) UpsertData(ctx context.Context, data *fftypes.Data, optimiza // as only recovery paths require us to go down the un-optimized route. 
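+	// UpsertOptimizationNew attempts the INSERT first (a conflict is tolerated),
+	// while UpsertOptimizationExisting attempts the UPDATE first, treating zero
+	// affected rows as a miss that falls through to the full existence check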
optimized := false if optimization == database.UpsertOptimizationNew { - _, opErr := s.attemptDataInsert(ctx, tx, data, datatype, blob, true /* we want a failure here we can progress past */) + _, opErr := s.attemptDataInsert(ctx, tx, data, true /* we want a failure here we can progress past */) optimized = opErr == nil } else if optimization == database.UpsertOptimizationExisting { - rowsAffected, opErr := s.attemptDataUpdate(ctx, tx, data, datatype, blob) + rowsAffected, opErr := s.attemptDataUpdate(ctx, tx, data) optimized = opErr == nil && rowsAffected == 1 } @@ -158,11 +165,11 @@ func (s *SQLCommon) UpsertData(ctx context.Context, data *fftypes.Data, optimiza dataRows.Close() if existing { - if _, err = s.attemptDataUpdate(ctx, tx, data, datatype, blob); err != nil { + if _, err = s.attemptDataUpdate(ctx, tx, data); err != nil { return err } } else { - if _, err = s.attemptDataInsert(ctx, tx, data, datatype, blob, false); err != nil { + if _, err = s.attemptDataInsert(ctx, tx, data, false); err != nil { return err } } @@ -171,6 +178,42 @@ func (s *SQLCommon) UpsertData(ctx context.Context, data *fftypes.Data, optimiza return s.commitTx(ctx, tx, autoCommit) } +func (s *SQLCommon) InsertDataArray(ctx context.Context, dataArray fftypes.DataArray) (err error) { + + ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) + if err != nil { + return err + } + defer s.rollbackTx(ctx, tx, autoCommit) + + if s.features.MultiRowInsert { + query := sq.Insert("data").Columns(dataColumnsWithValue...) + for _, data := range dataArray { + query = s.setDataInsertValues(query, data) + } + sequences := make([]int64, len(dataArray)) + err := s.insertTxRows(ctx, tx, query, func() { + for _, data := range dataArray { + s.callbacks.UUIDCollectionNSEvent(database.CollectionData, fftypes.ChangeEventTypeCreated, data.Namespace, data.ID) + } + }, sequences, true /* we want the caller to be able to retry with individual upserts */) + if err != nil { + return err + } + } else { + // Fall back to individual inserts grouped in a TX + for _, data := range dataArray { + _, err := s.attemptDataInsert(ctx, tx, data, false) + if err != nil { + return err + } + } + } + + return s.commitTx(ctx, tx, autoCommit) + +} + func (s *SQLCommon) dataResult(ctx context.Context, row *sql.Rows, withValue bool) (*fftypes.Data, error) { data := fftypes.Data{ Datatype: &fftypes.DatatypeRef{}, @@ -237,7 +280,7 @@ func (s *SQLCommon) GetDataByID(ctx context.Context, id *fftypes.UUID, withValue return data, nil } -func (s *SQLCommon) GetData(ctx context.Context, filter database.Filter) (message []*fftypes.Data, res *database.FilterResult, err error) { +func (s *SQLCommon) GetData(ctx context.Context, filter database.Filter) (message fftypes.DataArray, res *database.FilterResult, err error) { query, fop, fi, err := s.filterSelect(ctx, "", sq.Select(dataColumnsWithValue...).From("data"), filter, dataFilterFieldMap, []interface{}{"sequence"}) if err != nil { @@ -250,7 +293,7 @@ func (s *SQLCommon) GetData(ctx context.Context, filter database.Filter) (messag } defer rows.Close() - data := []*fftypes.Data{} + data := fftypes.DataArray{} for rows.Next() { d, err := s.dataResult(ctx, rows, true) if err != nil { diff --git a/internal/database/sqlcommon/data_sql_test.go b/internal/database/sqlcommon/data_sql_test.go index 82c1e45b16..284cbc1fec 100644 --- a/internal/database/sqlcommon/data_sql_test.go +++ b/internal/database/sqlcommon/data_sql_test.go @@ -52,6 +52,7 @@ func TestDataE2EWithDB(t *testing.T) { Hash: fftypes.NewRandB32(), Created: 
fftypes.Now(), Value: fftypes.JSONAnyPtr(val.String()), + ValueSize: 12345, } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionData, fftypes.ChangeEventTypeCreated, "ns1", dataID, mock.Anything).Return() @@ -72,7 +73,7 @@ func TestDataE2EWithDB(t *testing.T) { dataJson, _ := json.Marshal(&data) dataReadJson, _ := json.Marshal(&dataRead) assert.Equal(t, string(dataJson), string(dataReadJson)) - assert.Equal(t, int64(data.Value.Length()), dataRead.ValueSize) + assert.Equal(t, int64(12345), dataRead.ValueSize) // Update the data (this is testing what's possible at the database layer, // and does not account for the verification that happens at the higher level) @@ -118,7 +119,6 @@ func TestDataE2EWithDB(t *testing.T) { dataJson, _ = json.Marshal(&dataUpdated) dataReadJson, _ = json.Marshal(&dataRead) assert.Equal(t, string(dataJson), string(dataReadJson)) - assert.Equal(t, int64(dataUpdated.Value.Length()), dataRead.ValueSize) valRestored, ok := dataRead.Value.JSONObjectOk() assert.True(t, ok) @@ -222,6 +222,61 @@ func TestUpsertDataFailCommit(t *testing.T) { assert.NoError(t, mock.ExpectationsWereMet()) } +func TestInsertDataArrayBeginFail(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.InsertDataArray(context.Background(), fftypes.DataArray{}) + assert.Regexp(t, "FF10114", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertDataArrayMultiRowOK(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + + data1 := &fftypes.Data{ID: fftypes.NewUUID(), Namespace: "ns1"} + data2 := &fftypes.Data{ID: fftypes.NewUUID(), Namespace: "ns1"} + s.callbacks.On("UUIDCollectionNSEvent", database.CollectionData, fftypes.ChangeEventTypeCreated, "ns1", data1.ID) + s.callbacks.On("UUIDCollectionNSEvent", database.CollectionData, fftypes.ChangeEventTypeCreated, "ns1", data2.ID) + + mock.ExpectBegin() + mock.ExpectQuery("INSERT.*").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}). + AddRow(int64(1001)). 
+ AddRow(int64(1002)), + ) + mock.ExpectCommit() + err := s.InsertDataArray(context.Background(), fftypes.DataArray{data1, data2}) + assert.NoError(t, err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertDataArrayMultiRowFail(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + data1 := &fftypes.Data{ID: fftypes.NewUUID(), Namespace: "ns1"} + mock.ExpectBegin() + mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop")) + err := s.InsertDataArray(context.Background(), fftypes.DataArray{data1}) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertDataArraySingleRowFail(t *testing.T) { + s, mock := newMockProvider().init() + data1 := &fftypes.Data{ID: fftypes.NewUUID(), Namespace: "ns1"} + mock.ExpectBegin() + mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop")) + err := s.InsertDataArray(context.Background(), fftypes.DataArray{data1}) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + func TestGetDataByIDSelectFail(t *testing.T) { s, mock := newMockProvider().init() dataID := fftypes.NewUUID() diff --git a/internal/database/sqlcommon/event_sql.go b/internal/database/sqlcommon/event_sql.go index 95b8257c80..33850dcfce 100644 --- a/internal/database/sqlcommon/event_sql.go +++ b/internal/database/sqlcommon/event_sql.go @@ -35,6 +35,7 @@ var ( "ref", "cid", "tx_id", + "topic", "created", } eventFilterFieldMap = map[string]string{ @@ -68,7 +69,25 @@ func (s *SQLCommon) InsertEvent(ctx context.Context, event *fftypes.Event) (err return s.commitTx(ctx, tx, autoCommit) } -func (s *SQLCommon) insertEventPreCommit(ctx context.Context, tx *txWrapper, event *fftypes.Event) (err error) { +func (s *SQLCommon) setEventInsertValues(query sq.InsertBuilder, event *fftypes.Event) sq.InsertBuilder { + return query.Values( + event.ID, + string(event.Type), + event.Namespace, + event.Reference, + event.Correlator, + event.Transaction, + event.Topic, + event.Created, + ) +} + +func (s *SQLCommon) eventInserted(ctx context.Context, event *fftypes.Event) { + s.callbacks.OrderedUUIDCollectionNSEvent(database.CollectionEvents, fftypes.ChangeEventTypeCreated, event.Namespace, event.ID, event.Sequence) + log.L(ctx).Infof("Emitted %s event %s for %s:%s (correlator=%v,topic=%s)", event.Type, event.ID, event.Namespace, event.Reference, event.Correlator, event.Topic) +} + +func (s *SQLCommon) insertEventsPreCommit(ctx context.Context, tx *txWrapper, events []*fftypes.Event) (err error) { // We take the cost of a full table lock on the events table. // This allows us to rely on the sequence to always be increasing, even when writing events @@ -77,23 +96,35 @@ func (s *SQLCommon) insertEventPreCommit(ctx context.Context, tx *txWrapper, eve return err } - event.Sequence, err = s.insertTx(ctx, tx, - sq.Insert("events"). - Columns(eventColumns...). - Values( - event.ID, - string(event.Type), - event.Namespace, - event.Reference, - event.Correlator, - event.Transaction, - event.Created, - ), - func() { - s.callbacks.OrderedUUIDCollectionNSEvent(database.CollectionEvents, fftypes.ChangeEventTypeCreated, event.Namespace, event.ID, event.Sequence) - }, - ) - return err + if s.features.MultiRowInsert { + query := sq.Insert("events").Columns(eventColumns...) 
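+		// One VALUES tuple is appended per event, so the whole batch goes in a
+		// single INSERT and the new sequences come back in matching order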
+ for _, event := range events { + query = s.setEventInsertValues(query, event) + } + sequences := make([]int64, len(events)) + err := s.insertTxRows(ctx, tx, query, func() { + for i, event := range events { + event.Sequence = sequences[i] + s.eventInserted(ctx, event) + } + }, sequences, true /* we want the caller to be able to retry with individual upserts */) + if err != nil { + return err + } + } else { + // Fall back to individual inserts grouped in a TX + for _, event := range events { + query := s.setEventInsertValues(sq.Insert("events").Columns(eventColumns...), event) + event.Sequence, err = s.insertTx(ctx, tx, query, func() { + s.eventInserted(ctx, event) + }) + if err != nil { + return err + } + } + } + + return nil } func (s *SQLCommon) eventResult(ctx context.Context, row *sql.Rows) (*fftypes.Event, error) { @@ -105,6 +136,7 @@ func (s *SQLCommon) eventResult(ctx context.Context, row *sql.Rows) (*fftypes.Ev &event.Reference, &event.Correlator, &event.Transaction, + &event.Topic, &event.Created, // Must be added to the list of columns in all selects &event.Sequence, diff --git a/internal/database/sqlcommon/event_sql_test.go b/internal/database/sqlcommon/event_sql_test.go index 3fce7b8e1d..d4777628a0 100644 --- a/internal/database/sqlcommon/event_sql_test.go +++ b/internal/database/sqlcommon/event_sql_test.go @@ -44,6 +44,7 @@ func TestEventE2EWithDB(t *testing.T) { Type: fftypes.EventTypeMessageConfirmed, Reference: fftypes.NewUUID(), Correlator: fftypes.NewUUID(), + Topic: "topic1", Created: fftypes.Now(), } @@ -146,6 +147,64 @@ func TestInsertEventFailCommit(t *testing.T) { assert.NoError(t, mock.ExpectationsWereMet()) } +func TestInsertEventsPreCommitMultiRowOK(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + + ev1 := &fftypes.Event{ID: fftypes.NewUUID(), Namespace: "ns1"} + ev2 := &fftypes.Event{ID: fftypes.NewUUID(), Namespace: "ns1"} + s.callbacks.On("OrderedUUIDCollectionNSEvent", database.CollectionEvents, fftypes.ChangeEventTypeCreated, "ns1", ev1.ID, int64(1001)) + s.callbacks.On("OrderedUUIDCollectionNSEvent", database.CollectionEvents, fftypes.ChangeEventTypeCreated, "ns1", ev2.ID, int64(1002)) + + mock.ExpectBegin() + mock.ExpectExec("LOCK .*").WillReturnResult(driver.ResultNoRows) + mock.ExpectQuery("INSERT.*").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}). + AddRow(int64(1001)). 
+ AddRow(int64(1002)), + ) + mock.ExpectCommit() + ctx, tx, autoCommit, err := s.beginOrUseTx(context.Background()) + tx.preCommitEvents = []*fftypes.Event{ev1, ev2} + assert.NoError(t, err) + err = s.commitTx(ctx, tx, autoCommit) + assert.NoError(t, err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertEventsPreCommitMultiRowFail(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + ev1 := &fftypes.Event{ID: fftypes.NewUUID(), Namespace: "ns1"} + mock.ExpectBegin() + mock.ExpectExec("LOCK .*").WillReturnResult(driver.ResultNoRows) + mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop")) + ctx, tx, autoCommit, err := s.beginOrUseTx(context.Background()) + tx.preCommitEvents = []*fftypes.Event{ev1} + assert.NoError(t, err) + err = s.commitTx(ctx, tx, autoCommit) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertEventsPreCommitSingleRowFail(t *testing.T) { + s, mock := newMockProvider().init() + ev1 := &fftypes.Event{ID: fftypes.NewUUID(), Namespace: "ns1"} + mock.ExpectBegin() + mock.ExpectExec("LOCK .*").WillReturnResult(driver.ResultNoRows) + mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop")) + ctx, tx, autoCommit, err := s.beginOrUseTx(context.Background()) + tx.preCommitEvents = []*fftypes.Event{ev1} + assert.NoError(t, err) + err = s.commitTx(ctx, tx, autoCommit) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + func TestGetEventByIDSelectFail(t *testing.T) { s, mock := newMockProvider().init() eventID := fftypes.NewUUID() diff --git a/internal/database/sqlcommon/message_sql.go b/internal/database/sqlcommon/message_sql.go index 303aa22ef6..b27fbd7085 100644 --- a/internal/database/sqlcommon/message_sql.go +++ b/internal/database/sqlcommon/message_sql.go @@ -84,29 +84,31 @@ func (s *SQLCommon) attemptMessageUpdate(ctx context.Context, tx *txWrapper, mes }) } +func (s *SQLCommon) setMessageInsertValues(query sq.InsertBuilder, message *fftypes.Message) sq.InsertBuilder { + return query.Values( + message.Header.ID, + message.Header.CID, + string(message.Header.Type), + message.Header.Author, + message.Header.Key, + message.Header.Created, + message.Header.Namespace, + message.Header.Topics, + message.Header.Tag, + message.Header.Group, + message.Header.DataHash, + message.Hash, + message.Pins, + message.State, + message.Confirmed, + message.Header.TxType, + message.BatchID, + ) +} + func (s *SQLCommon) attemptMessageInsert(ctx context.Context, tx *txWrapper, message *fftypes.Message, requestConflictEmptyResult bool) (err error) { message.Sequence, err = s.insertTxExt(ctx, tx, - sq.Insert("messages"). - Columns(msgColumns...). 
- Values( - message.Header.ID, - message.Header.CID, - string(message.Header.Type), - message.Header.Author, - message.Header.Key, - message.Header.Created, - message.Header.Namespace, - message.Header.Topics, - message.Header.Tag, - message.Header.Group, - message.Header.DataHash, - message.Hash, - message.Pins, - message.State, - message.Confirmed, - message.Header.TxType, - message.BatchID, - ), + s.setMessageInsertValues(sq.Insert("messages").Columns(msgColumns...), message), func() { s.callbacks.OrderedUUIDCollectionNSEvent(database.CollectionMessages, fftypes.ChangeEventTypeCreated, message.Header.Namespace, message.Header.ID, message.Sequence) }, requestConflictEmptyResult) @@ -183,6 +185,69 @@ func (s *SQLCommon) UpsertMessage(ctx context.Context, message *fftypes.Message, return s.commitTx(ctx, tx, autoCommit) } +func (s *SQLCommon) InsertMessages(ctx context.Context, messages []*fftypes.Message) (err error) { + + ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) + if err != nil { + return err + } + defer s.rollbackTx(ctx, tx, autoCommit) + + if s.features.MultiRowInsert { + msgQuery := sq.Insert("messages").Columns(msgColumns...) + dataRefQuery := sq.Insert("messages_data").Columns( + "message_id", + "data_id", + "data_hash", + "data_idx", + ) + dataRefCount := 0 + for _, message := range messages { + msgQuery = s.setMessageInsertValues(msgQuery, message) + for idx, dataRef := range message.Data { + dataRefQuery = dataRefQuery.Values(message.Header.ID, dataRef.ID, dataRef.Hash, idx) + dataRefCount++ + } + } + sequences := make([]int64, len(messages)) + + // Use a single multi-row insert for the messages + err := s.insertTxRows(ctx, tx, msgQuery, func() { + for i, message := range messages { + message.Sequence = sequences[i] + s.callbacks.OrderedUUIDCollectionNSEvent(database.CollectionMessages, fftypes.ChangeEventTypeCreated, message.Header.Namespace, message.Header.ID, message.Sequence) + } + }, sequences, true /* we want the caller to be able to retry with individual upserts */) + if err != nil { + return err + } + + // Use a single multi-row insert for the data refs + if dataRefCount > 0 { + dataRefSeqs := make([]int64, dataRefCount) + err = s.insertTxRows(ctx, tx, dataRefQuery, nil, dataRefSeqs, false) + if err != nil { + return err + } + } + } else { + // Fall back to individual inserts grouped in a TX + for _, message := range messages { + err := s.attemptMessageInsert(ctx, tx, message, false) + if err != nil { + return err + } + err = s.updateMessageDataRefs(ctx, tx, message, false) + if err != nil { + return err + } + } + } + + return s.commitTx(ctx, tx, autoCommit) + +} + // In SQL update+bump is a delete+insert within a TX func (s *SQLCommon) ReplaceMessage(ctx context.Context, message *fftypes.Message) (err error) { ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) @@ -405,13 +470,41 @@ func (s *SQLCommon) getMessagesQuery(ctx context.Context, query sq.SelectBuilder return msgs, s.queryRes(ctx, tx, "messages", fop, fi), err } +func (s *SQLCommon) GetMessageIDs(ctx context.Context, filter database.Filter) (ids []*fftypes.IDAndSequence, err error) { + query, _, _, err := s.filterSelect(ctx, "", sq.Select("id", sequenceColumn).From("messages"), filter, msgFilterFieldMap, + []interface{}{ + &database.SortField{Field: "confirmed", Descending: true, Nulls: database.NullsFirst}, + "created", + }) + if err != nil { + return nil, err + } + + rows, _, err := s.query(ctx, query) + if err != nil { + return nil, err + } + defer rows.Close() + + ids = []*fftypes.IDAndSequence{} + for 
rows.Next() { + var id fftypes.IDAndSequence + err = rows.Scan(&id.ID, &id.Sequence) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgDBReadErr, "messages") + } + ids = append(ids, &id) + } + return ids, nil +} + func (s *SQLCommon) GetMessages(ctx context.Context, filter database.Filter) (message []*fftypes.Message, fr *database.FilterResult, err error) { cols := append([]string{}, msgColumns...) cols = append(cols, sequenceColumn) query, fop, fi, err := s.filterSelect(ctx, "", sq.Select(cols...).From("messages"), filter, msgFilterFieldMap, []interface{}{ &database.SortField{Field: "confirmed", Descending: true, Nulls: database.NullsFirst}, - "created", + &database.SortField{Field: "created", Descending: true}, }) if err != nil { return nil, nil, err diff --git a/internal/database/sqlcommon/message_sql_test.go b/internal/database/sqlcommon/message_sql_test.go index 1bb8bf5dbd..6b4d223e09 100644 --- a/internal/database/sqlcommon/message_sql_test.go +++ b/internal/database/sqlcommon/message_sql_test.go @@ -159,6 +159,12 @@ func TestUpsertE2EWithDB(t *testing.T) { msgReadJson, _ = json.Marshal(msgs[0]) assert.Equal(t, string(msgJson), string(msgReadJson)) + msgIDs, err := s.GetMessageIDs(ctx, filter) + assert.NoError(t, err) + assert.Equal(t, 1, len(msgIDs)) + assert.Equal(t, msg.Header.ID, &msgIDs[0].ID) + assert.Equal(t, msg.Sequence, msgIDs[0].Sequence) + // Check we can get it with a filter on only mesasges with a particular data ref msgs, _, err = s.GetMessagesForData(ctx, dataID2, filter.Count(true)) assert.Regexp(t, "FF10267", err) // The left join means it will take non-trivial extra work to support this. So not supported for now @@ -276,6 +282,93 @@ func TestUpsertMessageFailCommit(t *testing.T) { assert.NoError(t, mock.ExpectationsWereMet()) } +func TestInsertMessagesBeginFail(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.InsertMessages(context.Background(), []*fftypes.Message{}) + assert.Regexp(t, "FF10114", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertMessagesMultiRowOK(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + + msg1 := &fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), Namespace: "ns1"}, Data: fftypes.DataRefs{{ID: fftypes.NewUUID()}}} + msg2 := &fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), Namespace: "ns1"}, Data: fftypes.DataRefs{{ID: fftypes.NewUUID()}}} + s.callbacks.On("OrderedUUIDCollectionNSEvent", database.CollectionMessages, fftypes.ChangeEventTypeCreated, "ns1", msg1.Header.ID, int64(1001)) + s.callbacks.On("OrderedUUIDCollectionNSEvent", database.CollectionMessages, fftypes.ChangeEventTypeCreated, "ns1", msg2.Header.ID, int64(1002)) + + mock.ExpectBegin() + mock.ExpectQuery("INSERT.*messages").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}). + AddRow(int64(1001)). + AddRow(int64(1002)), + ) + mock.ExpectQuery("INSERT.*messages_data").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}). + AddRow(int64(1003)). 
+ AddRow(int64(1004)), + ) + mock.ExpectCommit() + err := s.InsertMessages(context.Background(), []*fftypes.Message{msg1, msg2}) + assert.NoError(t, err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertMessagesMultiRowDataRefsFail(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + + msg1 := &fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), Namespace: "ns1"}, Data: fftypes.DataRefs{{ID: fftypes.NewUUID()}}} + + mock.ExpectBegin() + mock.ExpectQuery("INSERT.*messages").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}).AddRow(int64(1001))) + mock.ExpectQuery("INSERT.*messages_data").WillReturnError(fmt.Errorf("pop")) + err := s.InsertMessages(context.Background(), []*fftypes.Message{msg1}) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertMessagesMultiRowFail(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + msg1 := &fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), Namespace: "ns1"}} + mock.ExpectBegin() + mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop")) + err := s.InsertMessages(context.Background(), []*fftypes.Message{msg1}) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertMessagesSingleRowFail(t *testing.T) { + s, mock := newMockProvider().init() + msg1 := &fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), Namespace: "ns1"}} + mock.ExpectBegin() + mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop")) + err := s.InsertMessages(context.Background(), []*fftypes.Message{msg1}) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertMessagesSingleRowFailDataRefs(t *testing.T) { + s, mock := newMockProvider().init() + msg1 := &fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), Namespace: "ns1"}, Data: fftypes.DataRefs{{ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}}} + mock.ExpectBegin() + mock.ExpectExec("INSERT.*messages").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec("INSERT.*messages_data").WillReturnError(fmt.Errorf("pop")) + err := s.InsertMessages(context.Background(), []*fftypes.Message{msg1}) + assert.Regexp(t, "FF10116", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + func TestReplaceMessageFailBegin(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) @@ -498,6 +591,32 @@ func TestMessageUpdateBeginFail(t *testing.T) { assert.Regexp(t, "FF10114", err) } +func TestGetMessageIDsQueryFail(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop")) + f := database.MessageQueryFactory.NewFilter(context.Background()).Eq("id", "") + _, err := s.GetMessageIDs(context.Background(), f) + assert.Regexp(t, "FF10115", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestGetMessageIDsReadMessageFail(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow("bad id")) + f := database.MessageQueryFactory.NewFilter(context.Background()).Eq("id", "") + _, err := 
s.GetMessageIDs(context.Background(), f) + assert.Regexp(t, "FF10121", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + +func TestGetMessageIDsBadQuery(t *testing.T) { + s, mock := newMockProvider().init() + f := database.MessageQueryFactory.NewFilter(context.Background()).Eq("!wrong", "") + _, err := s.GetMessageIDs(context.Background(), f) + assert.Regexp(t, "FF10148", err) + assert.NoError(t, mock.ExpectationsWereMet()) +} + func TestMessageUpdateBuildQueryFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() diff --git a/internal/database/sqlcommon/operation_sql.go b/internal/database/sqlcommon/operation_sql.go index cd148792d1..051e4c8ae1 100644 --- a/internal/database/sqlcommon/operation_sql.go +++ b/internal/database/sqlcommon/operation_sql.go @@ -40,15 +40,17 @@ var ( "error", "input", "output", + "retry_id", } opFilterFieldMap = map[string]string{ "tx": "tx_id", "type": "optype", "status": "opstatus", + "retry": "retry_id", } ) -func (s *SQLCommon) InsertOperation(ctx context.Context, operation *fftypes.Operation) (err error) { +func (s *SQLCommon) InsertOperation(ctx context.Context, operation *fftypes.Operation, hooks ...database.PostCompletionHook) (err error) { ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) if err != nil { return err @@ -70,9 +72,13 @@ func (s *SQLCommon) InsertOperation(ctx context.Context, operation *fftypes.Oper operation.Error, operation.Input, operation.Output, + operation.Retry, ), func() { s.callbacks.UUIDCollectionNSEvent(database.CollectionOperations, fftypes.ChangeEventTypeCreated, operation.Namespace, operation.ID) + for _, hook := range hooks { + hook() + } }, ); err != nil { return err @@ -95,6 +101,7 @@ func (s *SQLCommon) opResult(ctx context.Context, row *sql.Rows) (*fftypes.Opera &op.Error, &op.Input, &op.Output, + &op.Retry, ) if err != nil { return nil, i18n.WrapError(ctx, err, i18n.MsgDBReadErr, "operations") @@ -152,7 +159,7 @@ func (s *SQLCommon) GetOperations(ctx context.Context, filter database.Filter) ( return ops, s.queryRes(ctx, tx, "operations", fop, fi), err } -func (s *SQLCommon) updateOperation(ctx context.Context, id *fftypes.UUID, update database.Update) (err error) { +func (s *SQLCommon) UpdateOperation(ctx context.Context, id *fftypes.UUID, update database.Update) (err error) { ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) if err != nil { @@ -182,5 +189,5 @@ func (s *SQLCommon) ResolveOperation(ctx context.Context, id *fftypes.UUID, stat if output != nil { update.Set("output", output) } - return s.updateOperation(ctx, id, update) + return s.UpdateOperation(ctx, id, update) } diff --git a/internal/database/sqlcommon/operation_sql_test.go b/internal/database/sqlcommon/operation_sql_test.go index 27b4fdc1ac..46b6c97889 100644 --- a/internal/database/sqlcommon/operation_sql_test.go +++ b/internal/database/sqlcommon/operation_sql_test.go @@ -39,7 +39,7 @@ func TestOperationE2EWithDB(t *testing.T) { operation := &fftypes.Operation{ ID: operationID, Namespace: "ns1", - Type: fftypes.OpTypeBlockchainBatchPin, + Type: fftypes.OpTypeBlockchainPinBatch, Transaction: fftypes.NewUUID(), Status: fftypes.OpStatusFailed, Plugin: "ethereum", @@ -50,8 +50,12 @@ func TestOperationE2EWithDB(t *testing.T) { Updated: fftypes.Now(), } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionOperations, fftypes.ChangeEventTypeCreated, "ns1", operationID).Return() - err := s.InsertOperation(ctx, operation) + hookCalled := false + err := s.InsertOperation(ctx, operation, func() { + hookCalled = true + }) 
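+	// The hook above is a database.PostCompletionHook - per the InsertOperation change it runs in the
+	// same post-commit callback that emits the collection change event, so it will have fired by the
+	// time InsertOperation returns.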
assert.NoError(t, err) + assert.True(t, hookCalled) // Query back the operation (by ID) operationRead, err := s.GetOperationByID(ctx, operationID) @@ -193,7 +197,7 @@ func TestOperationUpdateBeginFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) u := database.OperationQueryFactory.NewUpdate(context.Background()).Set("id", fftypes.NewUUID()) - err := s.updateOperation(context.Background(), fftypes.NewUUID(), u) + err := s.UpdateOperation(context.Background(), fftypes.NewUUID(), u) assert.Regexp(t, "FF10114", err) } @@ -201,7 +205,7 @@ func TestOperationUpdateBuildQueryFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() u := database.OperationQueryFactory.NewUpdate(context.Background()).Set("id", map[bool]bool{true: false}) - err := s.updateOperation(context.Background(), fftypes.NewUUID(), u) + err := s.UpdateOperation(context.Background(), fftypes.NewUUID(), u) assert.Regexp(t, "FF10149.*id", err) } @@ -211,6 +215,6 @@ func TestOperationUpdateFail(t *testing.T) { mock.ExpectExec("UPDATE .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() u := database.OperationQueryFactory.NewUpdate(context.Background()).Set("id", fftypes.NewUUID()) - err := s.updateOperation(context.Background(), fftypes.NewUUID(), u) + err := s.UpdateOperation(context.Background(), fftypes.NewUUID(), u) assert.Regexp(t, "FF10117", err) } diff --git a/internal/database/sqlcommon/pin_sql.go b/internal/database/sqlcommon/pin_sql.go index 8cd2008b4d..3ae81ca950 100644 --- a/internal/database/sqlcommon/pin_sql.go +++ b/internal/database/sqlcommon/pin_sql.go @@ -32,14 +32,16 @@ var ( "masked", "hash", "batch_id", + "batch_hash", "idx", "signer", "dispatched", "created", } pinFilterFieldMap = map[string]string{ - "batch": "batch_id", - "index": "idx", + "batch": "batch_id", + "batchhash": "batch_hash", + "index": "idx", } ) @@ -74,22 +76,7 @@ func (s *SQLCommon) UpsertPin(ctx context.Context, pin *fftypes.Pin) (err error) log.L(ctx).Debugf("Existing pin returned at sequence %d", pin.Sequence) } else { pinRows.Close() - if pin.Sequence, err = s.insertTx(ctx, tx, - sq.Insert("pins"). - Columns(pinColumns...). 
- Values( - pin.Masked, - pin.Hash, - pin.Batch, - pin.Index, - pin.Signer, - pin.Dispatched, - pin.Created, - ), - func() { - s.callbacks.OrderedCollectionEvent(database.CollectionPins, fftypes.ChangeEventTypeCreated, pin.Sequence) - }, - ); err != nil { + if err = s.attemptPinInsert(ctx, tx, pin); err != nil { return err } @@ -98,12 +85,71 @@ func (s *SQLCommon) UpsertPin(ctx context.Context, pin *fftypes.Pin) (err error) return s.commitTx(ctx, tx, autoCommit) } +func (s *SQLCommon) attemptPinInsert(ctx context.Context, tx *txWrapper, pin *fftypes.Pin) (err error) { + pin.Sequence, err = s.insertTx(ctx, tx, + s.setPinInsertValues(sq.Insert("pins").Columns(pinColumns...), pin), + func() { + log.L(ctx).Debugf("Triggering creation event for pin %d", pin.Sequence) + s.callbacks.OrderedCollectionEvent(database.CollectionPins, fftypes.ChangeEventTypeCreated, pin.Sequence) + }, + ) + return err +} + +func (s *SQLCommon) setPinInsertValues(query sq.InsertBuilder, pin *fftypes.Pin) sq.InsertBuilder { + return query.Values( + pin.Masked, + pin.Hash, + pin.Batch, + pin.BatchHash, + pin.Index, + pin.Signer, + pin.Dispatched, + pin.Created, + ) +} + +func (s *SQLCommon) InsertPins(ctx context.Context, pins []*fftypes.Pin) error { + ctx, tx, autoCommit, err := s.beginOrUseTx(ctx) + if err != nil { + return err + } + defer s.rollbackTx(ctx, tx, autoCommit) + + if s.features.MultiRowInsert { + query := sq.Insert("pins").Columns(pinColumns...) + for _, pin := range pins { + query = s.setPinInsertValues(query, pin) + } + sequences := make([]int64, len(pins)) + err := s.insertTxRows(ctx, tx, query, func() { + for i, pin := range pins { + pin.Sequence = sequences[i] + s.callbacks.OrderedCollectionEvent(database.CollectionPins, fftypes.ChangeEventTypeCreated, pin.Sequence) + } + }, sequences, true /* we want the caller to be able to retry with individual upserts */) + if err != nil { + return err + } + } else { + // Fall back to individual inserts grouped in a TX + for _, pin := range pins { + if err := s.attemptPinInsert(ctx, tx, pin); err != nil { + return err + } + } + } + + return s.commitTx(ctx, tx, autoCommit) +} + func (s *SQLCommon) pinResult(ctx context.Context, row *sql.Rows) (*fftypes.Pin, error) { pin := fftypes.Pin{} err := row.Scan( &pin.Masked, &pin.Hash, &pin.Batch, + &pin.BatchHash, &pin.Index, &pin.Signer, &pin.Dispatched, diff --git a/internal/database/sqlcommon/pin_sql_test.go b/internal/database/sqlcommon/pin_sql_test.go index b2510247fa..f866cfdb7c 100644 --- a/internal/database/sqlcommon/pin_sql_test.go +++ b/internal/database/sqlcommon/pin_sql_test.go @@ -41,6 +41,7 @@ func TestPinsE2EWithDB(t *testing.T) { Masked: true, Hash: fftypes.NewRandB32(), Batch: fftypes.NewUUID(), + BatchHash: fftypes.NewRandB32(), Index: 10, Created: fftypes.Now(), Signer: "0x12345", @@ -141,6 +142,61 @@ func TestUpsertPinFailCommit(t *testing.T) { assert.NoError(t, mock.ExpectationsWereMet()) } +func TestInsertPinsBeginFail(t *testing.T) { + s, mock := newMockProvider().init() + mock.ExpectBegin().WillReturnError(fmt.Errorf("pop")) + err := s.InsertPins(context.Background(), []*fftypes.Pin{}) + assert.Regexp(t, "FF10114", err) + assert.NoError(t, mock.ExpectationsWereMet()) + s.callbacks.AssertExpectations(t) +} + +func TestInsertPinsMultiRowOK(t *testing.T) { + s, mock := newMockProvider().init() + s.features.MultiRowInsert = true + s.fakePSQLInsert = true + + pin1 := &fftypes.Pin{Hash: fftypes.NewRandB32()} + pin2 := &fftypes.Pin{Hash: fftypes.NewRandB32()} + s.callbacks.On("OrderedCollectionEvent", 
database.CollectionPins, fftypes.ChangeEventTypeCreated, int64(1001))
+	s.callbacks.On("OrderedCollectionEvent", database.CollectionPins, fftypes.ChangeEventTypeCreated, int64(1002))
+
+	mock.ExpectBegin()
+	mock.ExpectQuery("INSERT.*").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}).
+		AddRow(int64(1001)).
+		AddRow(int64(1002)),
+	)
+	mock.ExpectCommit()
+	err := s.InsertPins(context.Background(), []*fftypes.Pin{pin1, pin2})
+	assert.NoError(t, err)
+	assert.NoError(t, mock.ExpectationsWereMet())
+	s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertPinsMultiRowFail(t *testing.T) {
+	s, mock := newMockProvider().init()
+	s.features.MultiRowInsert = true
+	s.fakePSQLInsert = true
+	pin1 := &fftypes.Pin{Hash: fftypes.NewRandB32()}
+	mock.ExpectBegin()
+	mock.ExpectQuery("INSERT.*").WillReturnError(fmt.Errorf("pop"))
+	err := s.InsertPins(context.Background(), []*fftypes.Pin{pin1})
+	assert.Regexp(t, "FF10116", err)
+	assert.NoError(t, mock.ExpectationsWereMet())
+	s.callbacks.AssertExpectations(t)
+}
+
+func TestInsertPinsSingleRowFail(t *testing.T) {
+	s, mock := newMockProvider().init()
+	pin1 := &fftypes.Pin{Hash: fftypes.NewRandB32()}
+	mock.ExpectBegin()
+	mock.ExpectExec("INSERT.*").WillReturnError(fmt.Errorf("pop"))
+	err := s.InsertPins(context.Background(), []*fftypes.Pin{pin1})
+	assert.Regexp(t, "FF10116", err)
+	assert.NoError(t, mock.ExpectationsWereMet())
+	s.callbacks.AssertExpectations(t)
+}
+
 func TestGetPinQueryFail(t *testing.T) {
 	s, mock := newMockProvider().init()
 	mock.ExpectQuery("SELECT .*").WillReturnError(fmt.Errorf("pop"))
diff --git a/internal/database/sqlcommon/provider.go b/internal/database/sqlcommon/provider.go
index 2cd033029f..e4aa381385 100644
--- a/internal/database/sqlcommon/provider.go
+++ b/internal/database/sqlcommon/provider.go
@@ -29,6 +29,7 @@ const (
 
 type SQLFeatures struct {
 	UseILIKE              bool
+	MultiRowInsert        bool
 	PlaceholderFormat     sq.PlaceholderFormat
 	ExclusiveTableLockSQL func(table string) string
 }
@@ -36,6 +37,7 @@ type SQLFeatures struct {
 func DefaultSQLProviderFeatures() SQLFeatures {
 	return SQLFeatures{
 		UseILIKE:          false,
+		MultiRowInsert:    false,
 		PlaceholderFormat: sq.Dollar,
 	}
 }
diff --git a/internal/database/sqlcommon/provider_mock_test.go b/internal/database/sqlcommon/provider_mock_test.go
index 2a4c404968..f07417e836 100644
--- a/internal/database/sqlcommon/provider_mock_test.go
+++ b/internal/database/sqlcommon/provider_mock_test.go
@@ -46,10 +46,14 @@ type mockProvider struct {
 }
 
 func newMockProvider() *mockProvider {
+	config.Reset()
 	mp := &mockProvider{
-		prefix: config.NewPluginConfig("unittest.mockdb"),
+		capabilities: &database.Capabilities{},
+		callbacks:    &databasemocks.Callbacks{},
+		prefix:       config.NewPluginConfig("unittest.mockdb"),
 	}
 	mp.SQLCommon.InitPrefix(mp, mp.prefix)
+	mp.prefix.Set(SQLConfMaxConnections, 10)
 	mp.mockDB, mp.mdb, _ = sqlmock.New()
 	return mp
 }
diff --git a/internal/database/sqlcommon/sqlcommon.go b/internal/database/sqlcommon/sqlcommon.go
index 24fa5818cc..84be79a8c2 100644
--- a/internal/database/sqlcommon/sqlcommon.go
+++ b/internal/database/sqlcommon/sqlcommon.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"database/sql"
 	"fmt"
+	"strings"
 
 	sq "github.com/Masterminds/squirrel"
 	"github.com/golang-migrate/migrate/v4"
@@ -51,6 +52,23 @@ type txWrapper struct {
 	tableLocks []string
 }
 
+// shortenSQL grabs the first three words of a SQL statement, for minimal debug logging (SQL statements can be huge
+// even without args in the case of a multi-row insert, for example - so we reserve full logging for trace level only).
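+//
+// For instance, shortenSQL("INSERT INTO pins (masked, hash, ...) VALUES (...)") returns just "INSERT INTO pins".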
+func shortenSQL(sqlString string) string { + buff := strings.Builder{} + spaceCount := 0 + for _, c := range sqlString { + if c == ' ' { + spaceCount++ + if spaceCount >= 3 { + break + } + } + buff.WriteRune(c) + } + return buff.String() +} + func (s *SQLCommon) Init(ctx context.Context, provider Provider, prefix config.Prefix, callbacks database.Callbacks, capabilities *database.Capabilities) (err error) { s.capabilities = capabilities s.callbacks = callbacks @@ -69,6 +87,17 @@ func (s *SQLCommon) Init(ctx context.Context, provider Provider, prefix config.P connLimit := prefix.GetInt(SQLConfMaxConnections) if connLimit > 0 { s.db.SetMaxOpenConns(connLimit) + s.db.SetConnMaxIdleTime(prefix.GetDuration(SQLConfMaxConnIdleTime)) + maxIdleConns := prefix.GetInt(SQLConfMaxIdleConns) + if maxIdleConns <= 0 { + // By default we rely on the idle time, rather than a maximum number of conns to leave open + maxIdleConns = connLimit + } + s.db.SetMaxIdleConns(maxIdleConns) + s.db.SetConnMaxLifetime(prefix.GetDuration(SQLConfMaxConnLifetime)) + } + if connLimit > 1 { + capabilities.Concurrency = true } if prefix.GetBool(SQLConfMigrationsAuto) { @@ -165,7 +194,7 @@ func (s *SQLCommon) queryTx(ctx context.Context, tx *txWrapper, q sq.SelectBuild if err != nil { return nil, tx, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> query: %s`, sqlQuery) + l.Debugf(`SQL-> query: %s`, shortenSQL(sqlQuery)) l.Tracef(`SQL-> query args: %+v`, args) var rows *sql.Rows if tx != nil { @@ -201,7 +230,7 @@ func (s *SQLCommon) countQuery(ctx context.Context, tx *txWrapper, tableName str if err != nil { return count, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> count query: %s`, sqlQuery) + l.Debugf(`SQL-> count query: %s`, shortenSQL(sqlQuery)) l.Tracef(`SQL-> count query args: %+v`, args) var rows *sql.Rows if tx != nil { @@ -241,40 +270,58 @@ func (s *SQLCommon) insertTx(ctx context.Context, tx *txWrapper, q sq.InsertBuil } func (s *SQLCommon) insertTxExt(ctx context.Context, tx *txWrapper, q sq.InsertBuilder, postCommit func(), requestConflictEmptyResult bool) (int64, error) { + sequences := []int64{-1} + err := s.insertTxRows(ctx, tx, q, postCommit, sequences, requestConflictEmptyResult) + return sequences[0], err +} + +func (s *SQLCommon) insertTxRows(ctx context.Context, tx *txWrapper, q sq.InsertBuilder, postCommit func(), sequences []int64, requestConflictEmptyResult bool) error { l := log.L(ctx) q, useQuery := s.provider.ApplyInsertQueryCustomizations(q, requestConflictEmptyResult) sqlQuery, args, err := q.PlaceholderFormat(s.features.PlaceholderFormat).ToSql() if err != nil { - return -1, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) + return i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> insert: %s`, sqlQuery) - l.Tracef(`SQL-> insert args: %+v`, args) - var sequence int64 + l.Debugf(`SQL-> insert %s`, shortenSQL(sqlQuery)) + l.Tracef(`SQL-> insert query: %s (args: %+v)`, sqlQuery, args) if useQuery { - err := tx.sqlTX.QueryRowContext(ctx, sqlQuery, args...).Scan(&sequence) + result, err := tx.sqlTX.QueryContext(ctx, sqlQuery, args...) 
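+		// One row is expected back per inserted record (e.g. from a PSQL-style RETURNING clause), each
+		// carrying the new sequence; running out of rows early means the insert was incomplete.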
+ for i := 0; i < len(sequences) && err == nil; i++ { + if result.Next() { + err = result.Scan(&sequences[i]) + } else { + err = i18n.NewError(ctx, i18n.MsgDBNoSequence, i+1) + } + } + if result != nil { + result.Close() + } if err != nil { level := logrus.DebugLevel if !requestConflictEmptyResult { level = logrus.ErrorLevel } l.Logf(level, `SQL insert failed (conflictEmptyRequested=%t): %s sql=[ %s ]: %s`, requestConflictEmptyResult, err, sqlQuery, err) - return -1, i18n.WrapError(ctx, err, i18n.MsgDBInsertFailed) + return i18n.WrapError(ctx, err, i18n.MsgDBInsertFailed) } } else { + if len(sequences) > 1 { + return i18n.WrapError(ctx, err, i18n.MsgDBMultiRowConfigError) + } res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) if err != nil { l.Errorf(`SQL insert failed: %s sql=[ %s ]: %s`, err, sqlQuery, err) - return -1, i18n.WrapError(ctx, err, i18n.MsgDBInsertFailed) + return i18n.WrapError(ctx, err, i18n.MsgDBInsertFailed) } - sequence, _ = res.LastInsertId() + sequences[0], _ = res.LastInsertId() } - l.Debugf(`SQL<- inserted sequence=%d`, sequence) + l.Debugf(`SQL<- inserted sequences=%v`, sequences) if postCommit != nil { s.postCommitEvent(tx, postCommit) } - return sequence, nil + return nil } func (s *SQLCommon) deleteTx(ctx context.Context, tx *txWrapper, q sq.DeleteBuilder, postCommit func()) error { @@ -283,8 +330,8 @@ func (s *SQLCommon) deleteTx(ctx context.Context, tx *txWrapper, q sq.DeleteBuil if err != nil { return i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> delete: %s`, sqlQuery) - l.Tracef(`SQL-> delete args: %+v`, args) + l.Debugf(`SQL-> delete: %s`, shortenSQL(sqlQuery)) + l.Tracef(`SQL-> delete query: %s args: %+v`, sqlQuery, args) res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) if err != nil { l.Errorf(`SQL delete failed: %s sql=[ %s ]: %s`, err, sqlQuery, err) @@ -308,8 +355,8 @@ func (s *SQLCommon) updateTx(ctx context.Context, tx *txWrapper, q sq.UpdateBuil if err != nil { return -1, i18n.WrapError(ctx, err, i18n.MsgDBQueryBuildFailed) } - l.Debugf(`SQL-> update: %s`, sqlQuery) - l.Tracef(`SQL-> update args: %+v`, args) + l.Debugf(`SQL-> update: %s`, shortenSQL(sqlQuery)) + l.Tracef(`SQL-> update query: %s (args: %+v)`, sqlQuery, args) res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) 
if err != nil { l.Errorf(`SQL update failed: %s sql=[ %s ]`, err, sqlQuery) @@ -346,7 +393,7 @@ func (s *SQLCommon) lockTableExclusiveTx(ctx context.Context, tx *txWrapper, tab if s.features.ExclusiveTableLockSQL != nil && !tx.tableIsLocked(table) { sqlQuery := s.features.ExclusiveTableLockSQL(table) - l.Debugf(`SQL-> lock: %s`, sqlQuery) + l.Debugf(`SQL-> lock: %s`, shortenSQL(sqlQuery)) _, err := tx.sqlTX.ExecContext(ctx, sqlQuery) if err != nil { l.Errorf(`SQL lock failed: %s sql=[ %s ]`, err, sqlQuery) @@ -384,12 +431,11 @@ func (s *SQLCommon) commitTx(ctx context.Context, tx *txWrapper, autoCommit bool // Only at this stage do we write to the special events Database table, so we know // regardless of the higher level logic, the events are always written at this point // at the end of the transaction - for _, event := range tx.preCommitEvents { - if err := s.insertEventPreCommit(ctx, tx, event); err != nil { + if len(tx.preCommitEvents) > 0 { + if err := s.insertEventsPreCommit(ctx, tx, tx.preCommitEvents); err != nil { s.rollbackTx(ctx, tx, false) return err } - l.Infof("Emitted %s event %s ref=%s (sequence=%d)", event.Type, event.ID, event.Reference, event.Sequence) } l.Debugf(`SQL-> commit`) diff --git a/internal/database/sqlcommon/sqlcommon_test.go b/internal/database/sqlcommon/sqlcommon_test.go index da73481c22..cb219419d7 100644 --- a/internal/database/sqlcommon/sqlcommon_test.go +++ b/internal/database/sqlcommon/sqlcommon_test.go @@ -321,3 +321,26 @@ func TestDoubleLock(t *testing.T) { assert.NoError(t, err) assert.NoError(t, mdb.ExpectationsWereMet()) } + +func TestInsertTxRowsBadConfig(t *testing.T) { + s, mdb := newMockProvider().init() + mdb.ExpectBegin() + ctx, tx, _, err := s.beginOrUseTx(context.Background()) + assert.NoError(t, err) + s.fakePSQLInsert = false + sb := sq.Insert("table").Columns("col1").Values(("val1")) + err = s.insertTxRows(ctx, tx, sb, nil, []int64{1, 2}, false) + assert.Regexp(t, "FF10374", err) +} + +func TestInsertTxRowsIncompleteReturn(t *testing.T) { + s, mdb := newMockProvider().init() + mdb.ExpectBegin() + mdb.ExpectQuery("INSERT.*").WillReturnRows(sqlmock.NewRows([]string{sequenceColumn}).AddRow(int64(1001))) + ctx, tx, _, err := s.beginOrUseTx(context.Background()) + assert.NoError(t, err) + s.fakePSQLInsert = true + sb := sq.Insert("table").Columns("col1").Values(("val1")) + err = s.insertTxRows(ctx, tx, sb, nil, []int64{1, 2}, false) + assert.Regexp(t, "FF10116", err) +} diff --git a/internal/database/sqlcommon/subscription_sql.go b/internal/database/sqlcommon/subscription_sql.go index ec50fb785a..f5e76f3681 100644 --- a/internal/database/sqlcommon/subscription_sql.go +++ b/internal/database/sqlcommon/subscription_sql.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -34,20 +34,12 @@ var ( "namespace", "name", "transport", - "filter_events", - "filter_topics", - "filter_tag", - "filter_group", + "filters", "options", "created", "updated", } - subscriptionFilterFieldMap = map[string]string{ - "filter.events": "filter_events", - "filter.topics": "filter_topics", - "filter.tag": "filter_tag", - "filter.group": "filter_group", - } + subscriptionFilterFieldMap = map[string]string{} ) func (s *SQLCommon) UpsertSubscription(ctx context.Context, subscription *fftypes.Subscription, allowExisting bool) (err error) { @@ -95,10 +87,7 @@ func (s *SQLCommon) UpsertSubscription(ctx context.Context, subscription *fftype Set("namespace", subscription.Namespace). 
Set("name", subscription.Name). Set("transport", subscription.Transport). - Set("filter_events", subscription.Filter.Events). - Set("filter_topics", subscription.Filter.Topics). - Set("filter_tag", subscription.Filter.Tag). - Set("filter_group", subscription.Filter.Group). + Set("filters", subscription.Filter). Set("options", subscription.Options). Set("created", subscription.Created). Set("updated", subscription.Updated). @@ -125,10 +114,7 @@ func (s *SQLCommon) UpsertSubscription(ctx context.Context, subscription *fftype subscription.Namespace, subscription.Name, subscription.Transport, - subscription.Filter.Events, - subscription.Filter.Topics, - subscription.Filter.Tag, - subscription.Filter.Group, + subscription.Filter, subscription.Options, subscription.Created, subscription.Updated, @@ -152,10 +138,7 @@ func (s *SQLCommon) subscriptionResult(ctx context.Context, row *sql.Rows) (*fft &subscription.Namespace, &subscription.Name, &subscription.Transport, - &subscription.Filter.Events, - &subscription.Filter.Topics, - &subscription.Filter.Tag, - &subscription.Filter.Group, + &subscription.Filter, &subscription.Options, &subscription.Created, &subscription.Updated, diff --git a/internal/database/sqlcommon/subscription_sql_test.go b/internal/database/sqlcommon/subscription_sql_test.go index b5ded4717d..d50b9892da 100644 --- a/internal/database/sqlcommon/subscription_sql_test.go +++ b/internal/database/sqlcommon/subscription_sql_test.go @@ -78,9 +78,11 @@ func TestSubscriptionsE2EWithDB(t *testing.T) { Transport: "websockets", Filter: fftypes.SubscriptionFilter{ Events: string(fftypes.EventTypeMessageConfirmed), - Topics: "topics.*", - Tag: "tag.*", - Group: "group.*", + Topic: "topics.*", + Message: fftypes.MessageFilter{ + Tag: "tag.*", + Group: "group.*", + }, }, Options: subOpts, Created: fftypes.Now(), @@ -258,7 +260,7 @@ func TestSubscriptionUpdateBuildQueryFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows(subscriptionColumns).AddRow( - fftypes.NewUUID(), "ns1", "sub1", "websockets", "", "", "", "", `{}`, fftypes.Now(), fftypes.Now()), + fftypes.NewUUID(), "ns1", "sub1", "websockets", `{}`, `{}`, fftypes.Now(), fftypes.Now()), ) u := database.SubscriptionQueryFactory.NewUpdate(context.Background()).Set("name", map[bool]bool{true: false}) err := s.UpdateSubscription(context.Background(), "ns1", "name1", u) @@ -289,7 +291,7 @@ func TestSubscriptionUpdateFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows(subscriptionColumns).AddRow( - fftypes.NewUUID(), "ns1", "sub1", "websockets", "", "", "", "", `{}`, fftypes.Now(), fftypes.Now()), + fftypes.NewUUID(), "ns1", "sub1", "websockets", `{}`, `{}`, fftypes.Now(), fftypes.Now()), ) mock.ExpectExec("UPDATE .*").WillReturnError(fmt.Errorf("pop")) mock.ExpectRollback() @@ -309,7 +311,7 @@ func TestSubscriptionDeleteFail(t *testing.T) { s, mock := newMockProvider().init() mock.ExpectBegin() mock.ExpectQuery("SELECT .*").WillReturnRows(sqlmock.NewRows(subscriptionColumns).AddRow( - fftypes.NewUUID(), "ns1", "sub1", "websockets", "", "", "", "", `{}`, fftypes.Now(), fftypes.Now()), + fftypes.NewUUID(), "ns1", "sub1", "websockets", `{}`, `{}`, fftypes.Now(), fftypes.Now()), ) mock.ExpectExec("DELETE .*").WillReturnError(fmt.Errorf("pop")) err := s.DeleteSubscriptionByID(context.Background(), fftypes.NewUUID()) diff --git a/internal/database/sqlcommon/tokenpool_sql.go 
b/internal/database/sqlcommon/tokenpool_sql.go index f1b3ff9131..1e239e2bf0 100644 --- a/internal/database/sqlcommon/tokenpool_sql.go +++ b/internal/database/sqlcommon/tokenpool_sql.go @@ -42,6 +42,7 @@ var ( "created", "tx_type", "tx_id", + "info", } tokenPoolFilterFieldMap = map[string]string{ "protocolid": "protocol_id", @@ -93,6 +94,7 @@ func (s *SQLCommon) UpsertTokenPool(ctx context.Context, pool *fftypes.TokenPool Set("state", pool.State). Set("tx_type", pool.TX.Type). Set("tx_id", pool.TX.ID). + Set("info", pool.Info). Where(sq.Eq{"id": pool.ID}), func() { s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenPools, fftypes.ChangeEventTypeUpdated, pool.Namespace, pool.ID) @@ -119,6 +121,7 @@ func (s *SQLCommon) UpsertTokenPool(ctx context.Context, pool *fftypes.TokenPool pool.Created, pool.TX.Type, pool.TX.ID, + pool.Info, ), func() { s.callbacks.UUIDCollectionNSEvent(database.CollectionTokenPools, fftypes.ChangeEventTypeCreated, pool.Namespace, pool.ID) @@ -147,6 +150,7 @@ func (s *SQLCommon) tokenPoolResult(ctx context.Context, row *sql.Rows) (*fftype &pool.Created, &pool.TX.Type, &pool.TX.ID, + &pool.Info, ) if err != nil { return nil, i18n.WrapError(ctx, err, i18n.MsgDBReadErr, "tokenpool") diff --git a/internal/database/sqlcommon/tokenpool_sql_test.go b/internal/database/sqlcommon/tokenpool_sql_test.go index 39644a4b69..fd1c6612e5 100644 --- a/internal/database/sqlcommon/tokenpool_sql_test.go +++ b/internal/database/sqlcommon/tokenpool_sql_test.go @@ -52,6 +52,9 @@ func TestTokenPoolE2EWithDB(t *testing.T) { Type: fftypes.TransactionTypeTokenPool, ID: fftypes.NewUUID(), }, + Info: fftypes.JSONObject{ + "pool": "info", + }, } s.callbacks.On("UUIDCollectionNSEvent", database.CollectionTokenPools, fftypes.ChangeEventTypeCreated, "ns1", poolID, mock.Anything). 
diff --git a/internal/database/sqlcommon/tokentransfer_sql.go b/internal/database/sqlcommon/tokentransfer_sql.go index 279b20b0b1..813558b4b6 100644 --- a/internal/database/sqlcommon/tokentransfer_sql.go +++ b/internal/database/sqlcommon/tokentransfer_sql.go @@ -49,6 +49,7 @@ var ( "created", } tokenTransferFilterFieldMap = map[string]string{ + "type": "type", "localid": "local_id", "pool": "pool_id", "tokenindex": "token_index", diff --git a/internal/dataexchange/ffdx/ffdx.go b/internal/dataexchange/ffdx/ffdx.go index f5b13b6af6..be412a6fea 100644 --- a/internal/dataexchange/ffdx/ffdx.go +++ b/internal/dataexchange/ffdx/ffdx.go @@ -375,7 +375,7 @@ func (h *FFDX) eventLoop() { l.Errorf("Invalid hash received in DX event: '%s'", msg.Hash) err = nil // still confirm the message } else { - err = h.callbacks.BLOBReceived(msg.Sender, *hash, msg.Size, msg.Path) + err = h.callbacks.PrivateBLOBReceived(msg.Sender, *hash, msg.Size, msg.Path) } case blobAcknowledged: err = h.callbacks.TransferResult(msg.RequestID, fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{ diff --git a/internal/dataexchange/ffdx/ffdx_test.go b/internal/dataexchange/ffdx/ffdx_test.go index 5d29d6bdd2..8548a31d01 100644 --- a/internal/dataexchange/ffdx/ffdx_test.go +++ b/internal/dataexchange/ffdx/ffdx_test.go @@ -466,7 +466,7 @@ func TestEvents(t *testing.T) { assert.Equal(t, `{"action":"commit"}`, string(msg)) hash := fftypes.NewRandB32() - mcb.On("BLOBReceived", mock.Anything, mock.MatchedBy(func(b32 fftypes.Bytes32) bool { + mcb.On("PrivateBLOBReceived", mock.Anything, mock.MatchedBy(func(b32 fftypes.Bytes32) bool { return b32 == *hash }), int64(12345), fmt.Sprintf("ns1/%s", u.String())).Return(nil) fromServer <- fmt.Sprintf(`{"type":"blob-received","sender":"peer1","path":"ns1/%s","hash":"%s","size":12345}`, u.String(), hash.String()) diff --git a/internal/definitions/definition_handler.go b/internal/definitions/definition_handler.go index bd457d0a0f..99543ab4c5 100644 --- a/internal/definitions/definition_handler.go +++ b/internal/definitions/definition_handler.go @@ -37,7 +37,7 @@ import ( type DefinitionHandlers interface { privatemessaging.GroupManager - HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) + HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) SendReply(ctx context.Context, event *fftypes.Event, reply *fftypes.MessageInOut) } @@ -63,6 +63,21 @@ const ( ActionWait ) +func (dma DefinitionMessageAction) String() string { + switch dma { + case ActionReject: + return "reject" + case ActionConfirm: + return "confirm" + case ActionRetry: + return "retry" + case ActionWait: + return "wait" + default: + return "unknown" + } +} + // DefinitionBatchState tracks the state between definition handlers that run in-line on the pin processing route in the // aggregator as part of a batch of pins. They might have complex API calls, and interdependencies, that need to be managed via this state. // The actions to be taken at the end of a definition batch. 
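+// (The per-message actions above - reject, confirm, retry, wait - broadly mean: reject marks the
+// definition message invalid, confirm accepts it, retry flags a transient error so the batch is
+// re-processed, and wait parks the message until more information arrives, such as an identity
+// claim awaiting its verification message.)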
@@ -120,9 +135,9 @@ func (dh *definitionHandlers) EnsureLocalGroup(ctx context.Context, group *fftyp return dh.messaging.EnsureLocalGroup(ctx, group) } -func (dh *definitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (msgAction HandlerResult, err error) { +func (dh *definitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (msgAction HandlerResult, err error) { l := log.L(ctx) - l.Infof("Confirming system definition broadcast '%s' [%s]", msg.Header.Tag, msg.Header.ID) + l.Infof("Processing system definition broadcast '%s' [%s]", msg.Header.Tag, msg.Header.ID) switch msg.Header.Tag { case fftypes.SystemTagDefineDatatype: return dh.handleDatatypeBroadcast(ctx, state, msg, data, tx) @@ -150,7 +165,7 @@ func (dh *definitionHandlers) HandleDefinitionBroadcast(ctx context.Context, sta } } -func (dh *definitionHandlers) getSystemBroadcastPayload(ctx context.Context, msg *fftypes.Message, data []*fftypes.Data, res fftypes.Definition) (valid bool) { +func (dh *definitionHandlers) getSystemBroadcastPayload(ctx context.Context, msg *fftypes.Message, data fftypes.DataArray, res fftypes.Definition) (valid bool) { l := log.L(ctx) if len(data) != 1 { l.Warnf("Unable to process system broadcast %s - expecting 1 attachment, found %d", msg.Header.ID, len(data)) diff --git a/internal/definitions/definition_handler_contracts.go b/internal/definitions/definition_handler_contracts.go index b718542d25..a3a5a5adab 100644 --- a/internal/definitions/definition_handler_contracts.go +++ b/internal/definitions/definition_handler_contracts.go @@ -65,7 +65,7 @@ func (dh *definitionHandlers) persistContractAPI(ctx context.Context, api *fftyp return err == nil, err } -func (dh *definitionHandlers) handleFFIBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleFFIBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) { l := log.L(ctx) var broadcast fftypes.FFI valid := dh.getSystemBroadcastPayload(ctx, msg, data, &broadcast) @@ -90,13 +90,13 @@ func (dh *definitionHandlers) handleFFIBroadcast(ctx context.Context, state Defi l.Infof("Contract interface created id=%s author=%s", broadcast.ID, msg.Header.Author) state.AddFinalize(func(ctx context.Context) error { - event := fftypes.NewEvent(fftypes.EventTypeContractInterfaceConfirmed, broadcast.Namespace, broadcast.ID, tx) + event := fftypes.NewEvent(fftypes.EventTypeContractInterfaceConfirmed, broadcast.Namespace, broadcast.ID, tx, broadcast.Topic()) return dh.database.InsertEvent(ctx, event) }) return HandlerResult{Action: ActionConfirm}, nil } -func (dh *definitionHandlers) handleContractAPIBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleContractAPIBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) { l := log.L(ctx) var broadcast fftypes.ContractAPI valid := dh.getSystemBroadcastPayload(ctx, msg, data, &broadcast) @@ -121,7 +121,7 @@ func (dh *definitionHandlers) handleContractAPIBroadcast(ctx context.Context, st l.Infof("Contract 
API created id=%s author=%s", broadcast.ID, msg.Header.Author) state.AddFinalize(func(ctx context.Context) error { - event := fftypes.NewEvent(fftypes.EventTypeContractAPIConfirmed, broadcast.Namespace, broadcast.ID, tx) + event := fftypes.NewEvent(fftypes.EventTypeContractAPIConfirmed, broadcast.Namespace, broadcast.ID, tx, fftypes.SystemTopicDefinitions) return dh.database.InsertEvent(ctx, event) }) return HandlerResult{Action: ActionConfirm}, nil diff --git a/internal/definitions/definition_handler_contracts_test.go b/internal/definitions/definition_handler_contracts_test.go index 2adddf091e..6fe263685c 100644 --- a/internal/definitions/definition_handler_contracts_test.go +++ b/internal/definitions/definition_handler_contracts_test.go @@ -107,7 +107,7 @@ func TestHandleFFIBroadcastOk(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineFFI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -135,7 +135,7 @@ func TestHandleFFIBroadcastReject(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineFFI, }, - }, []*fftypes.Data{}, fftypes.NewUUID()) + }, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -195,7 +195,7 @@ func TestHandleFFIBroadcastValidateFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineFFI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -218,7 +218,7 @@ func TestHandleFFIBroadcastPersistFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineFFI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) bs.assertNoFinalizers() @@ -241,7 +241,7 @@ func TestHandleContractAPIBroadcastOk(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineContractAPI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -295,7 +295,7 @@ func TestHandleContractAPIBroadcastValidateFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineContractAPI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -317,7 +317,7 @@ func TestHandleContractAPIBroadcastPersistFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineContractAPI, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) bs.assertNoFinalizers() diff --git a/internal/definitions/definition_handler_datatype.go b/internal/definitions/definition_handler_datatype.go index ab1eb58630..7f51bf7df3 100644 --- a/internal/definitions/definition_handler_datatype.go +++ b/internal/definitions/definition_handler_datatype.go @@ -23,7 +23,7 @@ import ( 
"github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleDatatypeBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleDatatypeBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) { l := log.L(ctx) var dt fftypes.Datatype @@ -56,7 +56,7 @@ func (dh *definitionHandlers) handleDatatypeBroadcast(ctx context.Context, state } state.AddFinalize(func(ctx context.Context) error { - event := fftypes.NewEvent(fftypes.EventTypeDatatypeConfirmed, dt.Namespace, dt.ID, tx) + event := fftypes.NewEvent(fftypes.EventTypeDatatypeConfirmed, dt.Namespace, dt.ID, tx, fftypes.SystemTopicDefinitions) return dh.database.InsertEvent(ctx, event) }) return HandlerResult{Action: ActionConfirm}, nil diff --git a/internal/definitions/definition_handler_datatype_test.go b/internal/definitions/definition_handler_datatype_test.go index 87c4fde596..adc267a047 100644 --- a/internal/definitions/definition_handler_datatype_test.go +++ b/internal/definitions/definition_handler_datatype_test.go @@ -57,7 +57,7 @@ func TestHandleDefinitionBroadcastDatatypeOk(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -95,7 +95,7 @@ func TestHandleDefinitionBroadcastDatatypeEventFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) err = bs.finalizers[0](context.Background()) @@ -126,7 +126,7 @@ func TestHandleDefinitionBroadcastDatatypeMissingID(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -156,7 +156,7 @@ func TestHandleDefinitionBroadcastBadSchema(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -181,7 +181,7 @@ func TestHandleDefinitionBroadcastMissingData(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{}, fftypes.NewUUID()) + }, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) bs.assertNoFinalizers() @@ -214,7 +214,7 @@ func TestHandleDefinitionBroadcastDatatypeLookupFail(t *testing.T) { Namespace: fftypes.SystemNamespace, Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.EqualError(t, err, "pop") @@ -250,7 +250,7 @@ func TestHandleDefinitionBroadcastUpsertFail(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, 
[]*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.EqualError(t, err, "pop") @@ -285,7 +285,7 @@ func TestHandleDefinitionBroadcastDatatypeDuplicate(t *testing.T) { Header: fftypes.MessageHeader{ Tag: fftypes.SystemTagDefineDatatype, }, - }, []*fftypes.Data{data}, fftypes.NewUUID()) + }, fftypes.DataArray{data}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) diff --git a/internal/definitions/definition_handler_identity_claim.go b/internal/definitions/definition_handler_identity_claim.go index ec715357f1..35164157c1 100644 --- a/internal/definitions/definition_handler_identity_claim.go +++ b/internal/definitions/definition_handler_identity_claim.go @@ -25,7 +25,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleIdentityClaimBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, verificationID *fftypes.UUID) (HandlerResult, error) { +func (dh *definitionHandlers) handleIdentityClaimBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, verificationID *fftypes.UUID) (HandlerResult, error) { var claim fftypes.IdentityClaim valid := dh.getSystemBroadcastPayload(ctx, msg, data, &claim) if !valid { @@ -102,7 +102,7 @@ func (dh *definitionHandlers) confirmVerificationForClaim(ctx context.Context, s } } for _, candidate := range candidates { - data, foundAll, err := dh.data.GetMessageData(ctx, candidate, true) + data, foundAll, err := dh.data.GetMessageDataCached(ctx, candidate) if err != nil { return nil, err } @@ -211,7 +211,7 @@ func (dh *definitionHandlers) handleIdentityClaim(ctx context.Context, state Def } state.AddFinalize(func(ctx context.Context) error { - event := fftypes.NewEvent(fftypes.EventTypeIdentityConfirmed, identity.Namespace, identity.ID, nil) + event := fftypes.NewEvent(fftypes.EventTypeIdentityConfirmed, identity.Namespace, identity.ID, nil, fftypes.SystemTopicDefinitions) return dh.database.InsertEvent(ctx, event) }) return HandlerResult{Action: ActionConfirm}, nil diff --git a/internal/definitions/definition_handler_identity_claim_test.go b/internal/definitions/definition_handler_identity_claim_test.go index 7ba5e66d2c..4637e14147 100644 --- a/internal/definitions/definition_handler_identity_claim_test.go +++ b/internal/definitions/definition_handler_identity_claim_test.go @@ -169,12 +169,12 @@ func TestHandleDefinitionIdentityClaimCustomWithExistingParentVerificationOk(t * })).Return(nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, false, nil).Once() - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, false, nil).Once() + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -215,12 +215,12 @@ func TestHandleDefinitionIdentityClaimIdempotentReplay(t 
*testing.T) { })).Return(nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, false, nil).Once() - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, false, nil).Once() + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -250,11 +250,11 @@ func TestHandleDefinitionIdentityClaimFailInsertIdentity(t *testing.T) { mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -280,11 +280,11 @@ func TestHandleDefinitionIdentityClaimVerificationDataFail(t *testing.T) { mdi.On("GetMessages", ctx, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -310,11 +310,11 @@ func TestHandleDefinitionIdentityClaimVerificationMissingData(t *testing.T) { mdi.On("GetMessages", ctx, mock.Anything).Return([]*fftypes.Message{}, nil, nil) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -341,11 +341,11 @@ func TestHandleDefinitionIdentityClaimFailInsertVerifier(t *testing.T) { mdi.On("UpsertVerifier", ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) mdm := dh.data.(*datamocks.Manager) - mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{verifyData}, true, nil) + mdm.On("GetMessageDataCached", ctx, 
mock.Anything).Return(fftypes.DataArray{verifyData}, true, nil) bs.pendingConfirms[*verifyMsg.Header.ID] = verifyMsg - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -370,7 +370,7 @@ func TestHandleDefinitionIdentityClaimCustomMissingParentVerificationOk(t *testi mdi.On("GetVerifierByValue", ctx, fftypes.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) mdi.On("GetMessages", ctx, mock.Anything).Return([]*fftypes.Message{}, nil, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) // Just wait for the verification to come in later assert.NoError(t, err) @@ -394,7 +394,7 @@ func TestHandleDefinitionIdentityClaimCustomParentVerificationFail(t *testing.T) mdi.On("GetVerifierByValue", ctx, fftypes.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, nil) mdi.On("GetMessages", ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -419,7 +419,7 @@ func TestHandleDefinitionIdentityClaimVerifierClash(t *testing.T) { Hash: fftypes.NewRandB32(), }, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -442,7 +442,7 @@ func TestHandleDefinitionIdentityClaimVerifierError(t *testing.T) { mdi.On("GetIdentityByID", ctx, custom1.ID).Return(nil, nil) mdi.On("GetVerifierByValue", ctx, fftypes.VerifierTypeEthAddress, "ns1", "0x12345").Return(nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -467,7 +467,7 @@ func TestHandleDefinitionIdentityClaimIdentityClash(t *testing.T) { }, }, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -489,7 +489,7 @@ func TestHandleDefinitionIdentityClaimIdentityError(t *testing.T) { mdi.On("GetIdentityByName", ctx, custom1.Type, custom1.Namespace, custom1.Name).Return(nil, nil) mdi.On("GetIdentityByID", ctx, custom1.ID).Return(nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, 
fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -508,7 +508,7 @@ func TestHandleDefinitionIdentityMissingAuthor(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -526,7 +526,7 @@ func TestHandleDefinitionIdentityClaimBadSignature(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("VerifyIdentityChain", ctx, custom1).Return(org1, false, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -544,7 +544,7 @@ func TestHandleDefinitionIdentityVerifyChainFail(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("VerifyIdentityChain", ctx, custom1).Return(nil, true, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -562,7 +562,7 @@ func TestHandleDefinitionIdentityVerifyChainInvalid(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("VerifyIdentityChain", ctx, custom1).Return(nil, false, fmt.Errorf("wrong")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{claimData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{claimData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -577,7 +577,7 @@ func TestHandleDefinitionIdentityClaimBadData(t *testing.T) { _, org1, claimMsg, _, _, _ := testCustomClaimAndVerification(t) claimMsg.Header.Author = org1.DID // should be the child for the claim - action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, []*fftypes.Data{}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, claimMsg, fftypes.DataArray{}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) diff --git a/internal/definitions/definition_handler_identity_update.go b/internal/definitions/definition_handler_identity_update.go index 5f1688e9d3..6bd4368584 100644 --- a/internal/definitions/definition_handler_identity_update.go +++ b/internal/definitions/definition_handler_identity_update.go @@ -24,7 +24,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (dh *definitionHandlers) handleIdentityUpdateBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) { +func (dh *definitionHandlers) handleIdentityUpdateBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) { var update fftypes.IdentityUpdate valid := dh.getSystemBroadcastPayload(ctx, 
msg, data, &update) if !valid { @@ -63,7 +63,7 @@ func (dh *definitionHandlers) handleIdentityUpdateBroadcast(ctx context.Context, } state.AddFinalize(func(ctx context.Context) error { - event := fftypes.NewEvent(fftypes.EventTypeIdentityUpdated, identity.Namespace, identity.ID, nil) + event := fftypes.NewEvent(fftypes.EventTypeIdentityUpdated, identity.Namespace, identity.ID, nil, fftypes.SystemTopicDefinitions) return dh.database.InsertEvent(ctx, event) }) return HandlerResult{Action: ActionConfirm}, err diff --git a/internal/definitions/definition_handler_identity_update_test.go b/internal/definitions/definition_handler_identity_update_test.go index 91cb7d0549..51e2884212 100644 --- a/internal/definitions/definition_handler_identity_update_test.go +++ b/internal/definitions/definition_handler_identity_update_test.go @@ -86,7 +86,7 @@ func TestHandleDefinitionIdentityUpdateOk(t *testing.T) { return event.Type == fftypes.EventTypeIdentityUpdated })).Return(nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionConfirm}, action) assert.NoError(t, err) @@ -109,7 +109,7 @@ func TestHandleDefinitionIdentityUpdateUpsertFail(t *testing.T) { mdi := dh.database.(*databasemocks.Plugin) mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -128,7 +128,7 @@ func TestHandleDefinitionIdentityInvalidIdentity(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -145,7 +145,7 @@ func TestHandleDefinitionIdentityNotFound(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionReject}, action) assert.NoError(t, err) @@ -162,7 +162,7 @@ func TestHandleDefinitionIdentityLookupFail(t *testing.T) { mim := dh.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop")) - action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID()) + action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID()) assert.Equal(t, HandlerResult{Action: ActionRetry}, action) assert.Regexp(t, "pop", err) @@ -199,7 +199,7 @@ func TestHandleDefinitionIdentityValidateFail(t *testing.T) { }, } - action, err := dh.HandleDefinitionBroadcast(ctx, bs, 
diff --git a/internal/definitions/definition_handler_identity_update_test.go b/internal/definitions/definition_handler_identity_update_test.go
index 91cb7d0549..51e2884212 100644
--- a/internal/definitions/definition_handler_identity_update_test.go
+++ b/internal/definitions/definition_handler_identity_update_test.go
@@ -86,7 +86,7 @@ func TestHandleDefinitionIdentityUpdateOk(t *testing.T) {
 		return event.Type == fftypes.EventTypeIdentityUpdated
 	})).Return(nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
@@ -109,7 +109,7 @@ func TestHandleDefinitionIdentityUpdateUpsertFail(t *testing.T) {
 	mdi := dh.database.(*databasemocks.Plugin)
 	mdi.On("UpsertIdentity", ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop"))
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.Regexp(t, "pop", err)
@@ -128,7 +128,7 @@ func TestHandleDefinitionIdentityInvalidIdentity(t *testing.T) {
 	mim := dh.identity.(*identitymanagermocks.Manager)
 	mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -145,7 +145,7 @@ func TestHandleDefinitionIdentityNotFound(t *testing.T) {
 	mim := dh.identity.(*identitymanagermocks.Manager)
 	mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -162,7 +162,7 @@ func TestHandleDefinitionIdentityLookupFail(t *testing.T) {
 	mim := dh.identity.(*identitymanagermocks.Manager)
 	mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop"))
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.Regexp(t, "pop", err)
@@ -199,7 +199,7 @@ func TestHandleDefinitionIdentityValidateFail(t *testing.T) {
 		},
 	}
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{updateData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{updateData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -224,7 +224,7 @@ func TestHandleDefinitionIdentityMissingData(t *testing.T) {
 		},
 	}
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, []*fftypes.Data{}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, updateMsg, fftypes.DataArray{}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
diff --git a/internal/definitions/definition_handler_identity_verification.go b/internal/definitions/definition_handler_identity_verification.go
index e9ef8d8105..f599433aa9 100644
--- a/internal/definitions/definition_handler_identity_verification.go
+++ b/internal/definitions/definition_handler_identity_verification.go
@@ -23,7 +23,7 @@ import (
 	"github.com/hyperledger/firefly/pkg/fftypes"
 )
-func (dh *definitionHandlers) handleIdentityVerificationBroadcast(ctx context.Context, state DefinitionBatchState, verifyMsg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) {
+func (dh *definitionHandlers) handleIdentityVerificationBroadcast(ctx context.Context, state DefinitionBatchState, verifyMsg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) {
 	var verification fftypes.IdentityVerification
 	valid := dh.getSystemBroadcastPayload(ctx, verifyMsg, data, &verification)
 	if !valid {
@@ -68,7 +68,7 @@ func (dh *definitionHandlers) handleIdentityVerificationBroadcast(ctx context.Co
 		log.L(ctx).Warnf("Invalid verification message %s - hash mismatch claim=%s verification=%s", verifyMsg.Header.ID, claimMsg.Hash, verification.Claim.Hash)
 		return HandlerResult{Action: ActionReject}, nil
 	}
-	data, foundAll, err := dh.data.GetMessageData(ctx, claimMsg, true)
+	data, foundAll, err := dh.data.GetMessageDataCached(ctx, claimMsg)
 	if err != nil {
 		return HandlerResult{Action: ActionRetry}, err
 	}
diff --git a/internal/definitions/definition_handler_identity_verification_test.go b/internal/definitions/definition_handler_identity_verification_test.go
index 8d4455671a..76b4c7824e 100644
--- a/internal/definitions/definition_handler_identity_verification_test.go
+++ b/internal/definitions/definition_handler_identity_verification_test.go
@@ -62,11 +62,11 @@ func TestHandleDefinitionIdentityVerificationWithExistingClaimOk(t *testing.T) {
 	})).Return(nil)
 	mdm := dh.data.(*datamocks.Manager)
-	mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{claimData}, true, nil)
+	mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{claimData}, true, nil)
 	bs.pendingConfirms[*claimMsg.Header.ID] = claimMsg
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
@@ -92,9 +92,9 @@ func TestHandleDefinitionIdentityVerificationIncompleteClaimData(t *testing.T) {
 	mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(claimMsg, nil)
 	mdm := dh.data.(*datamocks.Manager)
-	mdm.On("GetMessageData", ctx, mock.Anything, true).Return([]*fftypes.Data{}, false, nil)
+	mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(fftypes.DataArray{}, false, nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
@@ -118,9 +118,9 @@ func TestHandleDefinitionIdentityVerificationClaimDataFail(t *testing.T) {
 	mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(claimMsg, nil)
 	mdm := dh.data.(*datamocks.Manager)
-	mdm.On("GetMessageData", ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop"))
+	mdm.On("GetMessageDataCached", ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop"))
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.Regexp(t, "pop", err)
@@ -144,7 +144,7 @@ func TestHandleDefinitionIdentityVerificationClaimHashMismatchl(t *testing.T) {
 	mdi := dh.database.(*databasemocks.Plugin)
 	mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(claimMsg, nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -165,7 +165,7 @@ func TestHandleDefinitionIdentityVerificationBeforeClaim(t *testing.T) {
 	mdi := dh.database.(*databasemocks.Plugin)
 	mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(nil, nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
@@ -186,7 +186,7 @@ func TestHandleDefinitionIdentityVerificationClaimLookupFail(t *testing.T) {
 	mdi := dh.database.(*databasemocks.Plugin)
 	mdi.On("GetMessageByID", ctx, claimMsg.Header.ID).Return(nil, fmt.Errorf("pop"))
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.Regexp(t, "pop", err)
@@ -205,7 +205,7 @@ func TestHandleDefinitionIdentityVerificationWrongSigner(t *testing.T) {
 	mim := dh.identity.(*identitymanagermocks.Manager)
 	mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(org1, nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -222,7 +222,7 @@ func TestHandleDefinitionIdentityVerificationCheckParentNotFound(t *testing.T) {
 	mim := dh.identity.(*identitymanagermocks.Manager)
 	mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -239,7 +239,7 @@ func TestHandleDefinitionIdentityVerificationCheckParentFail(t *testing.T) {
 	mim := dh.identity.(*identitymanagermocks.Manager)
 	mim.On("CachedIdentityLookupByID", ctx, org1.ID).Return(nil, fmt.Errorf("pop"))
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, []*fftypes.Data{verifyData}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, verifyMsg, fftypes.DataArray{verifyData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.Regexp(t, "pop", err)
@@ -267,7 +267,7 @@ func TestHandleDefinitionIdentityVerificationInvalidPayload(t *testing.T) {
 			Type: fftypes.MessageTypeBroadcast,
 			Tag:  fftypes.SystemTagIdentityVerification,
 		},
-	}, []*fftypes.Data{emptyObjectData}, fftypes.NewUUID())
+	}, fftypes.DataArray{emptyObjectData}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -284,7 +284,7 @@ func TestHandleDefinitionIdentityVerificationInvalidData(t *testing.T) {
 			Type: fftypes.MessageTypeBroadcast,
 			Tag:  fftypes.SystemTagIdentityVerification,
 		},
-	}, []*fftypes.Data{}, fftypes.NewUUID())
+	}, fftypes.DataArray{}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
diff --git a/internal/definitions/definition_handler_namespace.go b/internal/definitions/definition_handler_namespace.go
index 55db98b405..faff9609bb 100644
--- a/internal/definitions/definition_handler_namespace.go
+++ b/internal/definitions/definition_handler_namespace.go
@@ -23,7 +23,7 @@ import (
 	"github.com/hyperledger/firefly/pkg/fftypes"
 )
-func (dh *definitionHandlers) handleNamespaceBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (HandlerResult, error) {
+func (dh *definitionHandlers) handleNamespaceBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (HandlerResult, error) {
 	l := log.L(ctx)
 	var ns fftypes.Namespace
@@ -56,7 +56,7 @@ func (dh *definitionHandlers) handleNamespaceBroadcast(ctx context.Context, stat
 	}
 	state.AddFinalize(func(ctx context.Context) error {
-		event := fftypes.NewEvent(fftypes.EventTypeNamespaceConfirmed, ns.Name, ns.ID, tx)
+		event := fftypes.NewEvent(fftypes.EventTypeNamespaceConfirmed, ns.Name, ns.ID, tx, fftypes.SystemTopicDefinitions)
 		return dh.database.InsertEvent(ctx, event)
 	})
 	return HandlerResult{Action: ActionConfirm}, nil
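Both this handler and the identity handlers above now pass a topic as the final argument to fftypes.NewEvent, so every inserted event is scoped to exactly one topic. A sketch of the constructor shape these call sites imply (field names beyond Type, Namespace and Reference, which are visible elsewhere in this patch, are assumptions):

    // Sketch only - the fifth parameter is the single topic the event covers.
    func NewEvent(t EventType, ns string, ref *UUID, tx *UUID, topic string) *Event {
    	return &Event{
    		ID:          NewUUID(),
    		Type:        t,
    		Namespace:   ns,
    		Reference:   ref,
    		Transaction: tx, // assumed field name for the transaction reference
    		Topic:       topic,
    	}
    }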
diff --git a/internal/definitions/definition_handler_namespace_test.go b/internal/definitions/definition_handler_namespace_test.go
index 794c858887..daac65f425 100644
--- a/internal/definitions/definition_handler_namespace_test.go
+++ b/internal/definitions/definition_handler_namespace_test.go
@@ -49,7 +49,7 @@ func TestHandleDefinitionBroadcastNSOk(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
 	err = bs.finalizers[0](context.Background())
@@ -79,7 +79,7 @@ func TestHandleDefinitionBroadcastNSEventFail(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
 	err = bs.finalizers[0](context.Background())
@@ -108,7 +108,7 @@ func TestHandleDefinitionBroadcastNSUpsertFail(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.EqualError(t, err, "pop")
@@ -123,7 +123,7 @@ func TestHandleDefinitionBroadcastNSMissingData(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{}, fftypes.NewUUID())
+	}, fftypes.DataArray{}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
 	bs.assertNoFinalizers()
@@ -143,7 +143,7 @@ func TestHandleDefinitionBroadcastNSBadID(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
 	bs.assertNoFinalizers()
@@ -160,7 +160,7 @@ func TestHandleDefinitionBroadcastNSBadData(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
 	bs.assertNoFinalizers()
@@ -185,7 +185,7 @@ func TestHandleDefinitionBroadcastDuplicate(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -216,7 +216,7 @@ func TestHandleDefinitionBroadcastDuplicateOverrideLocal(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
 	err = bs.finalizers[0](context.Background())
@@ -246,7 +246,7 @@ func TestHandleDefinitionBroadcastDuplicateOverrideLocalFail(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.EqualError(t, err, "pop")
@@ -273,7 +273,7 @@ func TestHandleDefinitionBroadcastDupCheckFail(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: fftypes.SystemTagDefineNamespace,
 		},
-	}, []*fftypes.Data{data}, fftypes.NewUUID())
+	}, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.EqualError(t, err, "pop")
diff --git a/internal/definitions/definition_handler_network_node.go b/internal/definitions/definition_handler_network_node.go
index 6f8d8e4a04..42f896610b 100644
--- a/internal/definitions/definition_handler_network_node.go
+++ b/internal/definitions/definition_handler_network_node.go
@@ -23,7 +23,7 @@ import (
 	"github.com/hyperledger/firefly/pkg/fftypes"
 )
-func (dh *definitionHandlers) handleDeprecatedNodeBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) {
+func (dh *definitionHandlers) handleDeprecatedNodeBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) {
 	l := log.L(ctx)
 	var nodeOld fftypes.DeprecatedNode
diff --git a/internal/definitions/definition_handler_network_node_test.go b/internal/definitions/definition_handler_network_node_test.go
index 2c15e39b54..a6baa8a717 100644
--- a/internal/definitions/definition_handler_network_node_test.go
+++ b/internal/definitions/definition_handler_network_node_test.go
@@ -129,7 +129,7 @@ func TestHandleDeprecatedNodeDefinitionOK(t *testing.T) {
 	mdx := dh.exchange.(*dataexchangemocks.Plugin)
 	mdx.On("AddPeer", ctx, node.DX.Endpoint).Return(nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, msg, []*fftypes.Data{data}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, msg, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
@@ -148,7 +148,7 @@ func TestHandleDeprecatedNodeDefinitionBadData(t *testing.T) {
 	dh, bs := newTestDefinitionHandlers(t)
 	ctx := context.Background()
-	action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, &fftypes.Message{}, []*fftypes.Data{})
+	action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, &fftypes.Message{}, fftypes.DataArray{})
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
@@ -167,7 +167,7 @@ func TestHandleDeprecatedNodeDefinitionFailOrgLookup(t *testing.T) {
 		Value: node.Owner,
 	}).Return(nil, fmt.Errorf("pop"))
-	action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, msg, []*fftypes.Data{data})
+	action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, msg, fftypes.DataArray{data})
 	assert.Equal(t, HandlerResult{Action: ActionRetry}, action)
 	assert.Regexp(t, "pop", err)
@@ -188,7 +188,7 @@ func TestHandleDeprecatedNodeDefinitionOrgNotFound(t *testing.T) {
 		Value: node.Owner,
 	}).Return(nil, nil)
-	action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, msg, []*fftypes.Data{data})
+	action, err := dh.handleDeprecatedNodeBroadcast(ctx, bs, msg, fftypes.DataArray{data})
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
diff --git a/internal/definitions/definition_handler_network_org.go b/internal/definitions/definition_handler_network_org.go
index 93e3c08c55..540e4bdb5c 100644
--- a/internal/definitions/definition_handler_network_org.go
+++ b/internal/definitions/definition_handler_network_org.go
@@ -22,7 +22,7 @@ import (
 	"github.com/hyperledger/firefly/pkg/fftypes"
 )
-func (dh *definitionHandlers) handleDeprecatedOrganizationBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) {
+func (dh *definitionHandlers) handleDeprecatedOrganizationBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) {
 	var orgOld fftypes.DeprecatedOrganization
 	valid := dh.getSystemBroadcastPayload(ctx, msg, data, &orgOld)
diff --git a/internal/definitions/definition_handler_network_org_test.go b/internal/definitions/definition_handler_network_org_test.go
index d64c70b565..b7dec269d6 100644
--- a/internal/definitions/definition_handler_network_org_test.go
+++ b/internal/definitions/definition_handler_network_org_test.go
@@ -109,7 +109,7 @@ func TestHandleDeprecatedOrgDefinitionOK(t *testing.T) {
 		return event.Type == fftypes.EventTypeIdentityConfirmed
 	})).Return(nil)
-	action, err := dh.HandleDefinitionBroadcast(ctx, bs, msg, []*fftypes.Data{data}, fftypes.NewUUID())
+	action, err := dh.HandleDefinitionBroadcast(ctx, bs, msg, fftypes.DataArray{data}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionConfirm}, action)
 	assert.NoError(t, err)
@@ -124,7 +124,7 @@ func TestHandleDeprecatedOrgDefinitionBadData(t *testing.T) {
 	dh, bs := newTestDefinitionHandlers(t)
 	ctx := context.Background()
-	action, err := dh.handleDeprecatedOrganizationBroadcast(ctx, bs, &fftypes.Message{}, []*fftypes.Data{})
+	action, err := dh.handleDeprecatedOrganizationBroadcast(ctx, bs, &fftypes.Message{}, fftypes.DataArray{})
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
diff --git a/internal/definitions/definition_handler_test.go b/internal/definitions/definition_handler_test.go
index 7d5d31d67b..35c7a96272 100644
--- a/internal/definitions/definition_handler_test.go
+++ b/internal/definitions/definition_handler_test.go
@@ -18,6 +18,7 @@ package definitions
 import (
 	"context"
+	"fmt"
 	"testing"
 	"github.com/hyperledger/firefly/mocks/assetmocks"
@@ -85,7 +86,7 @@ func TestHandleDefinitionBroadcastUnknown(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: "unknown",
 		},
-	}, []*fftypes.Data{}, fftypes.NewUUID())
+	}, fftypes.DataArray{}, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionReject}, action)
 	assert.NoError(t, err)
 	bs.assertNoFinalizers()
@@ -97,7 +98,7 @@ func TestGetSystemBroadcastPayloadMissingData(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: "unknown",
 		},
-	}, []*fftypes.Data{}, nil)
+	}, fftypes.DataArray{}, nil)
 	assert.False(t, valid)
 }
@@ -107,7 +108,7 @@ func TestGetSystemBroadcastPayloadBadJSON(t *testing.T) {
 		Header: fftypes.MessageHeader{
 			Tag: "unknown",
 		},
-	}, []*fftypes.Data{}, nil)
+	}, fftypes.DataArray{}, nil)
 	assert.False(t, valid)
 }
@@ -129,3 +130,11 @@ func TestPrivateMessagingPassthroughs(t *testing.T) {
 	mpm.AssertExpectations(t)
 }
+
+func TestActionEnum(t *testing.T) {
+	assert.Equal(t, "confirm", fmt.Sprintf("%s", ActionConfirm))
+	assert.Equal(t, "reject", fmt.Sprintf("%s", ActionReject))
+	assert.Equal(t, "retry", fmt.Sprintf("%s", ActionRetry))
+	assert.Equal(t, "wait", fmt.Sprintf("%s", ActionWait))
+	assert.Equal(t, "unknown", fmt.Sprintf("%s", DefinitionMessageAction(999)))
+}
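TestActionEnum pins down a string form for each handler action, including an "unknown" fallback for out-of-range values. One way DefinitionMessageAction could be declared to satisfy this test (a sketch; the constant ordering is an assumption):

    type DefinitionMessageAction int

    const (
    	ActionReject DefinitionMessageAction = iota
    	ActionConfirm
    	ActionRetry
    	ActionWait
    )

    // String makes the action print as "confirm", "reject", "retry" or
    // "wait" via %s, with "unknown" for anything else - exactly what the
    // test asserts.
    func (dma DefinitionMessageAction) String() string {
    	switch dma {
    	case ActionConfirm:
    		return "confirm"
    	case ActionReject:
    		return "reject"
    	case ActionRetry:
    		return "retry"
    	case ActionWait:
    		return "wait"
    	default:
    		return "unknown"
    	}
    }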
diff --git a/internal/definitions/definition_handler_tokenpool.go b/internal/definitions/definition_handler_tokenpool.go
index 1def4a9048..16494696a0 100644
--- a/internal/definitions/definition_handler_tokenpool.go
+++ b/internal/definitions/definition_handler_tokenpool.go
@@ -26,8 +26,6 @@ import (
 func (dh *definitionHandlers) persistTokenPool(ctx context.Context, announce *fftypes.TokenPoolAnnouncement) (valid bool, err error) {
 	pool := announce.Pool
-
-	// Create the pool in pending state
 	pool.State = fftypes.TokenPoolStatePending
 	err = dh.database.UpsertTokenPool(ctx, pool)
 	if err != nil {
@@ -38,11 +36,10 @@ func (dh *definitionHandlers) persistTokenPool(ctx context.Context, announce *ff
 		log.L(ctx).Errorf("Failed to insert token pool '%s': %s", pool.ID, err)
 		return false, err // retryable
 	}
-
 	return true, nil
 }
-func (dh *definitionHandlers) handleTokenPoolBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data) (HandlerResult, error) {
+func (dh *definitionHandlers) handleTokenPoolBroadcast(ctx context.Context, state DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray) (HandlerResult, error) {
 	var announce fftypes.TokenPoolAnnouncement
 	if valid := dh.getSystemBroadcastPayload(ctx, msg, data, &announce); !valid {
 		return HandlerResult{Action: ActionReject}, nil
@@ -67,6 +64,7 @@ func (dh *definitionHandlers) handleTokenPoolBroadcast(ctx context.Context, stat
 		return HandlerResult{Action: ActionConfirm, CustomCorrelator: correlator}, nil
 	}
+	// Create the pool in pending state
 	if valid, err := dh.persistTokenPool(ctx, &announce); err != nil {
 		return HandlerResult{Action: ActionRetry}, err
 	} else if !valid {
@@ -76,7 +74,7 @@ func (dh *definitionHandlers) handleTokenPoolBroadcast(ctx context.Context, stat
 	// Message will remain unconfirmed, but plugin will be notified to activate the pool
 	// This will ultimately trigger a pool creation event and a rewind
 	state.AddPreFinalize(func(ctx context.Context) error {
-		if err := dh.assets.ActivateTokenPool(ctx, pool, announce.Event); err != nil {
+		if err := dh.assets.ActivateTokenPool(ctx, pool, announce.Event.Info); err != nil {
 			log.L(ctx).Errorf("Failed to activate token pool '%s': %s", pool.ID, err)
 			return err
 		}
diff --git a/internal/definitions/definition_handler_tokenpool_test.go b/internal/definitions/definition_handler_tokenpool_test.go
index 2d466a5258..2f2f10e0fe 100644
--- a/internal/definitions/definition_handler_tokenpool_test.go
+++ b/internal/definitions/definition_handler_tokenpool_test.go
@@ -44,12 +44,14 @@ func newPoolAnnouncement() *fftypes.TokenPoolAnnouncement {
 		},
 	}
 	return &fftypes.TokenPoolAnnouncement{
-		Pool:  pool,
-		Event: &fftypes.BlockchainEvent{},
+		Pool: pool,
+		Event: &fftypes.BlockchainEvent{
+			Info: fftypes.JSONObject{"some": "info"},
+		},
 	}
 }
-func buildPoolDefinitionMessage(announce *fftypes.TokenPoolAnnouncement) (*fftypes.Message, []*fftypes.Data, error) {
+func buildPoolDefinitionMessage(announce *fftypes.TokenPoolAnnouncement) (*fftypes.Message, fftypes.DataArray, error) {
 	msg := &fftypes.Message{
 		Header: fftypes.MessageHeader{
 			ID:   fftypes.NewUUID(),
@@ -60,7 +62,7 @@ func buildPoolDefinitionMessage(announce *fftypes.TokenPoolAnnouncement) (*fftyp
 	if err != nil {
 		return nil, nil, err
 	}
-	data := []*fftypes.Data{{
+	data := fftypes.DataArray{{
 		Value: fftypes.JSONAnyPtrBytes(b),
 	}}
 	return msg, data, nil
@@ -80,7 +82,7 @@ func TestHandleDefinitionBroadcastTokenPoolActivateOK(t *testing.T) {
 	mdi.On("UpsertTokenPool", context.Background(), mock.MatchedBy(func(p *fftypes.TokenPool) bool {
 		return *p.ID == *pool.ID && p.Message == msg.Header.ID
 	})).Return(nil)
-	mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*fftypes.TokenPool"), mock.AnythingOfType("*fftypes.BlockchainEvent")).Return(nil)
+	mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*fftypes.TokenPool"), announce.Event.Info).Return(nil)
 	action, err := sh.HandleDefinitionBroadcast(context.Background(), bs, msg, data, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionWait, CustomCorrelator: pool.ID}, action)
@@ -125,7 +127,7 @@ func TestHandleDefinitionBroadcastTokenPoolExisting(t *testing.T) {
 	mdi.On("UpsertTokenPool", context.Background(), mock.MatchedBy(func(p *fftypes.TokenPool) bool {
 		return *p.ID == *pool.ID && p.Message == msg.Header.ID
 	})).Return(nil)
-	mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*fftypes.TokenPool"), mock.AnythingOfType("*fftypes.BlockchainEvent")).Return(nil)
+	mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*fftypes.TokenPool"), announce.Event.Info).Return(nil)
 	action, err := sh.HandleDefinitionBroadcast(context.Background(), bs, msg, data, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionWait, CustomCorrelator: pool.ID}, action)
@@ -215,7 +217,7 @@ func TestHandleDefinitionBroadcastTokenPoolActivateFail(t *testing.T) {
 	mdi.On("UpsertTokenPool", context.Background(), mock.MatchedBy(func(p *fftypes.TokenPool) bool {
 		return *p.ID == *pool.ID && p.Message == msg.Header.ID
 	})).Return(nil)
-	mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*fftypes.TokenPool"), mock.AnythingOfType("*fftypes.BlockchainEvent")).Return(fmt.Errorf("pop"))
+	mam.On("ActivateTokenPool", context.Background(), mock.AnythingOfType("*fftypes.TokenPool"), announce.Event.Info).Return(fmt.Errorf("pop"))
 	action, err := sh.HandleDefinitionBroadcast(context.Background(), bs, msg, data, fftypes.NewUUID())
 	assert.Equal(t, HandlerResult{Action: ActionWait, CustomCorrelator: pool.ID}, action)
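The activation path now hands the assets manager just the blockchain-specific Info payload of the announcement event, rather than the whole fftypes.BlockchainEvent. The interface change implied by these call sites and mocks looks like this (the interface and parameter names are assumptions for illustration):

    // Before: ActivateTokenPool(ctx, pool, event *fftypes.BlockchainEvent) error
    // After - only the chain-specific info the connector needs:
    type PoolActivator interface { // hypothetical name
    	ActivateTokenPool(ctx context.Context, pool *fftypes.TokenPool, blockchainInfo fftypes.JSONObject) error
    }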
diff --git a/internal/events/aggregator.go b/internal/events/aggregator.go
index cf42308142..364f830362 100644
--- a/internal/events/aggregator.go
+++ b/internal/events/aggregator.go
@@ -20,6 +20,9 @@ import (
 	"context"
 	"crypto/sha256"
 	"database/sql/driver"
+	"fmt"
+	"strings"
+	"time"
 	"github.com/hyperledger/firefly/internal/config"
 	"github.com/hyperledger/firefly/internal/data"
@@ -31,6 +34,7 @@ import (
 	"github.com/hyperledger/firefly/pkg/blockchain"
 	"github.com/hyperledger/firefly/pkg/database"
 	"github.com/hyperledger/firefly/pkg/fftypes"
+	"github.com/karlseguin/ccache"
 )
 const (
@@ -45,11 +49,17 @@ type aggregator struct {
 	data         data.Manager
 	eventPoller  *eventPoller
 	verifierType fftypes.VerifierType
-	newPins      chan int64
-	rewindBatches chan *fftypes.UUID
-	queuedRewinds chan *fftypes.UUID
+	rewindBatches chan fftypes.UUID
+	queuedRewinds chan fftypes.UUID
 	retry        *retry.Retry
 	metrics      metrics.Manager
+	batchCache    *ccache.Cache
+	batchCacheTTL time.Duration
+}
+
+type batchCacheEntry struct {
+	batch    *fftypes.BatchPersisted
+	manifest *fftypes.BatchManifest
 }
 func newAggregator(ctx context.Context, di database.Plugin, bi blockchain.Plugin, sh definitions.DefinitionHandlers, im identity.Manager, dm data.Manager, en *eventNotifier, mm metrics.Manager) *aggregator {
@@ -61,11 +71,16 @@ func newAggregator(ctx context.Context, di database.Plugin, bi blockchain.Plugin
 		identity:     im,
 		data:         dm,
 		verifierType: bi.VerifierType(),
-		newPins:      make(chan int64),
-		rewindBatches: make(chan *fftypes.UUID, 1), // hops to queuedRewinds with a shouldertab on the event poller
-		queuedRewinds: make(chan *fftypes.UUID, batchSize),
+		rewindBatches: make(chan fftypes.UUID, 1), // hops to queuedRewinds with a shouldertab on the event poller
+		queuedRewinds: make(chan fftypes.UUID, batchSize),
 		metrics:      mm,
+		batchCacheTTL: config.GetDuration(config.BatchCacheTTL),
 	}
+	ag.batchCache = ccache.New(
+		// We use an LRU cache with a size-aware max
+		ccache.Configure().
+			MaxSize(config.GetByteSize(config.BatchCacheSize)),
+	)
 	firstEvent := fftypes.SubOptsFirstEvent(config.GetString(config.EventAggregatorFirstEvent))
 	ag.eventPoller = newEventPoller(ctx, di, en, &eventPollerConf{
 		eventBatchSize: batchSize,
@@ -111,18 +126,20 @@ func (ag *aggregator) batchRewindListener() {
 }
 func (ag *aggregator) rewindOffchainBatches() (rewind bool, offset int64) {
+	l := log.L(ag.ctx)
+	var batchIDs []driver.Value
+	draining := true
+	for draining {
+		select {
+		case batchID := <-ag.queuedRewinds:
+			batchIDs = append(batchIDs, batchID)
+			l.Debugf("Rewinding for batch %s", &batchID)
+		default:
+			draining = false
+		}
+	}
 	// Retry indefinitely for database errors (until the context closes)
 	_ = ag.retry.Do(ag.ctx, "check for off-chain batch deliveries", func(attempt int) (retry bool, err error) {
-		var batchIDs []driver.Value
-		draining := true
-		for draining {
-			select {
-			case batchID := <-ag.queuedRewinds:
-				batchIDs = append(batchIDs, batchID)
-			default:
-				draining = false
-			}
-		}
 		if len(batchIDs) > 0 {
 			fb := database.PinQueryFactory.NewFilter(ag.ctx)
 			filter := fb.And(
@@ -135,8 +152,7 @@ func (ag *aggregator) rewindOffchainBatches() (rewind bool, offset int64) {
 			}
 			if len(sequences) > 0 {
 				rewind = true
-				offset = sequences[0].Sequence - 1
-				log.L(ag.ctx).Debugf("Rewinding for off-chain data arrival. New local pin sequence %d", offset)
+				offset = sequences[0].Sequence - 1 // offset is set to the last event we saw (so one behind the next pin we want)
 			}
 		}
 		return false, nil
@@ -183,7 +199,8 @@ func (ag *aggregator) processPinsEventsHandler(items []fftypes.LocallySequenced)
 	})
 }
-func (ag *aggregator) getPins(ctx context.Context, filter database.Filter) ([]fftypes.LocallySequenced, error) {
+func (ag *aggregator) getPins(ctx context.Context, filter database.Filter, offset int64) ([]fftypes.LocallySequenced, error) {
+	log.L(ctx).Tracef("Reading page of pins > %d (first pin would be %d)", offset, offset+1)
 	pins, _, err := ag.database.GetPins(ctx, filter)
 	ls := make([]fftypes.LocallySequenced, len(pins))
 	for i, p := range pins {
@@ -192,67 +209,141 @@ func (ag *aggregator) getPins(ctx context.Context, filter database.Filter) ([]ff
 	return ls, err
 }
-func (ag *aggregator) extractBatchMessagePin(batch *fftypes.Batch, requiredIndex int64) (totalBatchPins int64, msg *fftypes.Message, msgBaseIndex int64) {
-	for _, batchMsg := range batch.Payload.Messages {
+func (ag *aggregator) extractBatchMessagePin(manifest *fftypes.BatchManifest, requiredIndex int64) (totalBatchPins int64, msgEntry *fftypes.MessageManifestEntry, msgBaseIndex int64) {
+	for _, batchMsg := range manifest.Messages {
 		batchMsgBaseIdx := totalBatchPins
-		for i := 0; i < len(batchMsg.Header.Topics); i++ {
+		for i := 0; i < batchMsg.Topics; i++ {
 			if totalBatchPins == requiredIndex {
-				msg = batchMsg
+				msgEntry = batchMsg
 				msgBaseIndex = batchMsgBaseIdx
 			}
 			totalBatchPins++
 		}
 	}
-	return totalBatchPins, msg, msgBaseIndex
+	return totalBatchPins, msgEntry, msgBaseIndex
+}
+
+func (ag *aggregator) migrateManifest(ctx context.Context, persistedBatch *fftypes.BatchPersisted) *fftypes.BatchManifest {
+	// In version v0.13.x and earlier, we stored the full batch
+	var fullPayload fftypes.BatchPayload
+	err := persistedBatch.Manifest.Unmarshal(ctx, &fullPayload)
+	if err != nil {
+		log.L(ctx).Errorf("Invalid migration persisted batch: %s", err)
+		return nil
+	}
+	if len(fullPayload.Messages) == 0 {
+		log.L(ctx).Errorf("Invalid migration persisted batch: no payload")
+		return nil
+	}
+
+	return persistedBatch.GenManifest(fullPayload.Messages, fullPayload.Data)
+}
+
+func (ag *aggregator) extractManifest(ctx context.Context, batch *fftypes.BatchPersisted) *fftypes.BatchManifest {
+
+	var manifest fftypes.BatchManifest
+	err := batch.Manifest.Unmarshal(ctx, &manifest)
+	if err != nil {
+		log.L(ctx).Errorf("Invalid manifest: %s", err)
+		return nil
+	}
+	switch manifest.Version {
+	case fftypes.ManifestVersionUnset:
+		return ag.migrateManifest(ctx, batch)
+	case fftypes.ManifestVersion1:
+		return &manifest
+	default:
+		log.L(ctx).Errorf("Invalid manifest version: %d", manifest.Version)
+		return nil
+	}
+}
+
+func (ag *aggregator) getBatchCacheKey(id *fftypes.UUID, hash *fftypes.Bytes32) string {
+	return fmt.Sprintf("%s/%s", id, hash)
+}
+
+func (ag *aggregator) GetBatchForPin(ctx context.Context, pin *fftypes.Pin) (*fftypes.BatchPersisted, *fftypes.BatchManifest, error) {
+	cacheKey := ag.getBatchCacheKey(pin.Batch, pin.BatchHash)
+	cached := ag.batchCache.Get(cacheKey)
+	if cached != nil {
+		cached.Extend(ag.batchCacheTTL)
+		bce := cached.Value().(*batchCacheEntry)
+		log.L(ag.ctx).Debugf("Batch cache hit %s", cacheKey)
+		return bce.batch, bce.manifest, nil
+	}
+	batch, err := ag.database.GetBatchByID(ctx, pin.Batch)
+	if err != nil {
+		return nil, nil, err
+	}
+	if batch == nil {
+		return nil, nil, nil
+	}
+	if !batch.Hash.Equals(pin.BatchHash) {
+		log.L(ctx).Errorf("Batch %s hash does not match the pin. OffChain=%s OnChain=%s", pin.Batch, batch.Hash, pin.BatchHash)
+		return nil, nil, nil
+	}
+	manifest := ag.extractManifest(ctx, batch)
+	if manifest == nil {
+		log.L(ctx).Errorf("Batch %s manifest could not be extracted - pin %s is parked", pin.Batch, pin.Hash)
+		return nil, nil, nil
+	}
+	ag.cacheBatch(cacheKey, batch, manifest)
+	return batch, manifest, nil
+}
+
+func (ag *aggregator) cacheBatch(cacheKey string, batch *fftypes.BatchPersisted, manifest *fftypes.BatchManifest) {
+	bce := &batchCacheEntry{
+		batch:    batch,
+		manifest: manifest,
+	}
+	ag.batchCache.Set(cacheKey, bce, ag.batchCacheTTL)
+	log.L(ag.ctx).Debugf("Cached batch %s", cacheKey)
+}
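extractManifest above keys off manifest.Version: an unset version means the persisted Manifest field still holds a full v0.13-era payload and must be migrated, while version 1 is the new compact form. Based on the newTestManifest fixture later in this patch, building a version-1 manifest looks like this (batchID, txID, msgID and msgHash are placeholders):

    manifest := &fftypes.BatchManifest{
    	Version: 1, // fftypes.ManifestVersion1; the zero value triggers migrateManifest
    	ID:      batchID,
    	TX: fftypes.TransactionRef{
    		Type: fftypes.TransactionTypeBatchPin,
    		ID:   txID,
    	},
    	Messages: []*fftypes.MessageManifestEntry{{
    		MessageRef: fftypes.MessageRef{ID: msgID, Hash: msgHash},
    		Topics:     1, // topic count = number of pins the message owns in the batch
    	}},
    }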
 func (ag *aggregator) processPins(ctx context.Context, pins []*fftypes.Pin, state *batchState) (err error) {
 	l := log.L(ctx)
 	// Keep a batch cache for this list of pins
-	var batch *fftypes.Batch
+	var batch *fftypes.BatchPersisted
+	var manifest *fftypes.BatchManifest
 	// As messages can have multiple topics, we need to avoid processing the message twice in the same poll loop.
 	// We must check all the contexts in the message, and mark them dispatched together.
 	dupMsgCheck := make(map[fftypes.UUID]bool)
 	for _, pin := range pins {
-		if batch == nil || *batch.ID != *pin.Batch {
-			batch, err = ag.database.GetBatchByID(ctx, pin.Batch)
+		if batch == nil || !batch.ID.Equals(pin.Batch) {
+			batch, manifest, err = ag.GetBatchForPin(ctx, pin)
 			if err != nil {
 				return err
 			}
 			if batch == nil {
-				l.Debugf("Batch %s not available - pin %s is parked", pin.Batch, pin.Hash)
+				l.Debugf("Pin %.10d batch unavailable: batch=%s pinIndex=%d hash=%s masked=%t", pin.Sequence, pin.Batch, pin.Index, pin.Hash, pin.Masked)
 				continue
 			}
 		}
 		// Extract the message from the batch - where the index is of a topic within a message
-		batchPinCount, msg, msgBaseIndex := ag.extractBatchMessagePin(batch, pin.Index)
-		if msg == nil {
+		batchPinCount, msgEntry, msgBaseIndex := ag.extractBatchMessagePin(manifest, pin.Index)
+		if msgEntry == nil {
 			l.Errorf("Pin %.10d outside of range: batch=%s pinCount=%d pinIndex=%d hash=%s masked=%t", pin.Sequence, pin.Batch, batchPinCount, pin.Index, pin.Hash, pin.Masked)
 			continue
 		}
-		l.Debugf("Aggregating pin %.10d batch=%s msg=%s pinIndex=%d msgBaseIndex=%d hash=%s masked=%t", pin.Sequence, pin.Batch, msg.Header.ID, pin.Index, msgBaseIndex, pin.Hash, pin.Masked)
-		if msg.Header.ID == nil {
-			l.Errorf("null message entry %d in batch '%s'", pin.Index, batch.ID)
-			continue
-		}
-		if dupMsgCheck[*msg.Header.ID] {
+		l.Debugf("Aggregating pin %.10d batch=%s msg=%s pinIndex=%d msgBaseIndex=%d hash=%s masked=%t", pin.Sequence, pin.Batch, msgEntry.ID, pin.Index, msgBaseIndex, pin.Hash, pin.Masked)
+		if dupMsgCheck[*msgEntry.ID] {
 			continue
 		}
-		dupMsgCheck[*msg.Header.ID] = true
+		dupMsgCheck[*msgEntry.ID] = true
 		// Attempt to process the message (only returns errors for database persistence issues)
-		err := ag.processMessage(ctx, batch, pin, msgBaseIndex, msg, state)
+		err := ag.processMessage(ctx, manifest, pin, msgBaseIndex, msgEntry, state)
 		if err != nil {
 			return err
 		}
 	}
-	err = ag.eventPoller.commitOffset(ctx, pins[len(pins)-1].Sequence)
-	return err
+	ag.eventPoller.commitOffset(pins[len(pins)-1].Sequence)
+	return nil
 }
@@ -295,9 +386,24 @@ func (ag *aggregator) checkOnchainConsistency(ctx context.Context, msg *fftypes.
 	return true, nil
 }
-func (ag *aggregator) processMessage(ctx context.Context, batch *fftypes.Batch, pin *fftypes.Pin, msgBaseIndex int64, msg *fftypes.Message, state *batchState) (err error) {
+func (ag *aggregator) processMessage(ctx context.Context, manifest *fftypes.BatchManifest, pin *fftypes.Pin, msgBaseIndex int64, msgEntry *fftypes.MessageManifestEntry, state *batchState) (err error) {
 	l := log.L(ctx)
+	var cro data.CacheReadOption
+	if pin.Masked {
+		cro = data.CRORequirePins
+	} else {
+		cro = data.CRORequirePublicBlobRefs
+	}
+	msg, data, dataAvailable, err := ag.data.GetMessageWithDataCached(ctx, msgEntry.ID, cro)
+	if err != nil {
+		return err
+	}
+	if !dataAvailable {
+		l.Errorf("Message '%s' in batch '%s' is missing data", msgEntry.ID, manifest.ID)
+		return nil
+	}
 	// Check if it's ready to be processed
 	unmaskedContexts := make([]*fftypes.Bytes32, 0, len(msg.Header.Topics))
 	nextPins := make([]*nextPinState, 0, len(msg.Header.Topics))
 	if pin.Masked {
 		// Private messages have one or more masked "pin" hashes that allow us to work
 		// out if it's the next message in the sequence, given the previous messages
 		if msg.Header.Group == nil || len(msg.Pins) == 0 || len(msg.Header.Topics) != len(msg.Pins) {
-			l.Errorf("Message '%s' in batch '%s' has invalid pin data pins=%v topics=%v", msg.Header.ID, batch.ID, msg.Pins, msg.Header.Topics)
+			l.Errorf("Message '%s' in batch '%s' has invalid pin data pins=%v topics=%v", msg.Header.ID, manifest.ID, msg.Pins, msg.Header.Topics)
 			return nil
 		}
 		for i, pinStr := range msg.Pins {
 			var msgContext fftypes.Bytes32
-			err := msgContext.UnmarshalText([]byte(pinStr))
+			pinSplit := strings.Split(pinStr, ":")
+			nonceStr := ""
+			if len(pinSplit) > 1 {
+				// We introduced a "HASH:NONCE" syntax into the pin strings, to aid debug, but the inclusion of the
+				// nonce after the hash is not necessary.
+				nonceStr = pinSplit[1]
+			}
+			err := msgContext.UnmarshalText([]byte(pinSplit[0]))
 			if err != nil {
-				l.Errorf("Message '%s' in batch '%s' has invalid pin at index %d: '%s'", msg.Header.ID, batch.ID, i, pinStr)
+				l.Errorf("Message '%s' in batch '%s' has invalid pin at index %d: '%s'", msg.Header.ID, manifest.ID, i, pinStr)
 				return nil
 			}
-			nextPin, err := state.CheckMaskedContextReady(ctx, msg, msg.Header.Topics[i], pin.Sequence, &msgContext)
+			nextPin, err := state.CheckMaskedContextReady(ctx, msg, msg.Header.Topics[i], pin.Sequence, &msgContext, nonceStr)
 			if err != nil || nextPin == nil {
 				return err
 			}
@@ -335,10 +448,14 @@ func (ag *aggregator) processMessage(ctx context.Context, batch *fftypes.Batch,
 	}
-	l.Debugf("Attempt dispatch msg=%s broadcastContexts=%v privatePins=%v", msg.Header.ID, unmaskedContexts, msg.Pins)
-	dispatched, err := ag.attemptMessageDispatch(ctx, msg, batch.Payload.TX.ID, state, pin)
-	if err != nil {
-		return err
+	dispatched := false
+	var newState fftypes.MessageState
+	if dataAvailable {
+		l.Debugf("Attempt dispatch msg=%s broadcastContexts=%v privatePins=%v", msg.Header.ID, unmaskedContexts, msg.Pins)
+		newState, dispatched, err = ag.attemptMessageDispatch(ctx, msg, data, manifest.TX.ID, state, pin)
+		if err != nil {
+			return err
+		}
 	}
 	// Mark all message pins dispatched true/false
@@ -348,7 +465,7 @@
 	for _, np := range nextPins {
 		np.IncrementNextPin(ctx)
 	}
-	state.MarkMessageDispatched(ctx, batch.ID, msg, msgBaseIndex)
+	state.MarkMessageDispatched(ctx, manifest.ID, msg, msgBaseIndex, newState)
 	} else {
 	for _, unmaskedContext := range unmaskedContexts {
 		state.SetContextBlockedBy(ctx, *unmaskedContext, pin.Sequence)
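The pin strings parsed above are produced by the sender; the test fixtures in this patch build them with fmt.Sprintf. A small sketch of the round trip, matching the split logic in processMessage (pinHash and nonce are placeholders):

    // Building the debug-friendly "HASH:NONCE" form used in the tests.
    pinStr := fmt.Sprintf("%s:%.9d", pinHash, nonce)

    // Parsing tolerates both "HASH" and "HASH:NONCE"; only the hash part
    // is needed to match the masked context - the nonce is for logs.
    pinSplit := strings.Split(pinStr, ":")
    nonceStr := ""
    if len(pinSplit) > 1 {
    	nonceStr = pinSplit[1]
    }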
@@ -358,22 +475,16 @@
 	return nil
 }
-func (ag *aggregator) attemptMessageDispatch(ctx context.Context, msg *fftypes.Message, tx *fftypes.UUID, state *batchState, pin *fftypes.Pin) (bool, error) {
-
-	// If we don't find all the data, then we don't dispatch
-	data, foundAll, err := ag.data.GetMessageData(ctx, msg, true)
-	if err != nil || !foundAll {
-		return false, err
-	}
+func (ag *aggregator) attemptMessageDispatch(ctx context.Context, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID, state *batchState, pin *fftypes.Pin) (newState fftypes.MessageState, valid bool, err error) {
 	// Check the pin signer is valid for the message
 	if valid, err := ag.checkOnchainConsistency(ctx, msg, pin); err != nil || !valid {
-		return false, err
+		return "", false, err
 	}
 	// Verify we have all the blobs for the data
 	if resolved, err := ag.resolveBlobs(ctx, data); err != nil || !resolved {
-		return false, err
+		return "", false, err
 	}
 	// For transfers, verify the transfer has come through
@@ -384,26 +495,27 @@ func (ag *aggregator) attemptMessageDispatch(ctx context.Context, msg *fftypes.M
 		)
 		if transfers, _, err := ag.database.GetTokenTransfers(ctx, filter); err != nil || len(transfers) == 0 {
 			log.L(ctx).Debugf("Transfer for message %s not yet available", msg.Header.ID)
-			return false, err
+			return "", false, err
 		} else if !msg.Hash.Equals(transfers[0].MessageHash) {
 			log.L(ctx).Errorf("Message hash %s does not match hash recorded in transfer: %s", msg.Hash, transfers[0].MessageHash)
-			return false, nil
+			return "", false, nil
 		}
 	}
 	// Validate the message data
-	valid := true
+	valid = true
 	var customCorrelator *fftypes.UUID
 	switch {
 	case msg.Header.Type == fftypes.MessageTypeDefinition:
 		// We handle definition events in-line on the aggregator, as it would be confusing for apps to be
 		// dispatched subsequent events before we have processed the definition events they depend on.
 		handlerResult, err := ag.definitions.HandleDefinitionBroadcast(ctx, state, msg, data, tx)
+		log.L(ctx).Infof("Result of definition broadcast '%s' [%s]: %s", msg.Header.Tag, msg.Header.ID, handlerResult.Action)
 		if handlerResult.Action == definitions.ActionRetry {
-			return false, err
+			return "", false, err
 		}
 		if handlerResult.Action == definitions.ActionWait {
-			return false, nil
+			return "", false, nil
 		}
 		customCorrelator = handlerResult.CustomCorrelator
 		valid = handlerResult.Action == definitions.ActionConfirm
@@ -414,51 +526,44 @@
 	case len(msg.Data) > 0:
 		valid, err = ag.data.ValidateAll(ctx, data)
 		if err != nil {
-			return false, err
+			return "", false, err
 		}
 	}
-	status := fftypes.MessageStateConfirmed
+	newState = fftypes.MessageStateConfirmed
 	eventType := fftypes.EventTypeMessageConfirmed
 	if valid {
 		state.pendingConfirms[*msg.Header.ID] = msg
 	} else {
-		status = fftypes.MessageStateRejected
+		newState = fftypes.MessageStateRejected
 		eventType = fftypes.EventTypeMessageRejected
 	}
 	state.AddFinalize(func(ctx context.Context) error {
-		// This message is now confirmed
-		setConfirmed := database.MessageQueryFactory.NewUpdate(ctx).
-			Set("confirmed", fftypes.Now()). // the timestamp of the aggregator provides ordering
-			Set("state", status) // mark if the message was confirmed or rejected
-		if err = ag.database.UpdateMessage(ctx, msg.Header.ID, setConfirmed); err != nil {
-			return err
-		}
-
-		// Generate the appropriate event
-		event := fftypes.NewEvent(eventType, msg.Header.Namespace, msg.Header.ID, tx)
-		event.Correlator = msg.Header.CID
-		if customCorrelator != nil {
-			// Definition handlers can set a custom event correlator (such as a token pool ID)
-			event.Correlator = customCorrelator
-		}
-		if err = ag.database.InsertEvent(ctx, event); err != nil {
-			return err
+		// Generate the appropriate event - one per topic (events cover a single topic)
+		for _, topic := range msg.Header.Topics {
+			event := fftypes.NewEvent(eventType, msg.Header.Namespace, msg.Header.ID, tx, topic)
+			event.Correlator = msg.Header.CID
+			if customCorrelator != nil {
+				// Definition handlers can set a custom event correlator (such as a token pool ID)
+				event.Correlator = customCorrelator
+			}
+			if err = ag.database.InsertEvent(ctx, event); err != nil {
+				return err
+			}
 		}
-		log.L(ctx).Infof("Emitting %s %s for message %s:%s (correlator=%v)", eventType, event.ID, msg.Header.Namespace, msg.Header.ID, event.Correlator)
 		return nil
 	})
 	if ag.metrics.IsMetricsEnabled() {
 		ag.metrics.MessageConfirmed(msg, eventType)
 	}
-	return true, nil
+	return newState, true, nil
 }
 // resolveBlobs ensures that the blobs for all the attachments in the data array, have been received into the
 // local data exchange blob store. Either because of a private transfer, or by downloading them from the shared storage
-func (ag *aggregator) resolveBlobs(ctx context.Context, data []*fftypes.Data) (resolved bool, err error) {
+func (ag *aggregator) resolveBlobs(ctx context.Context, data fftypes.DataArray) (resolved bool, err error) {
 	l := log.L(ctx)
 	for _, d := range data {
@@ -476,22 +581,9 @@ func (ag *aggregator) resolveBlobs(ctx context.Context, data []*fftypes.Data) (r
 			continue
 		}
-		// If there's a public reference, download it from there and stream it into the blob store
-		// We double check the hash on the way, to ensure the streaming from A->B worked ok.
-		if d.Blob.Public != "" {
-			blob, err = ag.data.CopyBlobPStoDX(ctx, d)
-			if err != nil {
-				return false, err
-			}
-			if blob != nil {
-				l.Debugf("Blob '%s' downloaded from shared storage to local DX with ref '%s'", blob.Hash, blob.PayloadRef)
-				continue
-			}
-		}
-
 		// If we've reached here, the data isn't available yet.
 		// This isn't an error, we just need to wait for it to arrive.
-		l.Debugf("Blob '%s' not available", d.Blob.Hash)
+		l.Debugf("Blob '%s' not available for data %s", d.Blob.Hash, d.ID)
 		return false, nil
 	}
@@ -499,3 +591,44 @@
 	return true, nil
 }
+
+func (ag *aggregator) rewindForBlobArrival(ctx context.Context, blobHash *fftypes.Bytes32) error {
+
+	batchIDs := make(map[fftypes.UUID]bool)
+
+	// We need to work out what pins potentially are unblocked by the arrival of this data
+
+	// Find any data associated with this blob
+	var data []*fftypes.DataRef
+	filter := database.DataQueryFactory.NewFilter(ctx).Eq("blob.hash", blobHash)
+	data, _, err := ag.database.GetDataRefs(ctx, filter)
+	if err != nil {
+		return err
+	}
+
+	// Find the messages associated with that data
+	var messages []*fftypes.Message
+	for _, data := range data {
+		fb := database.MessageQueryFactory.NewFilter(ctx)
+		filter := fb.And(fb.Eq("confirmed", nil))
+		messages, _, err = ag.database.GetMessagesForData(ctx, data.ID, filter)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Find the unique batch IDs for all the messages
+	for _, msg := range messages {
+		if msg.BatchID != nil {
+			batchIDs[*msg.BatchID] = true
+		}
+	}
+
+	// Initiate rewinds for all the batchIDs that are potentially completed by the arrival of this data
+	for bid := range batchIDs {
+		var batchID = bid // cannot use the address of the loop var
+		log.L(ag.ctx).Infof("Batch '%s' contains reference to received blob %s", &bid, blobHash)
+		ag.rewindBatches <- batchID
+	}
+	return nil
+}
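rewindForBlobArrival feeds the buffered rewindBatches channel declared in the aggregator struct. A minimal sketch of how a data-exchange event handler might drive it when a blob lands (the handler shown is hypothetical, not part of this patch):

    // Hypothetical caller: when data exchange reports a blob has arrived,
    // kick the aggregator so any parked pins referencing it are retried.
    func (em *eventManager) blobReceived(ctx context.Context, hash *fftypes.Bytes32) {
    	if err := em.aggregator.rewindForBlobArrival(ctx, hash); err != nil {
    		log.L(ctx).Errorf("Failed to rewind for blob %s: %s", hash, err)
    	}
    }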
diff --git a/internal/events/aggregator_batch_state.go b/internal/events/aggregator_batch_state.go
index 00820b2e67..7bba0826eb 100644
--- a/internal/events/aggregator_batch_state.go
+++ b/internal/events/aggregator_batch_state.go
@@ -26,6 +26,7 @@ import (
 	"github.com/hyperledger/firefly/internal/log"
 	"github.com/hyperledger/firefly/pkg/database"
 	"github.com/hyperledger/firefly/pkg/fftypes"
+	"github.com/sirupsen/logrus"
 )
 func newBatchState(ag *aggregator) *batchState {
@@ -71,7 +72,9 @@ type dispatchedMessage struct {
 	batchID       *fftypes.UUID
 	msgID         *fftypes.UUID
 	firstPinIndex int64
-	lastPinIndex  int64
+	topicCount    int
+	msgPins       fftypes.FFStringArray
+	newState      fftypes.MessageState
 }
 // batchState is the object that tracks the in-memory state that builds up while processing a batch of pins,
@@ -175,7 +178,7 @@ func (bs *batchState) CheckUnmaskedContextReady(ctx context.Context, contextUnma
 }
-func (bs *batchState) CheckMaskedContextReady(ctx context.Context, msg *fftypes.Message, topic string, firstMsgPinSequence int64, pin *fftypes.Bytes32) (*nextPinState, error) {
+func (bs *batchState) CheckMaskedContextReady(ctx context.Context, msg *fftypes.Message, topic string, firstMsgPinSequence int64, pin *fftypes.Bytes32, nonceStr string) (*nextPinState, error) {
 	l := log.L(ctx)
 	// For masked pins, we can only process if:
@@ -197,7 +200,7 @@ func (bs *batchState) CheckMaskedContextReady(ctx context.Context, msg *fftypes.
 	}
 	// This message must be the next hash for the author
-	l.Debugf("Group=%s Topic='%s' Sequence=%d Pin=%s NextPins=%v", msg.Header.Group, topic, firstMsgPinSequence, pin, npg.nextPins)
+	l.Debugf("Group=%s Topic='%s' Sequence=%d Pin=%s", msg.Header.Group, topic, firstMsgPinSequence, pin)
 	var nextPin *fftypes.NextPin
 	for _, np := range npg.nextPins {
 		if *np.Hash == *pin {
@@ -206,7 +209,12 @@
 		}
 	}
 	if nextPin == nil || nextPin.Identity != msg.Header.Author {
-		l.Warnf("Mismatched nexthash or author group=%s topic=%s context=%s pin=%s nextHash=%+v author=%s", msg.Header.Group, topic, contextUnmasked, pin, nextPin, msg.Header.Author)
+		if logrus.IsLevelEnabled(logrus.DebugLevel) {
+			for _, np := range npg.nextPins {
+				l.Debugf("NextPin: context=%s author=%s nonce=%d hash=%s", np.Context, np.Identity, np.Nonce, np.Hash)
+			}
+		}
+		l.Warnf("Mismatched nexthash or author msg=%s group=%s topic=%s context=%s pin=%s nonce=%s nextHash=%+v author=%s", msg.Header.ID, msg.Header.Group, topic, contextUnmasked, pin, nonceStr, nextPin, msg.Header.Author)
 		return nil, nil
 	}
 	return &nextPinState{
@@ -215,12 +223,14 @@
 	}, err
 }
-func (bs *batchState) MarkMessageDispatched(ctx context.Context, batchID *fftypes.UUID, msg *fftypes.Message, msgBaseIndex int64) {
+func (bs *batchState) MarkMessageDispatched(ctx context.Context, batchID *fftypes.UUID, msg *fftypes.Message, msgBaseIndex int64, newState fftypes.MessageState) {
 	bs.dispatchedMessages = append(bs.dispatchedMessages, &dispatchedMessage{
 		batchID:       batchID,
 		msgID:         msg.Header.ID,
 		firstPinIndex: msgBaseIndex,
-		lastPinIndex:  msgBaseIndex + int64(len(msg.Header.Topics)) - 1,
+		topicCount:    len(msg.Header.Topics),
+		msgPins:       msg.Pins,
+		newState:      newState,
 	})
 }
@@ -236,6 +246,7 @@ func (bs *batchState) SetContextBlockedBy(ctx context.Context, unmaskedContext f
 }
 func (bs *batchState) flushPins(ctx context.Context) error {
+	l := log.L(ctx)
 	// Update all the next pins
 	for _, npg := range bs.maskedContexts {
@@ -260,20 +271,49 @@ func (bs *batchState) flushPins(ctx context.Context) error {
 	// using the index range of pins it owns within the batch it is a part of.
 	// Note that this might include pins not in the batch we read from the database, as the page size
 	// cannot be guaranteed to overlap with the set of indexes of a message within a batch.
+	pinsDispatched := make(map[fftypes.UUID][]driver.Value)
+	msgStateUpdates := make(map[fftypes.MessageState][]driver.Value)
 	for _, dm := range bs.dispatchedMessages {
+		batchDispatched := pinsDispatched[*dm.batchID]
+		l.Debugf("Marking message dispatched batch=%s msg=%s firstIndex=%d topics=%d pins=%s", dm.batchID, dm.msgID, dm.firstPinIndex, dm.topicCount, dm.msgPins)
+		for i := 0; i < dm.topicCount; i++ {
+			batchDispatched = append(batchDispatched, dm.firstPinIndex+int64(i))
+		}
+		if len(batchDispatched) > 0 {
+			pinsDispatched[*dm.batchID] = batchDispatched
+		}
+		msgStateUpdates[dm.newState] = append(msgStateUpdates[dm.newState], dm.msgID)
+	}
+
+	// Build one uber update for DB efficiency
+	if len(pinsDispatched) > 0 {
 		fb := database.PinQueryFactory.NewFilter(ctx)
-		filter := fb.And(
-			fb.Eq("batch", dm.batchID),
-			fb.Gte("index", dm.firstPinIndex),
-			fb.Lte("index", dm.lastPinIndex),
-		)
-		log.L(ctx).Debugf("Marking message dispatched batch=%s msg=%s firstIndex=%d lastIndex=%d", dm.batchID, dm.msgID, dm.firstPinIndex, dm.lastPinIndex)
+		filter := fb.Or()
+		for batchID, indexes := range pinsDispatched {
+			filter.Condition(fb.And(
+				fb.Eq("batch", batchID),
+				fb.In("index", indexes),
+			))
+		}
 		update := database.PinQueryFactory.NewUpdate(ctx).Set("dispatched", true)
 		if err := bs.database.UpdatePins(ctx, filter, update); err != nil {
 			return err
 		}
 	}
+	// Also do the same for each type of state update, to mark messages dispatched with a new state
+	confirmTime := fftypes.Now() // All messages get the same confirmed timestamp; the Events (not the Messages directly) should be used for confirm sequence
+	for msgState, msgIDs := range msgStateUpdates {
+		fb := database.MessageQueryFactory.NewFilter(ctx)
+		filter := fb.In("id", msgIDs)
+		setConfirmed := database.MessageQueryFactory.NewUpdate(ctx).
+			Set("confirmed", confirmTime).
+			Set("state", msgState)
+		if err := bs.database.UpdateMessages(ctx, filter, setConfirmed); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
diff --git a/internal/events/aggregator_batch_state_test.go b/internal/events/aggregator_batch_state_test.go
index c35ec69783..52404889e7 100644
--- a/internal/events/aggregator_batch_state_test.go
+++ b/internal/events/aggregator_batch_state_test.go
@@ -26,7 +26,7 @@ import (
 	"github.com/stretchr/testify/mock"
 )
-func TestFlushPinsFail(t *testing.T) {
+func TestFlushPinsFailUpdatePins(t *testing.T) {
 	ag, cancel := newTestAggregator()
 	defer cancel()
 	bs := newBatchState(ag)
@@ -36,9 +36,32 @@
 	bs.MarkMessageDispatched(ag.ctx, fftypes.NewUUID(), &fftypes.Message{
 		Header: fftypes.MessageHeader{
-			ID: fftypes.NewUUID(),
+			ID:     fftypes.NewUUID(),
+			Topics: fftypes.FFStringArray{"topic1"},
 		},
-	}, 0)
+		Pins: fftypes.FFStringArray{"pin1"},
+	}, 0, fftypes.MessageStateConfirmed)
+
+	err := bs.flushPins(ag.ctx)
+	assert.Regexp(t, "pop", err)
+}
+
+func TestFlushPinsFailUpdateMessages(t *testing.T) {
+	ag, cancel := newTestAggregator()
+	defer cancel()
+	bs := newBatchState(ag)
+
+	mdi := ag.database.(*databasemocks.Plugin)
+	mdi.On("UpdatePins", ag.ctx, mock.Anything, mock.Anything).Return(nil)
+	mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))
+
+	bs.MarkMessageDispatched(ag.ctx, fftypes.NewUUID(), &fftypes.Message{
+		Header: fftypes.MessageHeader{
+			ID:     fftypes.NewUUID(),
+			Topics: fftypes.FFStringArray{"topic1"},
+		},
+		Pins: fftypes.FFStringArray{"pin1"},
+	}, 0, fftypes.MessageStateConfirmed)
 	err := bs.flushPins(ag.ctx)
 	assert.Regexp(t, "pop", err)
diff --git a/internal/events/aggregator_test.go b/internal/events/aggregator_test.go
index 7b30307079..ffd608f98a 100644
--- a/internal/events/aggregator_test.go
+++ b/internal/events/aggregator_test.go
@@ -19,10 +19,12 @@ package events
 import (
 	"context"
 	"crypto/sha256"
+	"encoding/json"
 	"fmt"
 	"testing"
 	"github.com/hyperledger/firefly/internal/config"
+	"github.com/hyperledger/firefly/internal/data"
 	"github.com/hyperledger/firefly/internal/definitions"
 	"github.com/hyperledger/firefly/internal/log"
 	"github.com/hyperledger/firefly/mocks/blockchainmocks"
@@ -38,6 +40,7 @@ import (
 )
 func newTestAggregatorCommon(metrics bool) (*aggregator, func()) {
+	config.Reset()
 	mdi := &databasemocks.Plugin{}
 	mdm := &datamocks.Manager{}
 	msh := &definitionsmocks.DefinitionHandlers{}
@@ -62,6 +65,62 @@ func newTestAggregator() (*aggregator, func()) {
 	return newTestAggregatorCommon(false)
 }
+
+func newTestManifest(mType fftypes.MessageType, groupID *fftypes.Bytes32) (*fftypes.Message, *fftypes.Message, *fftypes.Identity, *fftypes.BatchManifest) {
+	org1 := newTestOrg("org1")
+
+	msg1 := &fftypes.Message{
+		Header: fftypes.MessageHeader{
+			Type:      mType,
+			ID:        fftypes.NewUUID(),
+			Namespace: "any",
+			Group:     groupID,
+			Topics:    fftypes.FFStringArray{"topic1"},
+			SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID},
+		},
+		Data: fftypes.DataRefs{
+			{ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()},
+		},
+	}
+	msg2 := &fftypes.Message{
+		Header: fftypes.MessageHeader{
+			Type:      mType,
+			ID:        fftypes.NewUUID(),
+			Group:     groupID,
+			Namespace: "any",
+			Topics:    fftypes.FFStringArray{"topic1"},
+			SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID},
+		},
+		Data: fftypes.DataRefs{
+			{ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()},
+		},
+	}
+
+	return msg1, msg2, org1, &fftypes.BatchManifest{
+		Version: 1,
+		ID:      fftypes.NewUUID(),
+		TX: fftypes.TransactionRef{
+			Type: fftypes.TransactionTypeBatchPin,
+			ID:   fftypes.NewUUID(),
+		},
+		Messages: []*fftypes.MessageManifestEntry{
+			{
+				MessageRef: fftypes.MessageRef{
+					ID:   msg1.Header.ID,
+					Hash: msg1.Hash,
+				},
+				Topics: len(msg1.Header.Topics),
+			},
+			{
+				MessageRef: fftypes.MessageRef{
+					ID:   msg2.Header.ID,
+					Hash: msg2.Hash,
+				},
+				Topics: len(msg2.Header.Topics),
+			},
+		},
+	}
+}
 func TestAggregationMaskedZeroNonceMatch(t *testing.T) {
 	ag, cancel := newTestAggregatorWithMetrics()
@@ -95,9 +154,10 @@ func TestAggregationMaskedZeroNonceMatch(t *testing.T) {
 		Value: member2key,
 	}).Return(member2org, nil)
-	// Get the batch
-	mdi.On("GetBatchByID", ag.ctx, batchID).Return(&fftypes.Batch{
-		ID: batchID,
+	batch := &fftypes.Batch{
+		BatchHeader: fftypes.BatchHeader{
+			ID: batchID,
+		},
 		Payload: fftypes.BatchPayload{
 			Messages: []*fftypes.Message{
 				{
@@ -111,14 +171,18 @@
 						Key: member2key,
 					},
 				},
-				Pins: []string{member2NonceZero.String()},
+				Pins: []string{fmt.Sprintf("%s:%.9d", member2NonceZero, 0)},
 				Data: fftypes.DataRefs{
 					{ID: fftypes.NewUUID()},
 				},
 			},
 			},
 		},
-	}, nil)
+	}
+	bp, _ := batch.Confirmed()
+
+	// Get the batch
+	mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil)
 	// Look for existing nextpins - none found, first on context
 	mdi.On("GetNextPins", ag.ctx, mock.Anything).Return([]*fftypes.NextPin{}, nil, nil).Once()
 	// Get the group members
@@ -144,7 +208,7 @@
 		return *np.Hash == *member2NonceOne && np.Nonce == 1
 	})).Return(nil).Once()
 	// Validate the message is ok
-	mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil)
+	mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePins).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil)
 	mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil)
 	// Insert the confirmed event
 	mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool {
@@ -153,7 +217,7 @@
 	// Set the pin to dispatched
 	mdi.On("UpdatePins", ag.ctx, mock.Anything, mock.Anything).Return(nil)
 	// Update the message
-	mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.MatchedBy(func(u database.Update) bool {
+	mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.MatchedBy(func(u database.Update) bool {
 		update, err := u.Finalize()
 		assert.NoError(t, err)
 		assert.Len(t, update.SetOperations, 2)
@@ -170,8 +234,6 @@
 		return true
 	})).Return(nil)
-	// Confirm the offset
-	mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil)
 	err := ag.processPins(ag.ctx, []*fftypes.Pin{
 		{
@@ -191,6 +253,9 @@
 	assert.NotNil(t, bs.GetPendingConfirm()[*msgID])
+	// Confirm the offset
+	assert.Equal(t, int64(10001), <-ag.eventPoller.offsetCommitted)
+
 	mdi.AssertExpectations(t)
 	mdm.AssertExpectations(t)
 }
@@ -232,9 +297,10 @@ func TestAggregationMaskedNextSequenceMatch(t *testing.T) {
 		rag.ReturnArguments = mock.Arguments{a[1].(func(context.Context) error)(a[0].(context.Context))}
 	}
-	// Get the batch
-	mdi.On("GetBatchByID", ag.ctx, batchID).Return(&fftypes.Batch{
-		ID: batchID,
+	batch := &fftypes.Batch{
+		BatchHeader: fftypes.BatchHeader{
+			ID: batchID,
+		},
 		Payload: fftypes.BatchPayload{
 			Messages: []*fftypes.Message{
 				{
@@ -255,14 +321,18 @@ func TestAggregationMaskedNextSequenceMatch(t *testing.T) {
 				},
 			},
 		},
-	}, nil)
+	}
+	bp, _ := batch.Confirmed()
+
+	// Get the batch
+	mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil)
 	// Look for existing nextpins - none found, first on context
 	mdi.On("GetNextPins", ag.ctx, mock.Anything).Return([]*fftypes.NextPin{
 		{Context: contextUnmasked, Identity: member1org.DID, Hash: member1Nonce100, Nonce: 100, Sequence: 929},
 		{Context: contextUnmasked, Identity: member2org.DID, Hash: member2Nonce500, Nonce: 500, Sequence: 424},
 	}, nil, nil).Once()
 	// Validate the message is ok
-	mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil)
+	mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePins).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil)
 	mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil)
 	// Insert the confirmed event
 	mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool {
@@ -284,9 +354,7 @@
 	// Set the pin to dispatched
 	mdi.On("UpdatePins", ag.ctx, mock.Anything, mock.Anything).Return(nil)
 	// Update the message
-	mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil)
-	// Confirm the offset
-	mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil)
+	mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.Anything).Return(nil)
 	_, err := ag.processPinsEventsHandler([]fftypes.LocallySequenced{
 		&fftypes.Pin{
@@ -301,6 +369,9 @@
 	})
 	assert.NoError(t, err)
+	// Confirm the offset
+	assert.Equal(t, int64(10001), <-ag.eventPoller.offsetCommitted)
+
 	mdi.AssertExpectations(t)
 	mdm.AssertExpectations(t)
 }
@@ -330,9 +401,98 @@ func TestAggregationBroadcast(t *testing.T) {
 		Value: member1key,
 	}).Return(member1org, nil)
+	batch := &fftypes.Batch{
+		BatchHeader: fftypes.BatchHeader{
+			ID: batchID,
+		},
+		Payload: fftypes.BatchPayload{
+			Messages: []*fftypes.Message{
+				{
+					Header: fftypes.MessageHeader{
+						ID:        msgID,
+						Topics:    []string{topic},
+						Namespace: "ns1",
+						SignerRef: fftypes.SignerRef{
+							Author: member1org.DID,
+							Key:    member1key,
+						},
+					},
+					Data: fftypes.DataRefs{
+						{ID: fftypes.NewUUID()},
+					},
+				},
+			},
+		},
+	}
+	bp, _ := batch.Confirmed()
+
 	// Get the batch
-	mdi.On("GetBatchByID", ag.ctx, batchID).Return(&fftypes.Batch{
-		ID: batchID,
+	mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil)
+	// Do not resolve any pins earlier
+	mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil)
+	// Validate the message is ok
+	mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil)
+	mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil)
+	// Insert the confirmed event
+	mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool {
+		return *e.Reference == *msgID && e.Type == fftypes.EventTypeMessageConfirmed
+	})).Return(nil)
+	// Set the pin to dispatched
+	mdi.On("UpdatePins", ag.ctx, mock.Anything, mock.Anything).Return(nil)
+	// Update the message
+	mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.Anything).Return(nil)
+
+	err := ag.processPins(ag.ctx, []*fftypes.Pin{
+		{
+			Sequence:   10001,
+			Hash:       contextUnmasked,
+			Batch:      batchID,
+			Index:      0,
+			Signer:     member1key,
+			Dispatched: false,
+		},
+	}, bs)
+	assert.NoError(t, err)
+
+	err = bs.RunFinalize(ag.ctx)
+	assert.NoError(t, err)
+
+	// Confirm the offset
+	assert.Equal(t, int64(10001), <-ag.eventPoller.offsetCommitted)
+
+	mdi.AssertExpectations(t)
+	mdm.AssertExpectations(t)
+}
+
+func TestAggregationMigratedBroadcast(t *testing.T) {
+
+	ag, cancel := newTestAggregator()
+	defer cancel()
+	bs := newBatchState(ag)
+
+	// Generate some pin data
+	member1org := newTestOrg("org1")
+	member1key := "0x12345"
+	topic := "some-topic"
+	batchID := fftypes.NewUUID()
+	msgID := fftypes.NewUUID()
+	h := sha256.New()
+	h.Write([]byte(topic))
+	contextUnmasked := fftypes.HashResult(h)
+
+	mdi := ag.database.(*databasemocks.Plugin)
+	mdm := ag.data.(*datamocks.Manager)
+	mim := ag.identity.(*identitymanagermocks.Manager)
+
+	mim.On("FindIdentityForVerifier", ag.ctx, []fftypes.IdentityType{fftypes.IdentityTypeOrg, fftypes.IdentityTypeCustom}, "ns1", &fftypes.VerifierRef{
+		Type:  fftypes.VerifierTypeEthAddress,
+		Value: member1key,
+	}).Return(member1org, nil)
+
+	batch := &fftypes.Batch{
+		BatchHeader: fftypes.BatchHeader{
+			ID: batchID,
+		},
 		Payload: fftypes.BatchPayload{
 			Messages: []*fftypes.Message{
 				{
@@ -351,11 +511,21 @@ func TestAggregationBroadcast(t *testing.T) {
 				},
 			},
 		},
-	}, nil)
+	}
+	payloadBinary, err := json.Marshal(&batch.Payload)
+	assert.NoError(t, err)
+	bp := &fftypes.BatchPersisted{
+		TX:          batch.Payload.TX,
+		BatchHeader: batch.BatchHeader,
+		Manifest:    fftypes.JSONAnyPtr(string(payloadBinary)),
+	}
+
+	// Get the batch
+	mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil)
 	// Do not resolve any pins earlier
 	mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil)
 	// Validate the message is ok
-	mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil)
+	mdm.On("GetMessageWithDataCached", ag.ctx, batch.Payload.Messages[0].Header.ID, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], fftypes.DataArray{}, true, nil)
 	mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil)
 	// Insert the confirmed event
 	mdi.On("InsertEvent", ag.ctx, mock.MatchedBy(func(e *fftypes.Event) bool {
@@ -364,9 +534,141 @@ func TestAggregationBroadcast(t *testing.T) {
 	// Set the pin to dispatched
 	mdi.On("UpdatePins", ag.ctx, mock.Anything, mock.Anything).Return(nil)
 	// Update the message
-	mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil)
+	mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.Anything).Return(nil)
+
+	err = ag.processPins(ag.ctx, []*fftypes.Pin{
+		{
+			Sequence:   10001,
+			Hash:       contextUnmasked,
+			Batch:      batchID,
+			Index:      0,
+			Signer:     member1key,
+			Dispatched: false,
+		},
+	}, bs)
+	assert.NoError(t, err)
+
+	err = bs.RunFinalize(ag.ctx)
+	assert.NoError(t, err)
+
+	// Confirm the offset
+	assert.Equal(t, int64(10001), <-ag.eventPoller.offsetCommitted)
+
+	mdi.AssertExpectations(t)
+	mdm.AssertExpectations(t)
+}
+
+func TestAggregationMigratedBroadcastNilMessageID(t *testing.T) {
+
+	ag, cancel := newTestAggregator()
+	defer cancel()
+	bs := newBatchState(ag)
+
+	// Generate some pin data
+	member1org := newTestOrg("org1")
+	member1key := "0x12345"
+	topic := "some-topic"
+	batchID := fftypes.NewUUID()
+	h := sha256.New()
+	h.Write([]byte(topic))
+	contextUnmasked := fftypes.HashResult(h)
+
+	mdi := ag.database.(*databasemocks.Plugin)
+	mdm := ag.data.(*datamocks.Manager)
+	mim := ag.identity.(*identitymanagermocks.Manager)
+
+	mim.On("FindIdentityForVerifier", ag.ctx, []fftypes.IdentityType{fftypes.IdentityTypeOrg, fftypes.IdentityTypeCustom}, "ns1", &fftypes.VerifierRef{
Type: fftypes.VerifierTypeEthAddress, + Value: member1key, + }).Return(member1org, nil) + + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, + Payload: fftypes.BatchPayload{ + Messages: []*fftypes.Message{{ + Header: fftypes.MessageHeader{ + Topics: fftypes.FFStringArray{"topic1"}, + }, + }}, + }, + } + payloadBinary, err := json.Marshal(&batch.Payload) + assert.NoError(t, err) + bp := &fftypes.BatchPersisted{ + TX: batch.Payload.TX, + BatchHeader: batch.BatchHeader, + Manifest: fftypes.JSONAnyPtr(string(payloadBinary)), + } + + mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) + + err = ag.processPins(ag.ctx, []*fftypes.Pin{ + { + Sequence: 10001, + Hash: contextUnmasked, + Batch: batchID, + Index: 0, + Signer: member1key, + Dispatched: false, + }, + }, bs) + assert.NoError(t, err) + + err = bs.RunFinalize(ag.ctx) + assert.NoError(t, err) + // Confirm the offset - mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + assert.Equal(t, int64(10001), <-ag.eventPoller.offsetCommitted) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestAggregationMigratedBroadcastInvalid(t *testing.T) { + + ag, cancel := newTestAggregator() + defer cancel() + bs := newBatchState(ag) + + // Generate some pin data + member1org := newTestOrg("org1") + member1key := "0x12345" + topic := "some-topic" + batchID := fftypes.NewUUID() + h := sha256.New() + h.Write([]byte(topic)) + contextUnmasked := fftypes.HashResult(h) + + mdi := ag.database.(*databasemocks.Plugin) + mdm := ag.data.(*datamocks.Manager) + mim := ag.identity.(*identitymanagermocks.Manager) + + mim.On("FindIdentityForVerifier", ag.ctx, []fftypes.IdentityType{fftypes.IdentityTypeOrg, fftypes.IdentityTypeCustom}, "ns1", &fftypes.VerifierRef{ + Type: fftypes.VerifierTypeEthAddress, + Value: member1key, + }).Return(member1org, nil) + + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, + Payload: fftypes.BatchPayload{ + Messages: []*fftypes.Message{{ + Header: fftypes.MessageHeader{ + Topics: fftypes.FFStringArray{"topic1"}, + }, + }}, + }, + } + bp := &fftypes.BatchPersisted{ + TX: batch.Payload.TX, + BatchHeader: batch.BatchHeader, + Manifest: fftypes.JSONAnyPtr("{}"), + } + + mdi.On("GetBatchByID", ag.ctx, batchID).Return(bp, nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ { @@ -383,6 +685,9 @@ func TestAggregationBroadcast(t *testing.T) { err = bs.RunFinalize(ag.ctx) assert.NoError(t, err) + // Confirm the offset + assert.Equal(t, int64(10001), <-ag.eventPoller.offsetCommitted) + mdi.AssertExpectations(t) mdm.AssertExpectations(t) } @@ -434,7 +739,7 @@ func TestGetPins(t *testing.T) { {Sequence: 12345}, }, nil, nil) - lc, err := ag.getPins(ag.ctx, database.EventQueryFactory.NewFilter(ag.ctx).Gte("sequence", 12345)) + lc, err := ag.getPins(ag.ctx, database.EventQueryFactory.NewFilter(ag.ctx).Gte("sequence", 12345), 12345) assert.NoError(t, err) assert.Equal(t, int64(12345), lc[0].LocalSequence()) } @@ -446,13 +751,15 @@ func TestProcessPinsMissingBatch(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(nil, nil) - mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: fftypes.NewUUID()}, }, bs) assert.NoError(t, err) + // Confirm the offset + assert.Equal(t, int64(12345), <-ag.eventPoller.offsetCommitted) + } func TestProcessPinsMissingNoMsg(t *testing.T) { @@ 
-460,16 +767,20 @@ func TestProcessPinsMissingNoMsg(t *testing.T) { defer cancel() bs := newBatchState(ag) - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(&fftypes.Batch{ - ID: fftypes.NewUUID(), + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, }, }, - }, nil) - mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + } + bp, _ := batch.Confirmed() + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(bp, nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: fftypes.NewUUID(), Index: 25}, @@ -477,6 +788,9 @@ func TestProcessPinsMissingNoMsg(t *testing.T) { assert.NoError(t, err) mdi.AssertExpectations(t) + // Confirm the offset + assert.Equal(t, int64(12345), <-ag.eventPoller.offsetCommitted) + } func TestProcessPinsBadMsgHeader(t *testing.T) { @@ -484,9 +798,10 @@ func TestProcessPinsBadMsgHeader(t *testing.T) { defer cancel() bs := newBatchState(ag) - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(&fftypes.Batch{ - ID: fftypes.NewUUID(), + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ @@ -495,13 +810,20 @@ func TestProcessPinsBadMsgHeader(t *testing.T) { }}, }, }, - }, nil) - mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + } + bp, _ := batch.Confirmed() + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(bp, nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: fftypes.NewUUID(), Index: 0}, }, bs) assert.NoError(t, err) + + // Confirm the offset + assert.Equal(t, int64(12345), <-ag.eventPoller.offsetCommitted) + mdi.AssertExpectations(t) } @@ -512,9 +834,10 @@ func TestProcessSkipDupMsg(t *testing.T) { bs := newBatchState(ag) batchID := fftypes.NewUUID() - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(&fftypes.Batch{ - ID: batchID, + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ @@ -523,18 +846,29 @@ func TestProcessSkipDupMsg(t *testing.T) { }}, }, }, - }, nil).Once() + } + bp, _ := batch.Confirmed() + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(bp, nil).Once() mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{ {Sequence: 1111}, // blocks the context }, nil, nil) - mdi.On("UpdateOffset", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], nil, true, nil) err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: batchID, Index: 0, Hash: fftypes.NewRandB32()}, {Sequence: 12345, Batch: batchID, Index: 1, Hash: fftypes.NewRandB32()}, }, bs) assert.NoError(t, err) + + // Confirm the offset + assert.Equal(t, int64(12345), <-ag.eventPoller.offsetCommitted) + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) } @@ -544,9 +878,10 @@ func 
TestProcessMsgFailGetPins(t *testing.T) { bs := newBatchState(ag) batchID := fftypes.NewUUID() - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(&fftypes.Batch{ - ID: batchID, + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ {Header: fftypes.MessageHeader{ @@ -555,39 +890,91 @@ func TestProcessMsgFailGetPins(t *testing.T) { }}, }, }, - }, nil).Once() + } + bp, _ := batch.Confirmed() + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, mock.Anything).Return(bp, nil).Once() mdi.On("GetPins", mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePublicBlobRefs).Return(batch.Payload.Messages[0], nil, true, nil) + err := ag.processPins(ag.ctx, []*fftypes.Pin{ {Sequence: 12345, Batch: batchID, Index: 0, Hash: fftypes.NewRandB32()}, }, bs) assert.EqualError(t, err, "pop") mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestProcessMsgFailData(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(nil, nil, false, fmt.Errorf("pop")) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) + assert.Regexp(t, "pop", err) + + mdm.AssertExpectations(t) +} + +func TestProcessMsgFailMissingData(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(&fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, nil, false, nil) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) + assert.NoError(t, err) + + mdm.AssertExpectations(t) } func TestProcessMsgFailMissingGroup(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.Message{}, nil) + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(&fftypes.Message{Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}}, nil, true, nil) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{}, nil) assert.NoError(t, err) + mdm.AssertExpectations(t) } func TestProcessMsgFailBadPin(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.Message{ + msg := &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), Group: fftypes.NewRandB32(), Topics: fftypes.FFStringArray{"topic1"}, }, + Hash: fftypes.NewRandB32(), Pins: fftypes.FFStringArray{"!Wrong"}, + } + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(msg, nil, true, nil) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{ + MessageRef: fftypes.MessageRef{ + 
ID: msg.Header.ID, + Hash: msg.Hash, + }, + Topics: len(msg.Header.Topics), }, newBatchState(ag)) assert.NoError(t, err) + mdm.AssertExpectations(t) + } func TestProcessMsgFailGetNextPins(t *testing.T) { @@ -597,16 +984,30 @@ func TestProcessMsgFailGetNextPins(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetNextPins", ag.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.Message{ + msg := &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), Group: fftypes.NewRandB32(), Topics: fftypes.FFStringArray{"topic1"}, }, Pins: fftypes.FFStringArray{fftypes.NewRandB32().String()}, + } + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(msg, nil, true, nil) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Masked: true, Sequence: 12345}, 10, &fftypes.MessageManifestEntry{ + MessageRef: fftypes.MessageRef{ + ID: msg.Header.ID, + Hash: msg.Hash, + }, + Topics: len(msg.Header.Topics), }, newBatchState(ag)) assert.EqualError(t, err, "pop") + mdm.AssertExpectations(t) + mdi.AssertExpectations(t) + } func TestProcessMsgFailDispatch(t *testing.T) { @@ -615,18 +1016,36 @@ func TestProcessMsgFailDispatch(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetPins", ag.ctx, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Sequence: 12345}, 10, &fftypes.Message{ + msg := &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), Topics: fftypes.FFStringArray{"topic1"}, + SignerRef: fftypes.SignerRef{ + Key: "0x12345", + }, }, Pins: fftypes.FFStringArray{fftypes.NewRandB32().String()}, + } + + mdm := ag.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePublicBlobRefs).Return(msg, nil, true, nil) + + mim := ag.identity.(*identitymanagermocks.Manager) + mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{}, &fftypes.Pin{Sequence: 12345, Signer: "0x12345"}, 10, &fftypes.MessageManifestEntry{ + MessageRef: fftypes.MessageRef{ + ID: msg.Header.ID, + Hash: msg.Hash, + }, + Topics: len(msg.Header.Topics), }, newBatchState(ag)) assert.EqualError(t, err, "pop") + mdm.AssertExpectations(t) + mdi.AssertExpectations(t) + } func TestProcessMsgFailPinUpdate(t *testing.T) { @@ -640,6 +1059,20 @@ func TestProcessMsgFailPinUpdate(t *testing.T) { mdm := ag.data.(*datamocks.Manager) mim := ag.identity.(*identitymanagermocks.Manager) + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Group: fftypes.NewRandB32(), + Topics: fftypes.FFStringArray{"topic1"}, + Namespace: "ns1", + SignerRef: fftypes.SignerRef{ + Author: org1.DID, + Key: "0x12345", + }, + }, + Pins: fftypes.FFStringArray{pin.String()}, + } + mim.On("FindIdentityForVerifier", ag.ctx, []fftypes.IdentityType{fftypes.IdentityTypeOrg, fftypes.IdentityTypeCustom}, "ns1", &fftypes.VerifierRef{ Type: fftypes.VerifierTypeEthAddress, Value: "0x12345", @@ -647,24 +1080,20 @@ func TestProcessMsgFailPinUpdate(t *testing.T) { mdi.On("GetNextPins", ag.ctx, mock.Anything).Return([]*fftypes.NextPin{ 
{Context: fftypes.NewRandB32(), Hash: pin, Identity: org1.DID}, }, nil, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything, data.CRORequirePins).Return(msg, nil, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(false, nil) mdi.On("InsertEvent", ag.ctx, mock.Anything).Return(nil) - mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil) + mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("UpdateNextPin", ag.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := ag.processMessage(ag.ctx, &fftypes.Batch{}, &fftypes.Pin{Masked: true, Sequence: 12345, Signer: "0x12345"}, 10, &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - Group: fftypes.NewRandB32(), - Topics: fftypes.FFStringArray{"topic1"}, - Namespace: "ns1", - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - Key: "0x12345", - }, + err := ag.processMessage(ag.ctx, &fftypes.BatchManifest{ + ID: fftypes.NewUUID(), + }, &fftypes.Pin{Masked: true, Sequence: 12345, Signer: "0x12345"}, 10, &fftypes.MessageManifestEntry{ + MessageRef: fftypes.MessageRef{ + ID: msg.Header.ID, + Hash: msg.Hash, }, - Pins: fftypes.FFStringArray{pin.String()}, + Topics: len(msg.Header.Topics), }, bs) assert.NoError(t, err) @@ -686,14 +1115,16 @@ func TestCheckMaskedContextReadyMismatchedAuthor(t *testing.T) { bs := newBatchState(ag) _, err := bs.CheckMaskedContextReady(ag.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - Group: fftypes.NewRandB32(), + ID: fftypes.NewUUID(), + Group: fftypes.NewRandB32(), + Tag: fftypes.SystemTagDefineDatatype, + Topics: fftypes.FFStringArray{"topic1"}, SignerRef: fftypes.SignerRef{ Author: "author1", Key: "0x12345", }, }, - }, "topic1", 12345, fftypes.NewRandB32()) + }, "topic1", 12345, fftypes.NewRandB32(), "12345") assert.NoError(t, err) } @@ -908,20 +1339,6 @@ func TestAttemptContextInitInsertPinsFail(t *testing.T) { } -func TestAttemptMessageDispatchFailGetData(t *testing.T) { - ag, cancel := newTestAggregator() - defer cancel() - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) - - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, - }, nil, nil, nil) - assert.EqualError(t, err, "pop") - -} - func TestAttemptMessageDispatchFailValidateData(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() @@ -931,15 +1348,15 @@ func TestAttemptMessageDispatchFailValidateData(t *testing.T) { org1 := newTestOrg("org1") mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(false, fmt.Errorf("pop")) - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ + _, _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}}, Data: fftypes.DataRefs{ {ID: fftypes.NewUUID()}, }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + }, fftypes.DataArray{}, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.EqualError(t, err, 
"pop") } @@ -950,25 +1367,21 @@ func TestAttemptMessageDispatchMissingBlobs(t *testing.T) { blobHash := fftypes.NewRandB32() - mdm := ag.data.(*datamocks.Manager) mim := ag.identity.(*identitymanagermocks.Manager) org1 := newTestOrg("org1") mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{ - Hash: blobHash, - Public: "public-ref", - }}, - }, true, nil) mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", ag.ctx, blobHash).Return(nil, nil) - mdm.On("CopyBlobPStoDX", ag.ctx, mock.Anything).Return(nil, nil) - - dispatched, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ + _, dispatched, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}}, + }, fftypes.DataArray{ + {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{ + Hash: blobHash, + Public: "public-ref", + }}, }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, dispatched) @@ -983,10 +1396,6 @@ func TestAttemptMessageDispatchMissingTransfers(t *testing.T) { org1 := newTestOrg("org1") mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetTokenTransfers", ag.ctx, mock.Anything).Return([]*fftypes.TokenTransfer{}, nil, nil) @@ -1001,11 +1410,10 @@ func TestAttemptMessageDispatchMissingTransfers(t *testing.T) { }, } msg.Hash = msg.Header.Hash() - dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, fftypes.DataArray{}, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, dispatched) - mdm.AssertExpectations(t) mdi.AssertExpectations(t) } @@ -1018,9 +1426,6 @@ func TestAttemptMessageDispatchGetTransfersFail(t *testing.T) { org1 := newTestOrg("org1") mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetTokenTransfers", ag.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) @@ -1032,11 +1437,10 @@ func TestAttemptMessageDispatchGetTransfersFail(t *testing.T) { }, } msg.Hash = msg.Header.Hash() - dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, fftypes.DataArray{}, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.EqualError(t, err, "pop") assert.False(t, dispatched) - mdm.AssertExpectations(t) mdi.AssertExpectations(t) } @@ -1063,17 +1467,14 @@ func TestAttemptMessageDispatchTransferMismatch(t *testing.T) { mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, 
true).Return([]*fftypes.Data{}, true, nil) - mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetTokenTransfers", ag.ctx, mock.Anything).Return(transfers, nil, nil) - dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, dispatched, err := ag.attemptMessageDispatch(ag.ctx, msg, fftypes.DataArray{}, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, dispatched) - mdm.AssertExpectations(t) + mim.AssertExpectations(t) mdi.AssertExpectations(t) } @@ -1093,10 +1494,10 @@ func TestDefinitionBroadcastActionRejectCustomCorrelator(t *testing.T) { Return(definitions.HandlerResult{Action: definitions.ActionReject, CustomCorrelator: customCorrelator}, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(fftypes.DataArray{}, true, nil) mdi := ag.database.(*databasemocks.Plugin) - mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.MatchedBy(func(u database.Update) bool { + mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.MatchedBy(func(u database.Update) bool { update, err := u.Finalize() assert.NoError(t, err) assert.Len(t, update.SetOperations, 2) @@ -1117,17 +1518,19 @@ func TestDefinitionBroadcastActionRejectCustomCorrelator(t *testing.T) { return event.Correlator.Equals(customCorrelator) })).Return(nil) - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ + _, _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ Type: fftypes.MessageTypeDefinition, ID: fftypes.NewUUID(), Namespace: "any", SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, + Tag: fftypes.SystemTagDefineDatatype, + Topics: fftypes.FFStringArray{"topic1"}, }, Data: fftypes.DataRefs{ {ID: fftypes.NewUUID()}, }, - }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) + }, fftypes.DataArray{}, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) err = bs.RunFinalize(ag.ctx) assert.NoError(t, err) @@ -1144,10 +1547,10 @@ func TestDefinitionBroadcastInvalidSigner(t *testing.T) { mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return(fftypes.DataArray{}, true, nil) mdi := ag.database.(*databasemocks.Plugin) - mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.MatchedBy(func(u database.Update) bool { + mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.MatchedBy(func(u database.Update) bool { update, err := u.Finalize() assert.NoError(t, err) assert.Len(t, update.SetOperations, 2) @@ -1166,7 +1569,7 @@ func TestDefinitionBroadcastInvalidSigner(t *testing.T) { })).Return(nil) mdi.On("InsertEvent", ag.ctx, mock.Anything).Return(nil) - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ + _, _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ Type: fftypes.MessageTypeDefinition, ID: fftypes.NewUUID(), @@ -1176,7 +1579,7 @@ func TestDefinitionBroadcastInvalidSigner(t *testing.T) { Data: fftypes.DataRefs{ {ID: fftypes.NewUUID()}, }, - }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) + }, fftypes.DataArray{}, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) } @@ -1185,49 +1588,24 @@ func TestDispatchBroadcastQueuesLaterDispatch(t 
*testing.T) { defer cancel() bs := newBatchState(ag) - org1 := newTestOrg("org1") + msg1, msg2, org1, manifest := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, false, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID, data.CRORequirePublicBlobRefs).Return(msg1, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID, data.CRORequirePublicBlobRefs).Return(msg2, fftypes.DataArray{}, true, nil).Once() mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetPins", ag.ctx, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) - msg1 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - } - msg2 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - } - - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{msg1, msg2}, - }, - } - // First message should dispatch - err := ag.processMessage(ag.ctx, batch, &fftypes.Pin{Sequence: 12345}, 0, msg1, bs) + err := ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Sequence: 12345}, 0, manifest.Messages[0], bs) assert.NoError(t, err) // Second message should not (mocks have Once limit on GetMessageData to confirm) - err = ag.processMessage(ag.ctx, batch, &fftypes.Pin{Sequence: 12346}, 0, msg1, bs) + err = ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Sequence: 12346}, 0, manifest.Messages[1], bs) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -1239,15 +1617,16 @@ func TestDispatchPrivateQueuesLaterDispatch(t *testing.T) { defer cancel() bs := newBatchState(ag) - org1 := newTestOrg("org1") + groupID := fftypes.NewRandB32() + msg1, msg2, org1, manifest := newTestManifest(fftypes.MessageTypePrivate, groupID) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, false, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID, data.CRORequirePins).Return(msg1, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID, data.CRORequirePins).Return(msg2, fftypes.DataArray{}, true, nil).Once() - groupID := fftypes.NewRandB32() initNPG := &nextPinGroupState{topic: "topic1", groupID: groupID} member1NonceOne := initNPG.calcPinHash("org1", 1) member1NonceTwo := initNPG.calcPinHash("org1", 2) @@ -1260,46 +1639,15 @@ func TestDispatchPrivateQueuesLaterDispatch(t *testing.T) { {Context: context, Nonce: 1 /* match member1NonceOne */, Identity: org1.DID, Hash: member1NonceOne}, }, nil, nil) - msg1 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypePrivate, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - Group: groupID, - SignerRef: 
fftypes.SignerRef{ - Author: org1.DID, - }, - }, - Pins: fftypes.FFStringArray{member1NonceOne.String()}, - } - msg2 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypePrivate, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - }, - }, - Pins: fftypes.FFStringArray{member1NonceTwo.String()}, - } - - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{msg1, msg2}, - }, - } + msg1.Pins = fftypes.FFStringArray{member1NonceOne.String()} + msg2.Pins = fftypes.FFStringArray{member1NonceTwo.String()} // First message should dispatch - err := ag.processMessage(ag.ctx, batch, &fftypes.Pin{Masked: true, Sequence: 12345}, 0, msg1, bs) + err := ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Masked: true, Sequence: 12345}, 0, manifest.Messages[0], bs) assert.NoError(t, err) // Second message should not (mocks have Once limit on GetMessageData to confirm) - err = ag.processMessage(ag.ctx, batch, &fftypes.Pin{Masked: true, Sequence: 12346}, 0, msg2, bs) + err = ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Masked: true, Sequence: 12346}, 0, manifest.Messages[1], bs) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -1311,15 +1659,17 @@ func TestDispatchPrivateNextPinIncremented(t *testing.T) { defer cancel() bs := newBatchState(ag) - org1 := newTestOrg("org1") + groupID := fftypes.NewRandB32() + msg1, msg2, org1, manifest := newTestManifest(fftypes.MessageTypePrivate, groupID) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil).Twice() + mdm.On("GetMessageWithDataCached", ag.ctx, msg1.Header.ID, data.CRORequirePins).Return(msg1, fftypes.DataArray{}, true, nil).Once() + mdm.On("GetMessageWithDataCached", ag.ctx, msg2.Header.ID, data.CRORequirePins).Return(msg2, fftypes.DataArray{}, true, nil).Once() + mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - groupID := fftypes.NewRandB32() initNPG := &nextPinGroupState{topic: "topic1", groupID: groupID} member1NonceOne := initNPG.calcPinHash(org1.DID, 1) member1NonceTwo := initNPG.calcPinHash(org1.DID, 2) @@ -1332,48 +1682,15 @@ func TestDispatchPrivateNextPinIncremented(t *testing.T) { {Context: context, Nonce: 1 /* match member1NonceOne */, Identity: org1.DID, Hash: member1NonceOne}, }, nil, nil) - msg1 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypePrivate, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - Key: "0x12345", - }, - }, - Pins: fftypes.FFStringArray{member1NonceOne.String()}, - } - msg2 := &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypePrivate, - ID: fftypes.NewUUID(), - Namespace: "any", - Topics: fftypes.FFStringArray{"topic1"}, - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: org1.DID, - Key: "0x12345", - }, - }, - Pins: fftypes.FFStringArray{member1NonceTwo.String()}, - } - - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{msg1, msg2}, - }, - } + msg1.Pins = fftypes.FFStringArray{member1NonceOne.String()} + msg2.Pins = 
fftypes.FFStringArray{member1NonceTwo.String()} // First message should dispatch - err := ag.processMessage(ag.ctx, batch, &fftypes.Pin{Masked: true, Sequence: 12345, Signer: "0x12345"}, 0, msg1, bs) + err := ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Masked: true, Sequence: 12345, Signer: "0x12345"}, 0, manifest.Messages[0], bs) assert.NoError(t, err) // Second message should dispatch too (Twice on GetMessageData) - err = ag.processMessage(ag.ctx, batch, &fftypes.Pin{Masked: true, Sequence: 12346, Signer: "0x12345"}, 0, msg2, bs) + err = ag.processMessage(ag.ctx, manifest, &fftypes.Pin{Masked: true, Sequence: 12346, Signer: "0x12345"}, 0, manifest.Messages[1], bs) assert.NoError(t, err) mdi.AssertExpectations(t) @@ -1384,7 +1701,7 @@ func TestDefinitionBroadcastActionRetry(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") + msg1, _, org1, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) @@ -1393,19 +1710,9 @@ func TestDefinitionBroadcastActionRetry(t *testing.T) { msh.On("HandleDefinitionBroadcast", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(definitions.HandlerResult{Action: definitions.ActionRetry}, fmt.Errorf("pop")) mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageWithDataCached", ag.ctx, mock.Anything).Return(msg1, fftypes.DataArray{}, true, nil) - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, _, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.EqualError(t, err, "pop") } @@ -1414,29 +1721,15 @@ func TestDefinitionBroadcastRejectSignerLookupFail(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) - valid, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.Regexp(t, "pop", err) assert.False(t, valid) - mdm.AssertExpectations(t) mim.AssertExpectations(t) } @@ -1444,29 +1737,15 @@ func TestDefinitionBroadcastRejectSignerLookupWrongOrg(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") - - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, 
true).Return([]*fftypes.Data{}, true, nil) + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(newTestOrg("org2"), nil) - valid, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, valid) - mdm.AssertExpectations(t) mim.AssertExpectations(t) } @@ -1474,22 +1753,10 @@ func TestDefinitionBroadcastRejectBadSigner(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") + msg1, _, org1, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) + msg1.Header.SignerRef = fftypes.SignerRef{Key: "0x23456", Author: org1.DID} - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - - valid, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x23456", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, valid) @@ -1499,7 +1766,8 @@ func TestDefinitionBroadcastRejectUnregisteredSignerIdentityClaim(t *testing.T) ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) + msg1.Header.Tag = fftypes.SystemTagIdentityClaim mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) @@ -1507,34 +1775,35 @@ func TestDefinitionBroadcastRejectUnregisteredSignerIdentityClaim(t *testing.T) msh := ag.definitions.(*definitionsmocks.DefinitionHandlers) msh.On("HandleDefinitionBroadcast", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(definitions.HandlerResult{Action: definitions.ActionWait}, nil) - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - - valid, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - Tag: fftypes.SystemTagIdentityClaim, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) assert.False(t, valid) mim.AssertExpectations(t) msh.AssertExpectations(t) - mdm.AssertExpectations(t) +} + +func TestDefinitionBroadcastRootUnregisteredOk(t *testing.T) { + ag, cancel := newTestAggregator() + 
defer cancel() + + msg1, _, _, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) + + mim := ag.identity.(*identitymanagermocks.Manager) + mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + + _, valid, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + assert.NoError(t, err) + assert.False(t, valid) + + mim.AssertExpectations(t) } func TestDefinitionBroadcastActionWait(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - org1 := newTestOrg("org1") + msg1, _, org1, _ := newTestManifest(fftypes.MessageTypeDefinition, nil) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) @@ -1542,48 +1811,40 @@ func TestDefinitionBroadcastActionWait(t *testing.T) { msh := ag.definitions.(*definitionsmocks.DefinitionHandlers) msh.On("HandleDefinitionBroadcast", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(definitions.HandlerResult{Action: definitions.ActionWait}, nil) - mdm := ag.data.(*datamocks.Manager) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ - Type: fftypes.MessageTypeDefinition, - ID: fftypes.NewUUID(), - Namespace: "any", - SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, - }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, - }, - }, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) + _, _, err := ag.attemptMessageDispatch(ag.ctx, msg1, nil, nil, &batchState{}, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) + mim.AssertExpectations(t) + msh.AssertExpectations(t) + } func TestAttemptMessageDispatchEventFail(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() bs := newBatchState(ag) - org1 := newTestOrg("org1") + msg1, _, org1, _ := newTestManifest(fftypes.MessageTypeBroadcast, nil) mdi := ag.database.(*databasemocks.Plugin) mdm := ag.data.(*datamocks.Manager) mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", ag.ctx, mock.Anything).Return(fmt.Errorf("pop")) - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}}, + _, _, err := ag.attemptMessageDispatch(ag.ctx, msg1, fftypes.DataArray{ + &fftypes.Data{ID: msg1.Data[0].ID}, }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) err = bs.RunFinalize(ag.ctx) assert.EqualError(t, err, "pop") + mim.AssertExpectations(t) + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) + } func TestAttemptMessageDispatchGroupInit(t *testing.T) { @@ -1597,53 +1858,27 @@ func TestAttemptMessageDispatchGroupInit(t *testing.T) { mim := ag.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + mdm.On("GetMessageData", ag.ctx, mock.Anything, 
true).Return(fftypes.DataArray{}, true, nil) mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", ag.ctx, mock.Anything).Return(nil) - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ + _, _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ Header: fftypes.MessageHeader{ ID: fftypes.NewUUID(), Type: fftypes.MessageTypeGroupInit, SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}, }, - }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) + }, nil, nil, bs, &fftypes.Pin{Signer: "0x12345"}) assert.NoError(t, err) } -func TestAttemptMessageUpdateMessageFail(t *testing.T) { - ag, cancel := newTestAggregator() - defer cancel() - bs := newBatchState(ag) - org1 := newTestOrg("org1") - - mdi := ag.database.(*databasemocks.Plugin) - mdm := ag.data.(*datamocks.Manager) - mim := ag.identity.(*identitymanagermocks.Manager) - - mim.On("FindIdentityForVerifier", ag.ctx, mock.Anything, mock.Anything, mock.Anything).Return(org1, nil) - mdm.On("GetMessageData", ag.ctx, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) - mdm.On("ValidateAll", ag.ctx, mock.Anything).Return(true, nil) - mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - - _, err := ag.attemptMessageDispatch(ag.ctx, &fftypes.Message{ - Header: fftypes.MessageHeader{ID: fftypes.NewUUID(), SignerRef: fftypes.SignerRef{Key: "0x12345", Author: org1.DID}}, - }, nil, bs, &fftypes.Pin{Signer: "0x12345"}) - assert.NoError(t, err) - - err = bs.RunFinalize(ag.ctx) - assert.EqualError(t, err, "pop") - -} - func TestRewindOffchainBatchesNoBatches(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() mdi := ag.database.(*databasemocks.Plugin) - mdi.On("UpdateMessage", ag.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + mdi.On("UpdateMessages", ag.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) rewind, offset := ag.rewindOffchainBatches() assert.False(t, rewind) @@ -1657,10 +1892,10 @@ func TestRewindOffchainBatchesBatchesNoRewind(t *testing.T) { defer cancel() go ag.batchRewindListener() - ag.rewindBatches <- fftypes.NewUUID() - ag.rewindBatches <- fftypes.NewUUID() - ag.rewindBatches <- fftypes.NewUUID() - ag.rewindBatches <- fftypes.NewUUID() + ag.rewindBatches <- *fftypes.NewUUID() + ag.rewindBatches <- *fftypes.NewUUID() + ag.rewindBatches <- *fftypes.NewUUID() + ag.rewindBatches <- *fftypes.NewUUID() mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetPins", ag.ctx, mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) @@ -1677,10 +1912,10 @@ func TestRewindOffchainBatchesBatchesRewind(t *testing.T) { defer cancel() go ag.batchRewindListener() - ag.rewindBatches <- fftypes.NewUUID() - ag.rewindBatches <- fftypes.NewUUID() - ag.rewindBatches <- fftypes.NewUUID() - ag.rewindBatches <- fftypes.NewUUID() + ag.rewindBatches <- *fftypes.NewUUID() + ag.rewindBatches <- *fftypes.NewUUID() + ag.rewindBatches <- *fftypes.NewUUID() + ag.rewindBatches <- *fftypes.NewUUID() mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetPins", ag.ctx, mock.Anything, mock.Anything).Return([]*fftypes.Pin{ @@ -1698,7 +1933,7 @@ func TestRewindOffchainBatchesBatchesError(t *testing.T) { ag, cancel := newTestAggregator() cancel() - ag.queuedRewinds <- fftypes.NewUUID() + ag.queuedRewinds <- *fftypes.NewUUID() mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetPins", ag.ctx, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) @@ 
-1711,7 +1946,7 @@ func TestResolveBlobsNoop(t *testing.T) { ag, cancel := newTestAggregator() defer cancel() - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{}}, }) @@ -1726,7 +1961,7 @@ func TestResolveBlobsErrorGettingHash(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), }}, @@ -1743,7 +1978,7 @@ func TestResolveBlobsNotFoundPrivate(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(nil, nil) - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), }}, @@ -1760,7 +1995,7 @@ func TestResolveBlobsFoundPrivate(t *testing.T) { mdi := ag.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(&fftypes.Blob{}, nil) - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ + resolved, err := ag.resolveBlobs(ag.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), }}, @@ -1770,69 +2005,6 @@ func TestResolveBlobsFoundPrivate(t *testing.T) { assert.True(t, resolved) } -func TestResolveBlobsCopyNotFound(t *testing.T) { - ag, cancel := newTestAggregator() - defer cancel() - - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(nil, nil) - - mdm := ag.data.(*datamocks.Manager) - mdm.On("CopyBlobPStoDX", ag.ctx, mock.Anything).Return(nil, nil) - - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ - {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ - Hash: fftypes.NewRandB32(), - Public: "public-ref", - }}, - }) - - assert.NoError(t, err) - assert.False(t, resolved) -} - -func TestResolveBlobsCopyFail(t *testing.T) { - ag, cancel := newTestAggregator() - defer cancel() - - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(nil, nil) - - mdm := ag.data.(*datamocks.Manager) - mdm.On("CopyBlobPStoDX", ag.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ - {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ - Hash: fftypes.NewRandB32(), - Public: "public-ref", - }}, - }) - - assert.EqualError(t, err, "pop") - assert.False(t, resolved) -} - -func TestResolveBlobsCopyOk(t *testing.T) { - ag, cancel := newTestAggregator() - defer cancel() - - mdi := ag.database.(*databasemocks.Plugin) - mdi.On("GetBlobMatchingHash", ag.ctx, mock.Anything).Return(nil, nil) - - mdm := ag.data.(*datamocks.Manager) - mdm.On("CopyBlobPStoDX", ag.ctx, mock.Anything).Return(&fftypes.Blob{}, nil) - - resolved, err := ag.resolveBlobs(ag.ctx, []*fftypes.Data{ - {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{ - Hash: fftypes.NewRandB32(), - Public: "public-ref", - }}, - }) - - assert.NoError(t, err) - assert.True(t, resolved) -} - func TestBatchActions(t *testing.T) { prefinalizeCalled := false finalizeCalled := false @@ -1912,3 +2084,86 @@ func TestProcessWithBatchActionsSuccess(t *testing.T) { }) assert.NoError(t, err) } + +func TestExtractManifestFail(t *testing.T) { + 
ag, cancel := newTestAggregator() + defer cancel() + + manifest := ag.extractManifest(ag.ctx, &fftypes.BatchPersisted{ + Manifest: fftypes.JSONAnyPtr("!wrong"), + }) + + assert.Nil(t, manifest) +} + +func TestExtractManifestBadVersion(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + manifest := ag.extractManifest(ag.ctx, &fftypes.BatchPersisted{ + Manifest: fftypes.JSONAnyPtr(`{"version":999}`), + }) + + assert.Nil(t, manifest) +} + +func TestMigrateManifestFail(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + manifest := ag.migrateManifest(ag.ctx, &fftypes.BatchPersisted{ + Manifest: fftypes.JSONAnyPtr("!wrong"), + }) + + assert.Nil(t, manifest) +} + +func TestBatchCaching(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + persisted, expectedManifest := batch.Confirmed() + + pin := &fftypes.Pin{ + Batch: batch.ID, + BatchHash: batch.Hash, + } + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, batch.ID).Return(persisted, nil).Once() // to prove caching + + batchRetrieved, manifest, err := ag.GetBatchForPin(ag.ctx, pin) + assert.NoError(t, err) + assert.Equal(t, persisted, batchRetrieved) + assert.Equal(t, expectedManifest, manifest) + + batchRetrieved, manifest, err = ag.GetBatchForPin(ag.ctx, pin) + assert.NoError(t, err) + assert.Equal(t, persisted, batchRetrieved) + assert.Equal(t, expectedManifest, manifest) + +} + +func TestGetBatchForPinHashMismatch(t *testing.T) { + ag, cancel := newTestAggregator() + defer cancel() + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + persisted, _ := batch.Confirmed() + pin := &fftypes.Pin{ + Batch: batch.ID, + BatchHash: fftypes.NewRandB32(), + } + + mdi := ag.database.(*databasemocks.Plugin) + mdi.On("GetBatchByID", ag.ctx, batch.ID).Return(persisted, nil) + + batchRetrieved, manifest, err := ag.GetBatchForPin(ag.ctx, pin) + assert.Nil(t, batchRetrieved) + assert.Nil(t, manifest) + assert.Nil(t, err) + +} diff --git a/internal/events/batch_pin_complete.go b/internal/events/batch_pin_complete.go index 8505c3a317..1a0d8985c7 100644 --- a/internal/events/batch_pin_complete.go +++ b/internal/events/batch_pin_complete.go @@ -18,8 +18,6 @@ package events import ( "context" - "encoding/json" - "io" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/pkg/blockchain" @@ -48,24 +46,34 @@ func (em *eventManager) BatchPinComplete(bi blockchain.Plugin, batchPin *blockch }() log.L(em.ctx).Tracef("BatchPinComplete batch=%s info: %+v", batchPin.BatchID, batchPin.Event.Info) - if batchPin.BatchPayloadRef != "" { - return em.handleBroadcastPinComplete(batchPin, signingKey) - } - return em.handlePrivatePinComplete(batchPin, signingKey) -} - -func (em *eventManager) handlePrivatePinComplete(batchPin *blockchain.BatchPin, signingKey *fftypes.VerifierRef) error { // Here we simple record all the pins as parked, and emit an event for the aggregator // to check whether the messages in the batch have been written. 
- return em.retry.Do(em.ctx, "persist private batch pins", func(attempt int) (bool, error) { + return em.retry.Do(em.ctx, "persist batch pins", func(attempt int) (bool, error) { // We process the batch into the DB as a single transaction (if transactions are supported), both for // efficiency and to minimize the chance of duplicates (although at-least-once delivery is the core model) err := em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { - err := em.persistBatchTransaction(ctx, batchPin) - if err == nil { - err = em.persistContexts(ctx, batchPin, signingKey, true) + if err := em.persistBatchTransaction(ctx, batchPin); err != nil { + return err } - return err + chainEvent := buildBlockchainEvent(batchPin.Namespace, nil, &batchPin.Event, &fftypes.TransactionRef{ + Type: fftypes.TransactionTypeBatchPin, + ID: batchPin.TransactionID, + }) + if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { + return err + } + em.emitBlockchainEventMetric(batchPin.Event) + private := batchPin.BatchPayloadRef == "" + if err := em.persistContexts(ctx, batchPin, signingKey, private); err != nil { + return err + } + // Kick off a download for broadcast batches + if !private { + if err := em.sharedDownload.InitiateDownloadBatch(ctx, batchPin.Namespace, batchPin.TransactionID, batchPin.BatchPayloadRef); err != nil { + return err + } + } + return nil }) return err != nil, err // retry indefinitely (until context closes) }) @@ -77,64 +85,31 @@ func (em *eventManager) persistBatchTransaction(ctx context.Context, batchPin *b } func (em *eventManager) persistContexts(ctx context.Context, batchPin *blockchain.BatchPin, signingKey *fftypes.VerifierRef, private bool) error { + pins := make([]*fftypes.Pin, len(batchPin.Contexts)) for idx, hash := range batchPin.Contexts { - if err := em.database.UpsertPin(ctx, &fftypes.Pin{ - Masked: private, - Hash: hash, - Batch: batchPin.BatchID, - Index: int64(idx), - Signer: signingKey.Value, // We don't store the type as we can infer that from the blockchain - Created: fftypes.Now(), - }); err != nil { - return err + pins[idx] = &fftypes.Pin{ + Masked: private, + Hash: hash, + Batch: batchPin.BatchID, + BatchHash: batchPin.BatchHash, + Index: int64(idx), + Signer: signingKey.Value, // We don't store the type as we can infer that from the blockchain + Created: fftypes.Now(), } } - return nil -} - -func (em *eventManager) handleBroadcastPinComplete(batchPin *blockchain.BatchPin, signingKey *fftypes.VerifierRef) error { - var body io.ReadCloser - if err := em.retry.Do(em.ctx, "retrieve data", func(attempt int) (retry bool, err error) { - body, err = em.sharedstorage.RetrieveData(em.ctx, batchPin.BatchPayloadRef) - return err != nil, err // retry indefinitely (until context closes) - }); err != nil { - return err - } - defer body.Close() - var batch *fftypes.Batch - err := json.NewDecoder(body).Decode(&batch) - if err != nil { - log.L(em.ctx).Errorf("Failed to parse payload referred in batch ID '%s' from transaction '%s'", batchPin.BatchID, batchPin.Event.ProtocolID) - return nil // log and swallow unprocessable data + // First attempt a single batch insert + err := em.database.InsertPins(ctx, pins) + if err == nil { + return nil } - body.Close() + log.L(ctx).Warnf("Batch insert of pins failed - assuming replay and performing upserts: %s", err) - // At this point the batch is parsed, so any errors in processing need to be considered as: - // 1) Retryable - any transient error returned by processBatch is retried indefinitely - // 2) Swallowable - the data is 
invalid, and we have to move onto subsequent messages - // 3) Server shutting down - the context is cancelled (handled by retry) - return em.retry.Do(em.ctx, "persist batch", func(attempt int) (bool, error) { - // We process the batch into the DB as a single transaction (if transactions are supported), both for - // efficiency and to minimize the chance of duplicates (although at-least-once delivery is the core model) - err := em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { - chainEvent := buildBlockchainEvent(batchPin.Namespace, nil, &batchPin.Event, &batch.Payload.TX) - if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { - return err - } - em.emitBlockchainEventMetric(batchPin.Event) - if err := em.persistBatchTransaction(ctx, batchPin); err != nil { - return err - } - - // Note that in the case of a bad batch broadcast, we don't store the pin. Because we know we - // are never going to be able to process it (we retrieved it successfully, it's just invalid). - valid, err := em.persistBatchFromBroadcast(ctx, batch, batchPin.BatchHash) - if valid && err == nil { - err = em.persistContexts(ctx, batchPin, signingKey, false) - } + // Fall back to an upsert + for _, pin := range pins { + if err := em.database.UpsertPin(ctx, pin); err != nil { return err - }) - return err != nil, err // retry indefinitely (until context closes) - }) + } + } + return nil } diff --git a/internal/events/batch_pin_complete_test.go b/internal/events/batch_pin_complete_test.go index 242ce007e7..087de14fcb 100644 --- a/internal/events/batch_pin_complete_test.go +++ b/internal/events/batch_pin_complete_test.go @@ -17,17 +17,14 @@ package events import ( - "bytes" "context" - "encoding/json" "fmt" - "io/ioutil" "testing" "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" - "github.com/hyperledger/firefly/mocks/sharedstoragemocks" + "github.com/hyperledger/firefly/mocks/shareddownloadmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/database" @@ -36,23 +33,40 @@ import ( "github.com/stretchr/testify/mock" ) -func sampleBatch(t *testing.T, txType fftypes.TransactionType, data ...*fftypes.Data) *fftypes.Batch { +func sampleBatch(t *testing.T, batchType fftypes.BatchType, txType fftypes.TransactionType, data fftypes.DataArray, blobs ...*fftypes.Blob) *fftypes.Batch { identity := fftypes.SignerRef{Author: "signingOrg", Key: "0x12345"} + msgType := fftypes.MessageTypeBroadcast + if batchType == fftypes.BatchTypePrivate { + msgType = fftypes.MessageTypePrivate + } + for i, d := range data { + var blob *fftypes.Blob + d.Namespace = "ns1" + if len(blobs) > i { + blob = blobs[i] + } + err := d.Seal(context.Background(), blob) + assert.NoError(t, err) + } msg := &fftypes.Message{ Header: fftypes.MessageHeader{ + Namespace: "ns1", SignerRef: identity, ID: fftypes.NewUUID(), + Type: msgType, TxType: txType, + Topics: fftypes.FFStringArray{"topic1"}, }, - } - for _, d := range data { - err := d.Seal(context.Background(), nil) - assert.NoError(t, err) + Data: data.Refs(), } batch := &fftypes.Batch{ - SignerRef: identity, - ID: fftypes.NewUUID(), - Node: fftypes.NewUUID(), + BatchHeader: fftypes.BatchHeader{ + Namespace: "ns1", + SignerRef: identity, + Type: batchType, + ID: fftypes.NewUUID(), + Node: fftypes.NewUUID(), + }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ ID: 
fftypes.NewUUID(), @@ -64,17 +78,21 @@ func sampleBatch(t *testing.T, txType fftypes.TransactionType, data ...*fftypes. } err := msg.Seal(context.Background()) assert.NoError(t, err) - batch.Hash = batch.Payload.Hash() + bp, _ := batch.Confirmed() + batch.Hash = fftypes.HashString(bp.Manifest.String()) return batch } + func TestBatchPinCompleteOkBroadcast(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - batch := &blockchain.BatchPin{ + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + batchPin := &blockchain.BatchPin{ Namespace: "ns1", - TransactionID: fftypes.NewUUID(), - BatchID: fftypes.NewUUID(), + TransactionID: batch.Payload.TX.ID, + BatchID: batch.ID, BatchPayloadRef: "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", Contexts: []*fftypes.Bytes32{fftypes.NewRandB32()}, Event: blockchain.Event{ @@ -83,38 +101,14 @@ func TestBatchPinCompleteOkBroadcast(t *testing.T) { ProtocolID: "10/20/30", }, } - batchData := &fftypes.Batch{ - ID: batch.BatchID, - Namespace: "ns1", - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x22222", - }, - PayloadRef: batch.BatchPayloadRef, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: batch.TransactionID, - }, - Messages: []*fftypes.Message{}, - Data: []*fftypes.Data{}, - }, - } - batchData.Hash = batchData.Payload.Hash() - batch.BatchHash = batchData.Hash - batchDataBytes, err := json.Marshal(&batchData) - assert.NoError(t, err) - batchReadCloser := ioutil.NopCloser(bytes.NewReader(batchDataBytes)) - mpi := em.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", mock.Anything, mock. - MatchedBy(func(pr string) bool { return pr == batch.BatchPayloadRef })). - Return(batchReadCloser, nil) + batch.Hash = batch.Payload.Hash() + batchPin.BatchHash = batch.Hash mth := em.txHelper.(*txcommonmocks.Helper) - mth.On("PersistTransaction", mock.Anything, "ns1", batch.TransactionID, fftypes.TransactionTypeBatchPin, "0x12345"). + mth.On("PersistTransaction", mock.Anything, "ns1", batchPin.TransactionID, fftypes.TransactionTypeBatchPin, "0x12345"). Return(false, fmt.Errorf("pop")).Once() - mth.On("PersistTransaction", mock.Anything, "ns1", batch.TransactionID, fftypes.TransactionTypeBatchPin, "0x12345"). + mth.On("PersistTransaction", mock.Anything, "ns1", batchPin.TransactionID, fftypes.TransactionTypeBatchPin, "0x12345"). 
Return(true, nil) mdi := em.database.(*databasemocks.Plugin) @@ -128,35 +122,34 @@ func TestBatchPinCompleteOkBroadcast(t *testing.T) { } mdi.On("InsertBlockchainEvent", mock.Anything, mock.MatchedBy(func(e *fftypes.BlockchainEvent) bool { - return e.Name == batch.Event.Name + return e.Name == batchPin.Event.Name })).Return(fmt.Errorf("pop")).Once() mdi.On("InsertBlockchainEvent", mock.Anything, mock.MatchedBy(func(e *fftypes.BlockchainEvent) bool { - return e.Name == batch.Event.Name - })).Return(nil).Times(2) + return e.Name == batchPin.Event.Name + })).Return(nil).Times(1) mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(e *fftypes.Event) bool { return e.Type == fftypes.EventTypeBlockchainEventReceived - })).Return(nil).Times(2) - mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(nil).Once() - mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil).Once() + })).Return(nil).Times(1) + mdi.On("InsertPins", mock.Anything, mock.Anything).Return(nil).Once() + msd := em.sharedDownload.(*shareddownloadmocks.Manager) + msd.On("InitiateDownloadBatch", mock.Anything, "ns1", batchPin.TransactionID, batchPin.BatchPayloadRef).Return(nil) mbi := &blockchainmocks.Plugin{} - mim := em.identity.(*identitymanagermocks.Manager) - mim.On("NormalizeSigningKeyIdentity", mock.Anything, "0x12345").Return("author1", nil) - - err = em.BatchPinComplete(mbi, batch, &fftypes.VerifierRef{ + err := em.BatchPinComplete(mbi, batchPin, &fftypes.VerifierRef{ Type: fftypes.VerifierTypeEthAddress, Value: "0x12345", }) assert.NoError(t, err) mdi.AssertExpectations(t) + mth.AssertExpectations(t) } func TestBatchPinCompleteOkPrivate(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - batch := &blockchain.BatchPin{ + batchPin := &blockchain.BatchPin{ Namespace: "ns1", TransactionID: fftypes.NewUUID(), BatchID: fftypes.NewUUID(), @@ -165,37 +158,20 @@ func TestBatchPinCompleteOkPrivate(t *testing.T) { BlockchainTXID: "0x12345", }, } - batchData := &fftypes.Batch{ - ID: batch.BatchID, - Namespace: "ns1", - PayloadRef: batch.BatchPayloadRef, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: batch.TransactionID, - }, - Messages: []*fftypes.Message{}, - Data: []*fftypes.Data{}, - }, - } - batchDataBytes, err := json.Marshal(&batchData) - assert.NoError(t, err) - batchReadCloser := ioutil.NopCloser(bytes.NewReader(batchDataBytes)) - - mpi := em.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", mock.Anything, mock. - MatchedBy(func(pr string) bool { return pr == batch.BatchPayloadRef })). 
- Return(batchReadCloser, nil) mth := em.txHelper.(*txcommonmocks.Helper) - mth.On("PersistTransaction", mock.Anything, "ns1", batch.TransactionID, fftypes.TransactionTypeBatchPin, "0x12345").Return(true, nil) + mth.On("PersistTransaction", mock.Anything, "ns1", batchPin.TransactionID, fftypes.TransactionTypeBatchPin, "0x12345").Return(true, nil) mdi := em.database.(*databasemocks.Plugin) mdi.On("RunAsGroup", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertPins", mock.Anything, mock.Anything).Return(fmt.Errorf("These pins have been seen before")) // simulate replay fallback mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertBlockchainEvent", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + mbi := &blockchainmocks.Plugin{} - err = em.BatchPinComplete(mbi, batch, &fftypes.VerifierRef{ + err := em.BatchPinComplete(mbi, batchPin, &fftypes.VerifierRef{ Type: fftypes.VerifierTypeEthAddress, Value: "0xffffeeee", }) @@ -208,57 +184,79 @@ func TestBatchPinCompleteOkPrivate(t *testing.T) { assert.NoError(t, err) mdi.AssertExpectations(t) + mth.AssertExpectations(t) } -func TestSequencedBroadcastRetrieveIPFSFail(t *testing.T) { +func TestBatchPinCompleteInsertPinsFail(t *testing.T) { em, cancel := newTestEventManager(t) + cancel() - batch := &blockchain.BatchPin{ - Namespace: "ns", - TransactionID: fftypes.NewUUID(), - BatchID: fftypes.NewUUID(), - BatchPayloadRef: "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", - Contexts: []*fftypes.Bytes32{fftypes.NewRandB32()}, + batchPin := &blockchain.BatchPin{ + Namespace: "ns1", + TransactionID: fftypes.NewUUID(), + BatchID: fftypes.NewUUID(), + Contexts: []*fftypes.Bytes32{fftypes.NewRandB32()}, Event: blockchain.Event{ BlockchainTXID: "0x12345", }, } - cancel() // to avoid retry - mpi := em.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + mth := em.txHelper.(*txcommonmocks.Helper) + mth.On("PersistTransaction", mock.Anything, "ns1", batchPin.TransactionID, fftypes.TransactionTypeBatchPin, "0x12345").Return(true, nil) + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("RunAsGroup", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertPins", mock.Anything, mock.Anything).Return(fmt.Errorf("optimization miss")) + mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + mdi.On("InsertBlockchainEvent", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + mbi := &blockchainmocks.Plugin{} - err := em.BatchPinComplete(mbi, batch, &fftypes.VerifierRef{ + err := em.BatchPinComplete(mbi, batchPin, &fftypes.VerifierRef{ Type: fftypes.VerifierTypeEthAddress, Value: "0xffffeeee", }) - mpi.AssertExpectations(t) assert.Regexp(t, "FF10158", err) -} -func TestBatchPinCompleteBadData(t *testing.T) { + mdi.AssertExpectations(t) + mth.AssertExpectations(t) +} +func TestSequencedBroadcastInitiateDownloadFail(t *testing.T) { em, cancel := newTestEventManager(t) - defer cancel() - batch := &blockchain.BatchPin{ - Namespace: "ns", + batchPin := &blockchain.BatchPin{ + Namespace: "ns1", TransactionID: fftypes.NewUUID(), BatchID: fftypes.NewUUID(), BatchPayloadRef: "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", Contexts: []*fftypes.Bytes32{fftypes.NewRandB32()}, + Event: blockchain.Event{ + BlockchainTXID: "0x12345", + }, } - batchReadCloser := ioutil.NopCloser(bytes.NewReader([]byte(`!json`))) - mpi := 
em.sharedstorage.(*sharedstoragemocks.Plugin) - mpi.On("RetrieveData", mock.Anything, mock.Anything).Return(batchReadCloser, nil) + cancel() // to avoid retry mbi := &blockchainmocks.Plugin{} - err := em.BatchPinComplete(mbi, batch, &fftypes.VerifierRef{ + mth := em.txHelper.(*txcommonmocks.Helper) + mth.On("PersistTransaction", mock.Anything, "ns1", batchPin.TransactionID, fftypes.TransactionTypeBatchPin, "0x12345").Return(true, nil) + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("InsertBlockchainEvent", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertPins", mock.Anything, mock.Anything).Return(nil) + msd := em.sharedDownload.(*shareddownloadmocks.Manager) + msd.On("InitiateDownloadBatch", mock.Anything, "ns1", batchPin.TransactionID, batchPin.BatchPayloadRef).Return(fmt.Errorf("pop")) + + err := em.BatchPinComplete(mbi, batchPin, &fftypes.VerifierRef{ Type: fftypes.VerifierTypeEthAddress, Value: "0xffffeeee", }) - assert.NoError(t, err) // We do not return a blocking error in the case of bad data stored in IPFS + mdi.AssertExpectations(t) + msd.AssertExpectations(t) + mth.AssertExpectations(t) + assert.Regexp(t, "FF10158", err) } func TestBatchPinCompleteNoTX(t *testing.T) { @@ -298,8 +296,9 @@ func TestBatchPinCompleteBadNamespace(t *testing.T) { func TestPersistBatchMissingID(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - valid, err := em.persistBatch(context.Background(), &fftypes.Batch{}) + batch, valid, err := em.persistBatch(context.Background(), &fftypes.Batch{}) assert.False(t, valid) + assert.Nil(t, batch) assert.NoError(t, err) } @@ -308,23 +307,25 @@ func TestPersistBatchAuthorResolveFail(t *testing.T) { defer cancel() batchHash := fftypes.NewRandB32() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, }, + Hash: batchHash, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ Type: fftypes.TransactionTypeBatchPin, ID: fftypes.NewUUID(), }, }, - Hash: batchHash, } mim := em.identity.(*identitymanagermocks.Manager) mim.On("NormalizeSigningKeyIdentity", mock.Anything, mock.Anything).Return("", fmt.Errorf("pop")) batch.Hash = batch.Payload.Hash() - valid, err := em.persistBatchFromBroadcast(context.Background(), batch, batchHash) + _, valid, err := em.persistBatch(context.Background(), batch) assert.NoError(t, err) // retryable assert.False(t, valid) } @@ -334,23 +335,25 @@ func TestPersistBatchBadAuthor(t *testing.T) { defer cancel() batchHash := fftypes.NewRandB32() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, }, + Hash: batchHash, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ Type: fftypes.TransactionTypeBatchPin, ID: fftypes.NewUUID(), }, }, - Hash: batchHash, } mim := em.identity.(*identitymanagermocks.Manager) mim.On("NormalizeSigningKeyIdentity", mock.Anything, mock.Anything).Return("author2", nil) batch.Hash = batch.Payload.Hash() - valid, err := em.persistBatchFromBroadcast(context.Background(), batch, batchHash) + _, valid, err := em.persistBatch(context.Background(), batch) assert.NoError(t, err) assert.False(t, valid) } @@ -359,23 
+362,25 @@ func TestPersistBatchMismatchChainHash(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, }, + Hash: fftypes.NewRandB32(), Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ Type: fftypes.TransactionTypeBatchPin, ID: fftypes.NewUUID(), }, }, - Hash: fftypes.NewRandB32(), } mim := em.identity.(*identitymanagermocks.Manager) mim.On("NormalizeSigningKeyIdentity", mock.Anything, mock.Anything).Return("author1", nil) batch.Hash = batch.Payload.Hash() - valid, err := em.persistBatchFromBroadcast(context.Background(), batch, fftypes.NewRandB32()) + _, valid, err := em.persistBatch(context.Background(), batch) assert.NoError(t, err) assert.False(t, valid) } @@ -383,38 +388,42 @@ func TestPersistBatchMismatchChainHash(t *testing.T) { func TestPersistBatchUpsertBatchMismatchHash(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: fftypes.NewUUID(), - }, - }, - } - batch.Hash = batch.Payload.Hash() + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) mdi := em.database.(*databasemocks.Plugin) mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(database.HashMismatch) - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) assert.False(t, valid) + assert.Nil(t, bp) assert.NoError(t, err) mdi.AssertExpectations(t) } func TestPersistBatchBadHash(t *testing.T) { + em, cancel := newTestEventManager(t) + defer cancel() + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + batch.Hash = fftypes.NewRandB32() + + bp, valid, err := em.persistBatch(context.Background(), batch) + assert.False(t, valid) + assert.Nil(t, bp) + assert.NoError(t, err) +} + +func TestPersistBatchNoData(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "author1", + Key: "0x12345", + }, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -425,33 +434,23 @@ func TestPersistBatchBadHash(t *testing.T) { } batch.Hash = fftypes.NewRandB32() - valid, err := em.persistBatch(context.Background(), batch) + bp, valid, err := em.persistBatch(context.Background(), batch) assert.False(t, valid) + assert.Nil(t, bp) assert.NoError(t, err) } func TestPersistBatchUpsertBatchFail(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "author1", - Key: "0x12345", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: fftypes.NewUUID(), - }, - }, - } 
- batch.Hash = batch.Payload.Hash()
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
 
 	mdi := em.database.(*databasemocks.Plugin)
 	mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))
 
-	valid, err := em.persistBatch(context.Background(), batch)
+	bp, valid, err := em.persistBatch(context.Background(), batch)
+	assert.Nil(t, bp)
 	assert.False(t, valid)
 	assert.EqualError(t, err, "pop")
 }
@@ -460,19 +459,21 @@ func TestPersistBatchSwallowBadData(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
 	batch := &fftypes.Batch{
-		ID: fftypes.NewUUID(),
-		SignerRef: fftypes.SignerRef{
-			Author: "author1",
-			Key: "0x12345",
+		BatchHeader: fftypes.BatchHeader{
+			ID: fftypes.NewUUID(),
+			SignerRef: fftypes.SignerRef{
+				Author: "author1",
+				Key: "0x12345",
+			},
+			Namespace: "ns1",
 		},
-		Namespace: "ns1",
 		Payload: fftypes.BatchPayload{
 			TX: fftypes.TransactionRef{
 				Type: fftypes.TransactionTypeBatchPin,
 				ID: fftypes.NewUUID(),
 			},
 			Messages: []*fftypes.Message{nil},
-			Data: []*fftypes.Data{nil},
+			Data: fftypes.DataArray{nil},
 		},
 	}
 	batch.Hash = batch.Payload.Hash()
@@ -480,74 +481,26 @@
 	mdi := em.database.(*databasemocks.Plugin)
 	mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil)
 
-	valid, err := em.persistBatch(context.Background(), batch)
+	bp, valid, err := em.persistBatch(context.Background(), batch)
 	assert.False(t, valid)
 	assert.NoError(t, err)
+	assert.Nil(t, bp)
 
 	mdi.AssertExpectations(t)
 }
 
-func TestPersistBatchGoodDataUpsertOptimizeExistingFail(t *testing.T) {
+func TestPersistBatchGoodDataUpsertOptimizeFail(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
-	batch := &fftypes.Batch{
-		ID: fftypes.NewUUID(),
-		Node: testNodeID,
-		SignerRef: fftypes.SignerRef{
-			Author: "author1",
-			Key: "0x12345",
-		},
-		Namespace: "ns1",
-		Payload: fftypes.BatchPayload{
-			TX: fftypes.TransactionRef{
-				Type: fftypes.TransactionTypeBatchPin,
-				ID: fftypes.NewUUID(),
-			},
-			Data: []*fftypes.Data{
-				{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)},
-			},
-		},
-	}
-	batch.Payload.Data[0].Hash = batch.Payload.Data[0].Value.Hash()
-	batch.Hash = batch.Payload.Hash()
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
 
 	mdi := em.database.(*databasemocks.Plugin)
 	mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil)
+	mdi.On("InsertDataArray", mock.Anything, mock.Anything).Return(fmt.Errorf("optimization miss"))
 	mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop"))
 
-	valid, err := em.persistBatch(context.Background(), batch)
-	assert.False(t, valid)
-	assert.EqualError(t, err, "pop")
-}
-
-func TestPersistBatchGoodDataUpsertOptimizeNewFail(t *testing.T) {
-	em, cancel := newTestEventManager(t)
-	defer cancel()
-	batch := &fftypes.Batch{
-		ID: fftypes.NewUUID(),
-		Node: fftypes.NewUUID(),
-		SignerRef: fftypes.SignerRef{
-			Author: "author1",
-			Key: "0x12345",
-		},
-		Namespace: "ns1",
-		Payload: fftypes.BatchPayload{
-			TX: fftypes.TransactionRef{
-				Type: fftypes.TransactionTypeBatchPin,
-				ID: fftypes.NewUUID(),
-			},
-			Data: []*fftypes.Data{
-				{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)},
-			},
-		},
-	}
-	batch.Payload.Data[0].Hash = 
batch.Payload.Data[0].Value.Hash()
-	batch.Hash = batch.Payload.Hash()
-
-	mdi := em.database.(*databasemocks.Plugin)
-	mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil)
-	mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop"))
-
-	valid, err := em.persistBatch(context.Background(), batch)
+	bp, valid, err := em.persistBatch(context.Background(), batch)
 	assert.False(t, valid)
 	assert.EqualError(t, err, "pop")
 }
@@ -555,25 +508,26 @@ func TestPersistBatchGoodDataUpsertOptimizeNewFail(t *testing.T) {
 func TestPersistBatchGoodDataMessageFail(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
-	batch := sampleBatch(t, fftypes.TransactionTypeBatchPin)
-	batch.Node = nil
-	batch.Payload.Messages[0].Header.DataHash = batch.Payload.Messages[0].Data.Hash()
-	batch.Payload.Messages[0].Hash = batch.Payload.Messages[0].Header.Hash()
-	batch.Hash = batch.Payload.Hash()
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
 
 	mdi := em.database.(*databasemocks.Plugin)
 	mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil)
+	mdi.On("InsertDataArray", mock.Anything, mock.Anything).Return(nil)
+	mdi.On("InsertMessages", mock.Anything, mock.Anything).Return(fmt.Errorf("optimization miss"))
+	mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop"))
 
-	valid, err := em.persistBatch(context.Background(), batch)
+	bp, valid, err := em.persistBatch(context.Background(), batch)
 	assert.False(t, valid)
+	assert.Nil(t, bp)
 	assert.EqualError(t, err, "pop")
 }
 
 func TestPersistBatchGoodMessageAuthorMismatch(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
-	batch := sampleBatch(t, fftypes.TransactionTypeBatchPin)
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
 	batch.Payload.Messages[0].Header.Key = "0x9999999"
 	batch.Payload.Messages[0].Header.DataHash = batch.Payload.Messages[0].Data.Hash()
 	batch.Payload.Messages[0].Hash = batch.Payload.Messages[0].Header.Hash()
@@ -582,7 +536,8 @@ func TestPersistBatchGoodMessageAuthorMismatch(t *testing.T) {
 	mdi := em.database.(*databasemocks.Plugin)
 	mdi.On("UpsertBatch", mock.Anything, mock.Anything).Return(nil)
 
-	valid, err := em.persistBatch(context.Background(), batch)
+	bp, valid, err := em.persistBatch(context.Background(), batch)
+	assert.Nil(t, bp)
 	assert.False(t, valid)
 	assert.NoError(t, err)
 }
@@ -591,13 +546,15 @@ func TestPersistBatchDataNilData(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
 	batch := &fftypes.Batch{
-		ID: fftypes.NewUUID(),
+		BatchHeader: fftypes.BatchHeader{
+			ID: fftypes.NewUUID(),
+		},
 	}
 	data := &fftypes.Data{
 		ID: fftypes.NewUUID(),
 	}
-	err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip)
-	assert.NoError(t, err)
+	valid := em.validateBatchData(context.Background(), batch, 0, data)
+	assert.False(t, valid)
 }
 
 func TestPersistBatchDataBadHash(t *testing.T) {
@@ -607,131 +564,147 @@ func TestPersistBatchDataBadHash(t *testing.T) {
 	ID: fftypes.NewUUID(),
 	Value: fftypes.JSONAnyPtr(`"test"`),
 	}
-	batch := 
sampleBatch(t, fftypes.TransactionTypeBatchPin, data)
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
 	batch.Payload.Data[0].Hash = fftypes.NewRandB32()
 
-	err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip)
-	assert.NoError(t, err)
+	valid := em.validateBatchData(context.Background(), batch, 0, data)
+	assert.False(t, valid)
 }
 
-func TestPersistBatchDataUpsertHashMismatch(t *testing.T) {
+func TestPersistBatchDataOk(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
-	batch := sampleBatch(t, fftypes.TransactionTypeBatchPin)
-	batch.Hash = fftypes.NewRandB32()
 	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
-	data.Hash = data.Value.Hash()
-
-	mdi := em.database.(*databasemocks.Plugin)
-	mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(database.HashMismatch)
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
 
-	err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip)
-	assert.NoError(t, err)
-	mdi.AssertExpectations(t)
+	valid := em.validateBatchData(context.Background(), batch, 0, data)
+	assert.True(t, valid)
 }
 
-func TestPersistBatchDataUpsertDataError(t *testing.T) {
+func TestPersistBatchDataWithPublicAlreadyDownloadedOk(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
-	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
-	batch := sampleBatch(t, fftypes.TransactionTypeBatchPin, data)
+	blob := &fftypes.Blob{
+		Hash: fftypes.NewRandB32(),
+		Size: 12345,
+	}
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`), Blob: &fftypes.BlobRef{
+		Hash: blob.Hash,
+		Size: 12345,
+		Name: "myfile.txt",
+		Public: "ref1",
+	}}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}, blob)
 
 	mdi := em.database.(*databasemocks.Plugin)
-	mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(fmt.Errorf("pop"))
+	mdi.On("GetBlobMatchingHash", mock.Anything, blob.Hash).Return(blob, nil)
 
-	err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip)
-	assert.EqualError(t, err, "pop")
+	valid, err := em.checkAndInitiateBlobDownloads(context.Background(), batch, 0, data)
+	assert.Nil(t, err)
+	assert.True(t, valid)
 }
 
-func TestPersistBatchDataOk(t *testing.T) {
+func TestPersistBatchDataWithPublicInitiateDownload(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
-	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
-	batch := sampleBatch(t, fftypes.TransactionTypeBatchPin, data)
+	blob := &fftypes.Blob{
+		Hash: fftypes.NewRandB32(),
+		Size: 12345,
+	}
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`), Blob: &fftypes.BlobRef{
+		Hash: blob.Hash,
+		Size: 12345,
+		Name: "myfile.txt",
+		Public: "ref1",
+	}}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}, blob)
 
 	mdi := em.database.(*databasemocks.Plugin)
-	mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(nil)
+	mdi.On("GetBlobMatchingHash", mock.Anything, blob.Hash).Return(nil, nil)
 
-	err := em.persistBatchData(context.Background(), batch, 0, data, database.UpsertOptimizationSkip)
-	
assert.NoError(t, err) - mdi.AssertExpectations(t) -} + msd := em.sharedDownload.(*shareddownloadmocks.Manager) + msd.On("InitiateDownloadBlob", mock.Anything, batch.Namespace, batch.Payload.TX.ID, data.ID, "ref1").Return(nil) -func TestPersistBatchMessageNilData(t *testing.T) { - em, cancel := newTestEventManager(t) - defer cancel() - batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - } - msg := &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - }, - } - valid, err := em.persistBatchMessage(context.Background(), batch, 0, msg, database.UpsertOptimizationSkip) - assert.False(t, valid) - assert.NoError(t, err) + valid, err := em.checkAndInitiateBlobDownloads(context.Background(), batch, 0, data) + assert.Nil(t, err) + assert.True(t, valid) } -func TestPersistBatchMessageUpsertHashMismatch(t *testing.T) { +func TestPersistBatchDataWithPublicInitiateDownloadFail(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - batch := sampleBatch(t, fftypes.TransactionTypeBatchPin) + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + Size: 12345, + } + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`), Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + Size: 12345, + Name: "myfile.txt", + Public: "ref1", + }} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}, blob) mdi := em.database.(*databasemocks.Plugin) - mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(database.HashMismatch) + mdi.On("GetBlobMatchingHash", mock.Anything, blob.Hash).Return(nil, nil) + + msd := em.sharedDownload.(*shareddownloadmocks.Manager) + msd.On("InitiateDownloadBlob", mock.Anything, batch.Namespace, batch.Payload.TX.ID, data.ID, "ref1").Return(fmt.Errorf("pop")) - valid, err := em.persistBatchMessage(context.Background(), batch, 0, batch.Payload.Messages[0], database.UpsertOptimizationSkip) + valid, err := em.checkAndInitiateBlobDownloads(context.Background(), batch, 0, data) + assert.Regexp(t, "pop", err) assert.False(t, valid) - assert.NoError(t, err) - mdi.AssertExpectations(t) } -func TestPersistBatchMessageUpsertMessageFail(t *testing.T) { +func TestPersistBatchDataWithBlobGetBlobFail(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - batch := sampleBatch(t, fftypes.TransactionTypeBatchPin) + + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + Size: 12345, + } + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`), Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + Size: 12345, + Name: "myfile.txt", + Public: "ref1", + }} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}, blob) mdi := em.database.(*databasemocks.Plugin) - mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(fmt.Errorf("pop")) + mdi.On("GetBlobMatchingHash", mock.Anything, blob.Hash).Return(nil, fmt.Errorf("pop")) - valid, err := em.persistBatchMessage(context.Background(), batch, 0, batch.Payload.Messages[0], database.UpsertOptimizationSkip) + valid, err := em.checkAndInitiateBlobDownloads(context.Background(), batch, 0, data) + assert.Regexp(t, "pop", err) assert.False(t, valid) - assert.EqualError(t, err, "pop") } -func TestPersistBatchMessageOK(t *testing.T) { +func TestPersistBatchMessageNilData(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - batch := sampleBatch(t, fftypes.TransactionTypeBatchPin) - - mdi := 
em.database.(*databasemocks.Plugin) - mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationSkip).Return(nil) - - valid, err := em.persistBatchMessage(context.Background(), batch, 0, batch.Payload.Messages[0], database.UpsertOptimizationSkip) - assert.True(t, valid) - assert.NoError(t, err) - mdi.AssertExpectations(t) + batch := &fftypes.Batch{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + msg := &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + }, + } + valid := em.validateBatchMessage(context.Background(), batch, 0, msg) + assert.False(t, valid) } -func TestPersistContextsFail(t *testing.T) { +func TestPersistBatchMessageOK(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{}) - mdi := em.database.(*databasemocks.Plugin) - mdi.On("UpsertPin", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - - err := em.persistContexts(em.ctx, &blockchain.BatchPin{ - Contexts: []*fftypes.Bytes32{ - fftypes.NewRandB32(), - }, - }, &fftypes.VerifierRef{ - Type: fftypes.VerifierTypeEthAddress, - Value: "0x12345", - }, false) - assert.EqualError(t, err, "pop") - mdi.AssertExpectations(t) + valid := em.validateBatchMessage(context.Background(), batch, 0, batch.Payload.Messages[0]) + assert.True(t, valid) } diff --git a/internal/events/blockchain_event.go b/internal/events/blockchain_event.go index f55e6d4894..4afc8e7c4c 100644 --- a/internal/events/blockchain_event.go +++ b/internal/events/blockchain_event.go @@ -18,6 +18,7 @@ package events import ( "context" + "fmt" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/pkg/blockchain" @@ -42,11 +43,58 @@ func buildBlockchainEvent(ns string, subID *fftypes.UUID, event *blockchain.Even return ev } +func (em *eventManager) getChainListenerByProtocolIDCached(ctx context.Context, protocolID string) (*fftypes.ContractListener, error) { + return em.getChainListenerCached(fmt.Sprintf("pid:%s", protocolID), func() (*fftypes.ContractListener, error) { + return em.database.GetContractListenerByProtocolID(ctx, protocolID) + }) +} + +func (em *eventManager) getChainListenerByIDCached(ctx context.Context, id *fftypes.UUID) (*fftypes.ContractListener, error) { + return em.getChainListenerCached(fmt.Sprintf("id:%s", id), func() (*fftypes.ContractListener, error) { + return em.database.GetContractListenerByID(ctx, id) + }) +} + +func (em *eventManager) getChainListenerCached(cacheKey string, getter func() (*fftypes.ContractListener, error)) (*fftypes.ContractListener, error) { + cached := em.chainListenerCache.Get(cacheKey) + if cached != nil { + cached.Extend(em.chainListenerCacheTTL) + return cached.Value().(*fftypes.ContractListener), nil + } + listener, err := getter() + if listener == nil || err != nil { + return nil, err + } + em.chainListenerCache.Set(cacheKey, listener, em.chainListenerCacheTTL) + return listener, err +} + +func (em *eventManager) getTopicForChainListener(ctx context.Context, listenerID *fftypes.UUID) (string, error) { + if listenerID == nil { + return fftypes.SystemBatchPinTopic, nil + } + listener, err := em.getChainListenerByIDCached(ctx, listenerID) + if err != nil { + return "", err + } + var topic string + if listener != nil && listener.Topic != "" { + topic = listener.Topic + } else { + topic = listenerID.String() + } + return topic, nil +} + func (em *eventManager) persistBlockchainEvent(ctx context.Context, 
chainEvent *fftypes.BlockchainEvent) error { if err := em.database.InsertBlockchainEvent(ctx, chainEvent); err != nil { return err } - ffEvent := fftypes.NewEvent(fftypes.EventTypeBlockchainEventReceived, chainEvent.Namespace, chainEvent.ID, chainEvent.TX.ID) + topic, err := em.getTopicForChainListener(ctx, chainEvent.Listener) + if err != nil { + return err + } + ffEvent := fftypes.NewEvent(fftypes.EventTypeBlockchainEventReceived, chainEvent.Namespace, chainEvent.ID, chainEvent.TX.ID, topic) if err := em.database.InsertEvent(ctx, ffEvent); err != nil { return err } @@ -62,8 +110,7 @@ func (em *eventManager) emitBlockchainEventMetric(event blockchain.Event) { func (em *eventManager) BlockchainEvent(event *blockchain.EventWithSubscription) error { return em.retry.Do(em.ctx, "persist contract event", func(attempt int) (bool, error) { err := em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { - // TODO: should cache this lookup for efficiency - sub, err := em.database.GetContractListenerByProtocolID(ctx, event.Subscription) + sub, err := em.getChainListenerByProtocolIDCached(ctx, event.Subscription) if err != nil { return err } diff --git a/internal/events/blockchain_event_test.go b/internal/events/blockchain_event_test.go index 8f8cf7d3ed..a5754b5600 100644 --- a/internal/events/blockchain_event_test.go +++ b/internal/events/blockchain_event_test.go @@ -48,20 +48,22 @@ func TestContractEventWithRetries(t *testing.T) { sub := &fftypes.ContractListener{ Namespace: "ns", ID: fftypes.NewUUID(), + Topic: "topic1", } var eventID *fftypes.UUID mdi := em.database.(*databasemocks.Plugin) mdi.On("GetContractListenerByProtocolID", mock.Anything, "sb-1").Return(nil, fmt.Errorf("pop")).Once() - mdi.On("GetContractListenerByProtocolID", mock.Anything, "sb-1").Return(sub, nil).Times(3) + mdi.On("GetContractListenerByProtocolID", mock.Anything, "sb-1").Return(sub, nil).Times(1) // cached mdi.On("InsertBlockchainEvent", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")).Once() mdi.On("InsertBlockchainEvent", mock.Anything, mock.MatchedBy(func(e *fftypes.BlockchainEvent) bool { eventID = e.ID return *e.Listener == *sub.ID && e.Name == "Changed" && e.Namespace == "ns" })).Return(nil).Times(2) + mdi.On("GetContractListenerByID", mock.Anything, sub.ID).Return(sub, nil) mdi.On("InsertEvent", mock.Anything, mock.Anything).Return(fmt.Errorf("pop")).Once() mdi.On("InsertEvent", mock.Anything, mock.MatchedBy(func(e *fftypes.Event) bool { - return e.Type == fftypes.EventTypeBlockchainEventReceived && e.Reference != nil && e.Reference == eventID + return e.Type == fftypes.EventTypeBlockchainEventReceived && e.Reference != nil && e.Reference == eventID && e.Topic == "topic1" })).Return(nil).Once() err := em.BlockchainEvent(ev) @@ -97,6 +99,51 @@ func TestContractEventUnknownSubscription(t *testing.T) { mdi.AssertExpectations(t) } +func TestPersistBlockchainEventChainListenerLookupFail(t *testing.T) { + em, cancel := newTestEventManager(t) + defer cancel() + + ev := &fftypes.BlockchainEvent{ + Name: "Changed", + Output: fftypes.JSONObject{ + "value": "1", + }, + Info: fftypes.JSONObject{ + "blockNumber": "10", + }, + Listener: fftypes.NewUUID(), + } + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("InsertBlockchainEvent", mock.Anything, mock.Anything).Return(nil) + mdi.On("GetContractListenerByID", mock.Anything, ev.Listener).Return(nil, fmt.Errorf("pop")) + + err := em.persistBlockchainEvent(em.ctx, ev) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) +} + +func 
TestGetTopicForChainListenerFallback(t *testing.T) { + em, cancel := newTestEventManager(t) + defer cancel() + + sub := &fftypes.ContractListener{ + Namespace: "ns", + ID: fftypes.NewUUID(), + Topic: "", + } + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("GetContractListenerByID", mock.Anything, mock.Anything).Return(sub, nil) + + topic, err := em.getTopicForChainListener(em.ctx, sub.ID) + assert.NoError(t, err) + assert.Equal(t, sub.ID.String(), topic) + + mdi.AssertExpectations(t) +} + func TestBlockchainEventMetric(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() diff --git a/internal/events/dx_callbacks.go b/internal/events/dx_callbacks.go index f915fd0ca2..908ace27cc 100644 --- a/internal/events/dx_callbacks.go +++ b/internal/events/dx_callbacks.go @@ -36,14 +36,14 @@ func (em *eventManager) MessageReceived(dx dataexchange.Plugin, peerID string, d var wrapper *fftypes.TransportWrapper err = json.Unmarshal(data, &wrapper) if err != nil { - l.Errorf("Invalid transmission from '%s': %s", peerID, err) + l.Errorf("Invalid transmission from %s peer '%s': %s", dx.Name(), peerID, err) return "", nil } if wrapper.Batch == nil { l.Errorf("Invalid transmission: nil batch") return "", nil } - l.Infof("Private batch received from '%s' (len=%d)", peerID, len(data)) + l.Infof("Private batch received from %s peer '%s' (len=%d)", dx.Name(), peerID, len(data)) if wrapper.Batch.Payload.TX.Type == fftypes.TransactionTypeUnpinned { valid, err := em.definitions.EnsureLocalGroup(em.ctx, wrapper.Group) @@ -56,12 +56,8 @@ func (em *eventManager) MessageReceived(dx dataexchange.Plugin, peerID string, d } } - mf, err := em.privateBatchReceived(peerID, wrapper.Batch) - manifestBytes := []byte{} - if err == nil && mf != nil { - manifestBytes, err = json.Marshal(&mf) - } - return string(manifestBytes), err + manifestString, err := em.privateBatchReceived(peerID, wrapper.Batch) + return manifestString, err } // Check data exchange peer the data came from, has been registered to the org listed in the batch. @@ -113,7 +109,7 @@ func (em *eventManager) checkReceivedOffchainIdentity(ctx context.Context, peerI return node, nil } -func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch) (manifest *fftypes.Manifest, err error) { +func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch) (manifest string, err error) { // Retry for persistence errors (not validation errors) err = em.retry.Do(em.ctx, "private batch received", func(attempt int) (bool, error) { @@ -129,27 +125,28 @@ func (em *eventManager) privateBatchReceived(peerID string, batch *fftypes.Batch return nil } - valid, err := em.persistBatch(ctx, batch) + persistedBatch, valid, err := em.persistBatch(ctx, batch) if err != nil || !valid { l.Errorf("Batch received from org=%s node=%s processing failed valid=%t: %s", node.Parent, node.Name, valid, err) return err // retry - persistBatch only returns retryable errors } - if batch.Payload.TX.Type == fftypes.TransactionTypeBatchPin { - // Poke the aggregator to do its stuff - em.aggregator.rewindBatches <- batch.ID - } else if batch.Payload.TX.Type == fftypes.TransactionTypeUnpinned { + if batch.Payload.TX.Type == fftypes.TransactionTypeUnpinned { // We need to confirm all these messages immediately. 
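		// "Confirm immediately" means inserting one message-confirmed event per topic,
		// exactly as markUnpinnedMessagesConfirmed implements below; a minimal sketch
		// of that shape, mirrored from the hunk that follows:
		//
		//	for _, topic := range msg.Header.Topics {
		//		event := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, batch.Namespace, msg.Header.ID, batch.Payload.TX.ID, topic)
		//		event.Correlator = msg.Header.CID
		//		if err := em.database.InsertEvent(ctx, event); err != nil {
		//			return err
		//		}
		//	}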
if err := em.markUnpinnedMessagesConfirmed(ctx, batch); err != nil {
 				return err
 			}
 		}
 
-		manifest = batch.Manifest()
+		manifest = persistedBatch.Manifest.String()
 		return nil
 	})
 	})
+	// Poke the aggregator to do its stuff - after we have committed the transaction so the pins are visible
+	if err == nil && batch.Payload.TX.Type == fftypes.TransactionTypeBatchPin {
+		log.L(em.ctx).Infof("Rewinding for persisted private batch %s", batch.ID)
+		em.aggregator.rewindBatches <- *batch.ID
+	}
 	return manifest, err
-
 }
 
 func (em *eventManager) markUnpinnedMessagesConfirmed(ctx context.Context, batch *fftypes.Batch) error {
@@ -176,32 +173,36 @@ func (em *eventManager) markUnpinnedMessagesConfirmed(ctx context.Context, batch
 	}
 
 	for _, msg := range batch.Payload.Messages {
-		event := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, batch.Namespace, msg.Header.ID, batch.Payload.TX.ID)
-		event.Correlator = msg.Header.CID
-		if err := em.database.InsertEvent(ctx, event); err != nil {
-			return err
+		for _, topic := range msg.Header.Topics {
+			// One event per topic
+			event := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, batch.Namespace, msg.Header.ID, batch.Payload.TX.ID, topic)
+			event.Correlator = msg.Header.CID
+			if err := em.database.InsertEvent(ctx, event); err != nil {
+				return err
+			}
 		}
 	}
 	return nil
 }
 
-func (em *eventManager) BLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error {
+func (em *eventManager) PrivateBLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error {
 	l := log.L(em.ctx)
-	l.Debugf("Blob received event from data exhange: Peer='%s' Hash='%v' PayloadRef='%s'", peerID, &hash, payloadRef)
+	l.Infof("Blob received event from data exchange %s: Peer='%s' Hash='%v' PayloadRef='%s'", dx.Name(), peerID, &hash, payloadRef)
 	if peerID == "" || len(peerID) > 256 || payloadRef == "" || len(payloadRef) > 1024 {
 		l.Errorf("Invalid blob received event from data exhange: Peer='%s' Hash='%v' PayloadRef='%s'", peerID, &hash, payloadRef)
 		return nil // we consume the event still
 	}
+	return em.blobReceivedCommon(peerID, hash, size, payloadRef)
+}
+
+func (em *eventManager) blobReceivedCommon(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error {
 	// We process the event in a retry loop (which will break only if the context is closed), so that
 	// we only confirm consumption of the event to the plugin once we've processed it.
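	// The retry contract assumed here, sketched for clarity: the callback returns
	// (retry bool, err error), and retry.Do keeps re-invoking it until it returns a
	// nil error or em.ctx is cancelled, so the plugin's event is only acknowledged
	// after one fully successful pass:
	//
	//	err := em.retry.Do(em.ctx, "blob reference insert", func(attempt int) (bool, error) {
	//		err := processEvent(em.ctx) // processEvent is a hypothetical stand-in for the work below
	//		return err != nil, err      // retry indefinitely (until context closes)
	//	})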
return em.retry.Do(em.ctx, "blob reference insert", func(attempt int) (retry bool, err error) { - - batchIDs := make(map[fftypes.UUID]bool) - - err = em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { + return true, em.database.RunAsGroup(em.ctx, func(ctx context.Context) error { // Insert the blob into the detabase err := em.database.InsertBlob(ctx, &fftypes.Blob{ Peer: peerID, @@ -213,48 +214,8 @@ func (em *eventManager) BLOBReceived(dx dataexchange.Plugin, peerID string, hash if err != nil { return err } - - // Now we need to work out what pins potentially are unblocked by the arrival of this data - - // Find any data associated with this blob - var data []*fftypes.DataRef - filter := database.DataQueryFactory.NewFilter(ctx).Eq("blob.hash", &hash) - data, _, err = em.database.GetDataRefs(ctx, filter) - if err != nil { - return err - } - - // Find the messages assocated with that data - var messages []*fftypes.Message - for _, data := range data { - fb := database.MessageQueryFactory.NewFilter(ctx) - filter := fb.And(fb.Eq("confirmed", nil)) - messages, _, err = em.database.GetMessagesForData(ctx, data.ID, filter) - if err != nil { - return err - } - } - - // Find the unique batch IDs for all the messages - for _, msg := range messages { - if msg.BatchID != nil { - batchIDs[*msg.BatchID] = true - } - } - return nil + return em.aggregator.rewindForBlobArrival(ctx, &hash) }) - if err != nil { - return true, err - } - - // Initiate rewinds for all the batchIDs that are potentially completed by the arrival of this data - for bid := range batchIDs { - var batchID = bid // cannot use the address of the loop var - l.Infof("Batch '%s' contains reference to received blob. Peer='%s' Hash='%v' PayloadRef='%s'", &bid, peerID, &hash, payloadRef) - em.aggregator.rewindBatches <- &batchID - } - - return false, nil }) } @@ -292,8 +253,18 @@ func (em *eventManager) TransferResult(dx dataexchange.Plugin, trackingID string op := operations[0] if status == fftypes.OpStatusSucceeded && dx.Capabilities().Manifest { switch op.Type { - case fftypes.OpTypeDataExchangeBatchSend: - expectedManifest := op.Input.GetString("manifest") + case fftypes.OpTypeDataExchangeSendBatch: + batchID, _ := fftypes.ParseUUID(em.ctx, op.Input.GetString("batch")) + expectedManifest := "" + if batchID != nil { + batch, err := em.database.GetBatchByID(em.ctx, batchID) + if err != nil { + return true, err + } + if batch != nil { + expectedManifest = batch.Manifest.String() + } + } if update.Manifest != expectedManifest { // Log and map to failure for user to see that the receiver did not provide a matching acknowledgement mismatchErr := i18n.NewError(em.ctx, i18n.MsgManifestMismatch, status, update.Manifest) @@ -301,7 +272,7 @@ func (em *eventManager) TransferResult(dx dataexchange.Plugin, trackingID string update.Error = mismatchErr.Error() status = fftypes.OpStatusFailed } - case fftypes.OpTypeDataExchangeBlobSend: + case fftypes.OpTypeDataExchangeSendBlob: expectedHash := op.Input.GetString("hash") if update.Hash != expectedHash { // Log and map to failure for user to see that the receiver did not provide a matching hash diff --git a/internal/events/dx_callbacks_test.go b/internal/events/dx_callbacks_test.go index 6349f9749d..114c0881b2 100644 --- a/internal/events/dx_callbacks_test.go +++ b/internal/events/dx_callbacks_test.go @@ -25,6 +25,7 @@ import ( "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/dataexchangemocks" + "github.com/hyperledger/firefly/mocks/datamocks" 
"github.com/hyperledger/firefly/mocks/definitionsmocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/pkg/database" @@ -34,8 +35,9 @@ import ( "github.com/stretchr/testify/mock" ) -func sampleBatchTransfer(t *testing.T, txType fftypes.TransactionType, data ...*fftypes.Data) (*fftypes.Batch, []byte) { - batch := sampleBatch(t, txType, data...) +func sampleBatchTransfer(t *testing.T, txType fftypes.TransactionType) (*fftypes.Batch, []byte) { + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypePrivate, txType, fftypes.DataArray{data}) b, _ := json.Marshal(&fftypes.TransportWrapper{ Batch: batch, Group: &fftypes.Group{ @@ -96,24 +98,39 @@ func TestPinnedReceiveOK(t *testing.T) { }).Return(node1, nil) mim.On("CachedIdentityLookup", em.ctx, "signingOrg").Return(org1, false, nil) mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil, nil) - mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil, nil) + mdi.On("InsertDataArray", em.ctx, mock.Anything).Return(nil, nil) + mdi.On("InsertMessages", em.ctx, mock.Anything).Return(nil, nil) + mdx.On("Name").Return("utdx").Maybe() + mdm := em.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() + m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) assert.NotNil(t, m) mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestMessageReceiveOkBadBatchIgnored(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - _, b := sampleBatchTransfer(t, fftypes.TransactionTypeTokenPool) + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypePrivate, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + batch.Payload.TX.Type = fftypes.TransactionTypeTokenPool + b, _ := json.Marshal(&fftypes.TransportWrapper{ + Batch: batch, + Group: &fftypes.Group{ + Hash: fftypes.NewRandB32(), + }, + }) org1 := newTestOrg("org1") node1 := newTestNode("node1", org1) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") mim := em.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", em.ctx, []fftypes.IdentityType{fftypes.IdentityTypeNode}, fftypes.SystemNamespace, &fftypes.VerifierRef{ Type: fftypes.VerifierTypeFFDXPeerID, @@ -139,6 +156,7 @@ func TestMessageReceivePersistBatchError(t *testing.T) { node1 := newTestNode("node1", org1) mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") mim := em.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", em.ctx, []fftypes.IdentityType{fftypes.IdentityTypeNode}, fftypes.SystemNamespace, &fftypes.VerifierRef{ Type: fftypes.VerifierTypeFFDXPeerID, @@ -160,6 +178,7 @@ func TestMessageReceivedBadData(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") m, err := em.MessageReceived(mdx, "peer1", []byte(`!{}`)) assert.NoError(t, err) assert.Empty(t, m) @@ -171,6 +190,7 @@ func TestMessageReceivedUnknownType(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") m, err := em.MessageReceived(mdx, "peer1", []byte(`{ "type": "unknown" }`)) @@ -184,6 +204,7 @@ func TestMessageReceivedNilBatch(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") m, err := em.MessageReceived(mdx, 
"peer1", []byte(`{ "type": "batch" }`)) @@ -197,6 +218,7 @@ func TestMessageReceivedNilMessage(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") m, err := em.MessageReceived(mdx, "peer1", []byte(`{ "type": "message" }`)) @@ -210,6 +232,7 @@ func TestMessageReceivedNilGroup(t *testing.T) { defer cancel() mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") m, err := em.MessageReceived(mdx, "peer1", []byte(`{ "type": "message", "message": {} @@ -234,6 +257,7 @@ func TestMessageReceiveNodeLookupError(t *testing.T) { }).Return(nil, fmt.Errorf("pop")) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) assert.Empty(t, m) @@ -249,6 +273,7 @@ func TestMessageReceiveGetCandidateOrgFail(t *testing.T) { node1 := newTestNode("node1", org1) mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") mim := em.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", em.ctx, []fftypes.IdentityType{fftypes.IdentityTypeNode}, fftypes.SystemNamespace, &fftypes.VerifierRef{ Type: fftypes.VerifierTypeFFDXPeerID, @@ -273,6 +298,7 @@ func TestMessageReceiveGetCandidateOrgNotFound(t *testing.T) { node1 := newTestNode("node1", org1) mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") mim := em.identity.(*identitymanagermocks.Manager) mim.On("FindIdentityForVerifier", em.ctx, []fftypes.IdentityType{fftypes.IdentityTypeNode}, fftypes.SystemNamespace, &fftypes.VerifierRef{ Type: fftypes.VerifierTypeFFDXPeerID, @@ -295,6 +321,7 @@ func TestMessageReceiveGetCandidateOrgNotMatch(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") org1 := newTestOrg("org1") node1 := newTestNode("node1", org1) mim := em.identity.(*identitymanagermocks.Manager) @@ -311,7 +338,7 @@ func TestMessageReceiveGetCandidateOrgNotMatch(t *testing.T) { mdx.AssertExpectations(t) } -func TestBLOBReceivedTriggersRewindOk(t *testing.T) { +func TestPrivateBLOBReceivedTriggersRewindOk(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() hash := fftypes.NewRandB32() @@ -319,6 +346,7 @@ func TestBLOBReceivedTriggersRewindOk(t *testing.T) { batchID := fftypes.NewUUID() mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") mdi := em.database.(*databasemocks.Plugin) mdi.On("InsertBlob", em.ctx, mock.Anything).Return(nil) @@ -329,30 +357,34 @@ func TestBLOBReceivedTriggersRewindOk(t *testing.T) { {BatchID: batchID}, }, nil, nil) - err := em.BLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1") + err := em.PrivateBLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1") assert.NoError(t, err) bid := <-em.aggregator.rewindBatches - assert.Equal(t, *batchID, *bid) + assert.Equal(t, *batchID, bid) mdi.AssertExpectations(t) } -func TestBLOBReceivedBadEvent(t *testing.T) { +func TestPrivateBLOBReceivedBadEvent(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() - err := em.BLOBReceived(nil, "", fftypes.Bytes32{}, 12345, "") + mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") + + err := em.PrivateBLOBReceived(mdx, "", fftypes.Bytes32{}, 12345, "") assert.NoError(t, err) } -func TestBLOBReceivedGetMessagesFail(t *testing.T) { +func TestPrivateBLOBReceivedGetMessagesFail(t *testing.T) { em, cancel := newTestEventManager(t) cancel() // retryable error hash := 
fftypes.NewRandB32()
 	dataID := fftypes.NewUUID()
 	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
 	mdi := em.database.(*databasemocks.Plugin)
 	mdi.On("InsertBlob", em.ctx, mock.Anything).Return(nil)
@@ -361,40 +393,42 @@ func TestBLOBReceivedGetMessagesFail(t *testing.T) {
 	}, nil, nil)
 	mdi.On("GetMessagesForData", em.ctx, dataID, mock.Anything).Return(nil, nil, fmt.Errorf("pop"))
-	err := em.BLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
+	err := em.PrivateBLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
 	assert.Regexp(t, "FF10158", err)
 	mdi.AssertExpectations(t)
 }
-func TestBLOBReceivedGetDataRefsFail(t *testing.T) {
+func TestPrivateBLOBReceivedGetDataRefsFail(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	cancel() // retryable error
 	hash := fftypes.NewRandB32()
 	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
 	mdi := em.database.(*databasemocks.Plugin)
 	mdi.On("InsertBlob", em.ctx, mock.Anything).Return(nil)
 	mdi.On("GetDataRefs", em.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop"))
-	err := em.BLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
+	err := em.PrivateBLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
 	assert.Regexp(t, "FF10158", err)
 	mdi.AssertExpectations(t)
 }
-func TestBLOBReceivedInsertBlobFails(t *testing.T) {
+func TestPrivateBLOBReceivedInsertBlobFails(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	cancel() // retryable error
 	hash := fftypes.NewRandB32()
 	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
 	mdi := em.database.(*databasemocks.Plugin)
 	mdi.On("InsertBlob", em.ctx, mock.Anything).Return(fmt.Errorf("pop"))
-	err := em.BLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
+	err := em.PrivateBLOBReceived(mdx, "peer1", *hash, 12345, "ns1/path1")
 	assert.Regexp(t, "FF10158", err)
 	mdi.AssertExpectations(t)
@@ -430,12 +464,15 @@ func TestTransferResultManifestMismatch(t *testing.T) {
 	mdi := em.database.(*databasemocks.Plugin)
 	id := fftypes.NewUUID()
+	mdi.On("GetBatchByID", mock.Anything, mock.Anything).Return(&fftypes.BatchPersisted{
+		Manifest: fftypes.JSONAnyPtr("my-manifest"),
+	}, nil)
 	mdi.On("GetOperations", mock.Anything, mock.Anything).Return([]*fftypes.Operation{
 		{
 			ID:   id,
-			Type: "dataexchange_batch_send",
+			Type: fftypes.OpTypeDataExchangeSendBatch,
 			Input: fftypes.JSONObject{
-				"manifest": "Bob",
+				"batch": fftypes.NewUUID().String(),
 			},
 		},
 	}, nil, nil)
@@ -458,6 +495,41 @@
 }
+func TestTransferResultManifestFail(t *testing.T) {
+	em, cancel := newTestEventManager(t)
+	cancel()
+
+	mdi := em.database.(*databasemocks.Plugin)
+	id := fftypes.NewUUID()
+	mdi.On("GetBatchByID", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop"))
+	mdi.On("GetOperations", mock.Anything, mock.Anything).Return([]*fftypes.Operation{
+		{
+			ID:   id,
+			Type: fftypes.OpTypeDataExchangeSendBatch,
+			Input: fftypes.JSONObject{
+				"batch": fftypes.NewUUID().String(),
+			},
+		},
+	}, nil, nil)
+	mdi.On("ResolveOperation", mock.Anything, id, fftypes.OpStatusFailed, mock.MatchedBy(func(errorMsg string) bool {
+		return strings.Contains(errorMsg, "FF10329")
+	}), fftypes.JSONObject{
+		"extra": "info",
+	}).Return(nil)
+
+	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
+	mdx.On("Capabilities").Return(&dataexchange.Capabilities{
+		Manifest: true,
+	})
+	err := em.TransferResult(mdx, id.String(), fftypes.OpStatusSucceeded, fftypes.TransportStatusUpdate{
+		Info:     fftypes.JSONObject{"extra": "info"},
+		Manifest: "Sally",
+	})
+	assert.Regexp(t, "FF10158", err)
+
+}
+
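For readers following the TransferResult change exercised by the two manifest tests above: the operation input now records the ID of the batch that was sent, rather than the manifest itself, and the manifest reported back by the data exchange plugin is compared against the manifest stored on that batch. The following is a minimal sketch of the check being driven, under assumed naming (verifyBatchManifest is a hypothetical helper; the real logic lives in the event manager's TransferResult path):

// Sketch only - assumed shape of the manifest verification the tests above drive.
// Needs: context, fmt, pkg/database, pkg/fftypes.
func verifyBatchManifest(ctx context.Context, di database.Plugin, op *fftypes.Operation, reported string, info fftypes.JSONObject) error {
	// The operation input records the batch ID of the send
	batchID, err := fftypes.ParseUUID(ctx, op.Input.GetString("batch"))
	if err != nil {
		return err
	}
	// A lookup failure here is retryable - it surfaces as FF10158 in the tests
	batch, err := di.GetBatchByID(ctx, batchID)
	if err != nil {
		return err
	}
	if batch.Manifest == nil || batch.Manifest.String() != reported {
		// A mismatch is terminal: the operation is resolved as failed (FF10329)
		return di.ResolveOperation(ctx, op.ID, fftypes.OpStatusFailed,
			fmt.Sprintf("FF10329: batch manifest mismatch: received=%s", reported), info)
	}
	return nil
}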
 func TestTransferResultHashMismatch(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	defer cancel()
@@ -467,7 +539,7 @@ func TestTransferResultHashMismatch(t *testing.T) {
 	mdi.On("GetOperations", mock.Anything, mock.Anything).Return([]*fftypes.Operation{
 		{
 			ID:   id,
-			Type: "dataexchange_blob_send",
+			Type: fftypes.OpTypeDataExchangeSendBlob,
 			Input: fftypes.JSONObject{
 				"hash": "Bob",
 			},
 		},
 	}, nil, nil)
@@ -579,6 +651,7 @@ func TestMessageReceiveMessageIdentityFail(t *testing.T) {
 	_, b := sampleBatchTransfer(t, fftypes.TransactionTypeBatchPin)
 	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
 	msh := em.definitions.(*definitionsmocks.DefinitionHandlers)
 	msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(true, nil)
@@ -610,6 +683,7 @@ func TestMessageReceiveMessageIdentityParentNotFound(t *testing.T) {
 	_, b := sampleBatchTransfer(t, fftypes.TransactionTypeBatchPin)
 	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
 	msh := em.definitions.(*definitionsmocks.DefinitionHandlers)
 	msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(true, nil)
@@ -642,6 +716,7 @@ func TestMessageReceiveMessageIdentityIncorrect(t *testing.T) {
 	_, b := sampleBatchTransfer(t, fftypes.TransactionTypeBatchPin)
 	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
 	msh := em.definitions.(*definitionsmocks.DefinitionHandlers)
 	msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(true, nil)
@@ -670,6 +745,7 @@ func TestMessageReceiveMessagePersistMessageFail(t *testing.T) {
 	mdi := em.database.(*databasemocks.Plugin)
 	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
 	msh := em.definitions.(*definitionsmocks.DefinitionHandlers)
 	msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(true, nil)
@@ -683,7 +759,9 @@ func TestMessageReceiveMessagePersistMessageFail(t *testing.T) {
 	}).Return(node1, nil)
 	mim.On("CachedIdentityLookup", em.ctx, "signingOrg").Return(org1, false, nil)
 	mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil, nil)
-	mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop"))
+	mdi.On("InsertDataArray", em.ctx, mock.Anything).Return(nil)
+	mdi.On("InsertMessages", em.ctx, mock.Anything).Return(fmt.Errorf("optimization fail"))
+	mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop"))
 	m, err := em.MessageReceived(mdx, "peer1", b)
 	assert.Regexp(t, "FF10158", err)
@@ -697,14 +775,11 @@ func TestMessageReceiveMessagePersistDataFail(t *testing.T) {
 	em, cancel := newTestEventManager(t)
 	cancel() // to avoid infinite retry
-	data := &fftypes.Data{
-		ID:    fftypes.NewUUID(),
-		Value: fftypes.JSONAnyPtr(`{}`),
-	}
-	_, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned, data)
+	_, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned)
 	mdi := em.database.(*databasemocks.Plugin)
 	mdx := &dataexchangemocks.Plugin{}
+	mdx.On("Name").Return("utdx")
 	msh := em.definitions.(*definitionsmocks.DefinitionHandlers)
 	msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(true, nil)
@@ -718,7 +793,8 @@ func TestMessageReceiveMessagePersistDataFail(t *testing.T) {
 	}).Return(node1, nil)
 	mim.On("CachedIdentityLookup", em.ctx, "signingOrg").Return(org1, false, nil)
 	mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil, nil)
-	mdi.On("UpsertData", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop"))
+	mdi.On("InsertDataArray", em.ctx, mock.Anything).Return(fmt.Errorf("optimization miss"))
+
mdi.On("UpsertData", em.ctx, mock.Anything, database.UpsertOptimizationExisting).Return(fmt.Errorf("pop")) m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) @@ -732,14 +808,11 @@ func TestMessageReceiveUnpinnedBatchOk(t *testing.T) { em, cancel := newTestEventManager(t) cancel() // to avoid infinite retry - data := &fftypes.Data{ - ID: fftypes.NewUUID(), - Value: fftypes.JSONAnyPtr(`{}`), - } - _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned, data) + _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned) mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") org1 := newTestOrg("org1") node1 := newTestNode("node1", org1) @@ -753,10 +826,12 @@ func TestMessageReceiveUnpinnedBatchOk(t *testing.T) { }).Return(node1, nil) mim.On("CachedIdentityLookup", em.ctx, "signingOrg").Return(org1, false, nil) mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil, nil) - mdi.On("UpsertData", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) - mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) + mdi.On("InsertDataArray", em.ctx, mock.Anything).Return(nil) + mdi.On("InsertMessages", em.ctx, mock.Anything).Return(nil) mdi.On("UpdateMessages", em.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", em.ctx, mock.Anything).Return(nil) + mdm := em.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() m, err := em.MessageReceived(mdx, "peer1", b) assert.NoError(t, err) @@ -764,19 +839,18 @@ func TestMessageReceiveUnpinnedBatchOk(t *testing.T) { mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) } + func TestMessageReceiveUnpinnedBatchConfirmMessagesFail(t *testing.T) { em, cancel := newTestEventManager(t) cancel() // to avoid infinite retry - data := &fftypes.Data{ - ID: fftypes.NewUUID(), - Value: fftypes.JSONAnyPtr(`{}`), - } - _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned, data) + _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned) mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") org1 := newTestOrg("org1") node1 := newTestNode("node1", org1) @@ -790,9 +864,11 @@ func TestMessageReceiveUnpinnedBatchConfirmMessagesFail(t *testing.T) { }).Return(node1, nil) mim.On("CachedIdentityLookup", em.ctx, "signingOrg").Return(org1, false, nil) mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil, nil) - mdi.On("UpsertData", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) - mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) + mdi.On("InsertDataArray", em.ctx, mock.Anything).Return(nil) + mdi.On("InsertMessages", em.ctx, mock.Anything).Return(nil) mdi.On("UpdateMessages", em.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + mdm := em.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) @@ -800,20 +876,18 @@ func TestMessageReceiveUnpinnedBatchConfirmMessagesFail(t *testing.T) { mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestMessageReceiveUnpinnedBatchPersistEventFail(t *testing.T) { em, cancel := newTestEventManager(t) cancel() // to avoid infinite retry - data := &fftypes.Data{ - ID: fftypes.NewUUID(), - Value: fftypes.JSONAnyPtr(`{}`), - } - _, b := 
sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned, data) + _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned) mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") org1 := newTestOrg("org1") node1 := newTestNode("node1", org1) @@ -827,10 +901,12 @@ func TestMessageReceiveUnpinnedBatchPersistEventFail(t *testing.T) { }).Return(node1, nil) mim.On("CachedIdentityLookup", em.ctx, "signingOrg").Return(org1, false, nil) mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil, nil) - mdi.On("UpsertData", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) - mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) + mdi.On("InsertDataArray", em.ctx, mock.Anything).Return(nil) + mdi.On("InsertMessages", em.ctx, mock.Anything).Return(nil) mdi.On("UpdateMessages", em.ctx, mock.Anything, mock.Anything).Return(nil) mdi.On("InsertEvent", em.ctx, mock.Anything).Return(fmt.Errorf("pop")) + mdm := em.data.(*datamocks.Manager) + mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return() m, err := em.MessageReceived(mdx, "peer1", b) assert.Regexp(t, "FF10158", err) @@ -838,20 +914,18 @@ func TestMessageReceiveUnpinnedBatchPersistEventFail(t *testing.T) { mdi.AssertExpectations(t) mdx.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestMessageReceiveMessageEnsureLocalGroupFail(t *testing.T) { em, cancel := newTestEventManager(t) cancel() // to avoid infinite retry - data := &fftypes.Data{ - ID: fftypes.NewUUID(), - Value: fftypes.JSONAnyPtr(`{}`), - } - _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned, data) + _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned) mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") msh := em.definitions.(*definitionsmocks.DefinitionHandlers) msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(false, fmt.Errorf("pop")) @@ -868,14 +942,11 @@ func TestMessageReceiveMessageEnsureLocalGroupReject(t *testing.T) { em, cancel := newTestEventManager(t) cancel() // to avoid infinite retry - data := &fftypes.Data{ - ID: fftypes.NewUUID(), - Value: fftypes.JSONAnyPtr(`{}`), - } - _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned, data) + _, b := sampleBatchTransfer(t, fftypes.TransactionTypeUnpinned) mdi := em.database.(*databasemocks.Plugin) mdx := &dataexchangemocks.Plugin{} + mdx.On("Name").Return("utdx") msh := em.definitions.(*definitionsmocks.DefinitionHandlers) msh.On("EnsureLocalGroup", em.ctx, mock.Anything).Return(false, nil) diff --git a/internal/events/event_dispatcher.go b/internal/events/event_dispatcher.go index 27d91326b2..666220aa70 100644 --- a/internal/events/event_dispatcher.go +++ b/internal/events/event_dispatcher.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
 //
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -18,7 +18,6 @@ package events
 import (
 	"context"
-	"database/sql/driver"
 	"fmt"
 	"sync"
@@ -28,6 +27,7 @@ import (
 	"github.com/hyperledger/firefly/internal/i18n"
 	"github.com/hyperledger/firefly/internal/log"
 	"github.com/hyperledger/firefly/internal/retry"
+	"github.com/hyperledger/firefly/internal/txcommon"
 	"github.com/hyperledger/firefly/pkg/database"
 	"github.com/hyperledger/firefly/pkg/events"
 	"github.com/hyperledger/firefly/pkg/fftypes"
@@ -63,9 +63,10 @@ type eventDispatcher struct {
 	subscription *subscription
 	cel          *changeEventListener
 	changeEvents chan *fftypes.ChangeEvent
+	txHelper     txcommon.Helper
 }
-func newEventDispatcher(ctx context.Context, ei events.Plugin, di database.Plugin, dm data.Manager, sh definitions.DefinitionHandlers, connID string, sub *subscription, en *eventNotifier, cel *changeEventListener) *eventDispatcher {
+func newEventDispatcher(ctx context.Context, ei events.Plugin, di database.Plugin, dm data.Manager, sh definitions.DefinitionHandlers, connID string, sub *subscription, en *eventNotifier, cel *changeEventListener, txHelper txcommon.Helper) *eventDispatcher {
 	ctx, cancelCtx := context.WithCancel(ctx)
 	readAhead := config.GetUint(config.SubscriptionDefaultsReadAhead)
 	if sub.definition.Options.ReadAhead != nil {
@@ -93,6 +94,7 @@ func newEventDispatcher(ctx context.Context, ei events.Plugin, di database.Plugi
 		acksNacks:   make(chan ackNack),
 		closed:      make(chan struct{}),
 		cel:         cel,
+		txHelper:    txHelper,
 	}
 	pollerConf := &eventPollerConf{
@@ -149,7 +151,8 @@ func (ed *eventDispatcher) electAndStart() {
 	<-ed.eventPoller.closed
 }
-func (ed *eventDispatcher) getEvents(ctx context.Context, filter database.Filter) ([]fftypes.LocallySequenced, error) {
+func (ed *eventDispatcher) getEvents(ctx context.Context, filter database.Filter, offset int64) ([]fftypes.LocallySequenced, error) {
+	log.L(ctx).Tracef("Reading page of events > %d (first event would be %d)", offset, offset+1)
 	events, _, err := ed.database.GetEvents(ctx, filter)
 	ls := make([]fftypes.LocallySequenced, len(events))
 	for i, e := range events {
@@ -159,42 +162,19 @@
 }
 func (ed *eventDispatcher) enrichEvents(events []fftypes.LocallySequenced) ([]*fftypes.EventDelivery, error) {
-	// We need all the messages that match event references
-	refIDs := make([]driver.Value, len(events))
-	for i, ls := range events {
-		e := ls.(*fftypes.Event)
-		if e.Reference != nil {
-			refIDs[i] = *e.Reference
-		}
-	}
-
-	mfb := database.MessageQueryFactory.NewFilter(ed.ctx)
-	msgFilter := mfb.And(
-		mfb.In("id", refIDs),
-		mfb.Eq("namespace", ed.namespace),
-	)
-	msgs, _, err := ed.database.GetMessages(ed.ctx, msgFilter)
-	if err != nil {
-		return nil, err
-	}
-
 	enriched := make([]*fftypes.EventDelivery, len(events))
 	for i, ls := range events {
 		e := ls.(*fftypes.Event)
-		enriched[i] = &fftypes.EventDelivery{
-			Event:        *e,
-			Subscription: ed.subscription.definition.SubscriptionRef,
+		enrichedEvent, err := ed.txHelper.EnrichEvent(ed.ctx, e)
+		if err != nil {
+			return nil, err
 		}
-		for _, msg := range msgs {
-			if *e.Reference == *msg.Header.ID {
-				enriched[i].Message = msg
-				break
-			}
+		enriched[i] = &fftypes.EventDelivery{
+			EnrichedEvent: *enrichedEvent,
+			Subscription:  ed.subscription.definition.SubscriptionRef,
 		}
 	}
-	return enriched, nil
-
+	return enriched, nil
 }
 func (ed *eventDispatcher) filterEvents(candidates []*fftypes.EventDelivery) []*fftypes.EventDelivery {
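The bulk GetMessages query above is replaced by per-event enrichment through txcommon.Helper.EnrichEvent, whose implementation is outside this diff. Based on the dispatcher tests later in this patch (which mock GetMessageWithDataCached, GetTransactionByID, and GetBlockchainEventByID), a minimal sketch of the behaviour EnrichEvent is expected to provide looks roughly like this (enrichOne is an illustrative name, not code from this patch):

// Sketch of the per-event enrichment contract, inferred from the tests below;
// the real implementation lives in internal/txcommon.
func enrichOne(ctx context.Context, di database.Plugin, dm data.Manager, e *fftypes.Event) (*fftypes.EnrichedEvent, error) {
	enriched := &fftypes.EnrichedEvent{Event: *e}
	switch e.Type {
	case fftypes.EventTypeMessageConfirmed, fftypes.EventTypeMessageRejected:
		// Message events pull the full message through the data manager cache
		msg, _, _, err := dm.GetMessageWithDataCached(ctx, e.Reference)
		if err != nil {
			return nil, err
		}
		enriched.Message = msg
	case fftypes.EventTypeTransactionSubmitted:
		tx, err := di.GetTransactionByID(ctx, e.Reference)
		if err != nil {
			return nil, err
		}
		enriched.Transaction = tx
	case fftypes.EventTypeBlockchainEventReceived:
		be, err := di.GetBlockchainEventByID(ctx, e.Reference)
		if err != nil {
			return nil, err
		}
		enriched.BlockchainEvent = be
	}
	return enriched, nil
}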
@@ -204,40 +184,72 @@ func (ed *eventDispatcher) filterEvents(candidates []*fftypes.EventDelivery) []*fftypes.EventDelivery {
 		if filter.eventMatcher != nil && !filter.eventMatcher.MatchString(string(event.Type)) {
 			continue
 		}
+		msg := event.Message
+		tx := event.Transaction
+		be := event.BlockchainEvent
 		tag := ""
+		topic := event.Topic
 		group := ""
 		author := ""
-		var topics []string
+		txType := ""
+		beName := ""
+		beListener := ""
+
 		if msg != nil {
 			tag = msg.Header.Tag
-			topics = msg.Header.Topics
 			author = msg.Header.Author
 			if msg.Header.Group != nil {
 				group = msg.Header.Group.String()
 			}
 		}
-		if filter.tagFilter != nil && !filter.tagFilter.MatchString(tag) {
-			continue
+
+		if tx != nil {
+			txType = tx.Type.String()
 		}
-		if filter.authorFilter != nil && !filter.authorFilter.MatchString(author) {
-			continue
+
+		if be != nil {
+			beName = be.Name
+			beListener = be.Listener.String()
 		}
-		if filter.topicsFilter != nil {
+
+		if filter.topicFilter != nil {
 			topicsMatch := false
-			for _, topic := range topics {
-				if filter.topicsFilter.MatchString(topic) {
-					topicsMatch = true
-					break
-				}
+			if filter.topicFilter.MatchString(topic) {
+				topicsMatch = true
 			}
 			if !topicsMatch {
 				continue
 			}
 		}
-		if filter.groupFilter != nil && !filter.groupFilter.MatchString(group) {
-			continue
+
+		if filter.messageFilter != nil {
+			if filter.messageFilter.tagFilter != nil && !filter.messageFilter.tagFilter.MatchString(tag) {
+				continue
+			}
+			if filter.messageFilter.authorFilter != nil && !filter.messageFilter.authorFilter.MatchString(author) {
+				continue
+			}
+			if filter.messageFilter.groupFilter != nil && !filter.messageFilter.groupFilter.MatchString(group) {
+				continue
+			}
 		}
+
+		if filter.transactionFilter != nil {
+			if filter.transactionFilter.typeFilter != nil && !filter.transactionFilter.typeFilter.MatchString(txType) {
+				continue
+			}
+		}
+
+		if filter.blockchainFilter != nil {
+			if filter.blockchainFilter.nameFilter != nil && !filter.blockchainFilter.nameFilter.MatchString(beName) {
+				continue
+			}
+			if filter.blockchainFilter.listenerFilter != nil && !filter.blockchainFilter.listenerFilter.MatchString(beListener) {
+				continue
+			}
+		}
+
 		matchingEvents = append(matchingEvents, event)
 	}
 	return matchingEvents
@@ -308,19 +320,13 @@ func (ed *eventDispatcher) bufferedDelivery(events []fftypes.LocallySequenced) (
 				nacks++
 				ed.handleNackOffsetUpdate(an)
 			} else if nacks == 0 {
-				err := ed.handleAckOffsetUpdate(an)
-				if err != nil {
-					return false, err
-				}
+				ed.handleAckOffsetUpdate(an)
 				lastAck = an.offset
 			}
 		}
 	}
 	if nacks == 0 && lastAck != highestOffset {
-		err := ed.eventPoller.commitOffset(ed.ctx, highestOffset)
-		if err != nil {
-			return false, err
-		}
+		ed.eventPoller.commitOffset(highestOffset)
 	}
 	return true, nil // poll again straight away for more messages
 }
@@ -333,12 +339,12 @@ func (ed *eventDispatcher) handleNackOffsetUpdate(nack ackNack) {
 	// That means resetting the polling offset, and clearing out all our state
 	delete(ed.inflight, nack.id)
 	if ed.eventPoller.pollingOffset > nack.offset {
-		ed.eventPoller.rewindPollingOffset(nack.offset)
+		ed.eventPoller.rewindPollingOffset(nack.offset - 1)
 	}
 	ed.inflight = map[fftypes.UUID]*fftypes.Event{}
 }
-func (ed *eventDispatcher) handleAckOffsetUpdate(ack ackNack) error {
+func (ed *eventDispatcher) handleAckOffsetUpdate(ack ackNack) {
 	oldOffset := ed.eventPoller.getPollingOffset()
 	ed.mux.Lock()
 	delete(ed.inflight, ack.id)
@@ -351,9 +357,8 @@ func (ed *eventDispatcher) handleAckOffsetUpdate(ack ackNack) error {
 	ed.mux.Unlock()
 	if (lowestInflight == -1 || lowestInflight > ack.offset) && ack.offset > oldOffset {
 		// This was the lowest in flight, and we can
move the offset forwards - return ed.eventPoller.commitOffset(ed.ctx, ack.offset) + ed.eventPoller.commitOffset(ack.offset) } - return nil } func (ed *eventDispatcher) dispatchChangeEvent(ce *fftypes.ChangeEvent) { @@ -382,7 +387,7 @@ func (ed *eventDispatcher) deliverEvents() { var data []*fftypes.Data var err error if withData && event.Message != nil { - data, _, err = ed.data.GetMessageData(ed.ctx, event.Message, true) + data, _, err = ed.data.GetMessageDataCached(ed.ctx, event.Message) } if err == nil { err = ed.transport.DeliveryRequest(ed.connID, ed.subscription.definition, event, data) diff --git a/internal/events/event_dispatcher_test.go b/internal/events/event_dispatcher_test.go index 2b3653ba9a..b4b8677965 100644 --- a/internal/events/event_dispatcher_test.go +++ b/internal/events/event_dispatcher_test.go @@ -24,6 +24,7 @@ import ( "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/definitionsmocks" @@ -42,8 +43,9 @@ func newTestEventDispatcher(sub *subscription) (*eventDispatcher, func()) { mei.On("Name").Return("ut").Maybe() mdm := &datamocks.Manager{} msh := &definitionsmocks.DefinitionHandlers{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) ctx, cancel := context.WithCancel(context.Background()) - return newEventDispatcher(ctx, mei, mdi, mdm, msh, fftypes.NewUUID().String(), sub, newEventNotifier(ctx, "ut"), newChangeEventListener(ctx)), func() { + return newEventDispatcher(ctx, mei, mdi, mdm, msh, fftypes.NewUUID().String(), sub, newEventNotifier(ctx, "ut"), newChangeEventListener(ctx), txHelper), func() { cancel() config.Reset() } @@ -155,8 +157,10 @@ func TestEventDispatcherReadAheadOutOfOrderAcks(t *testing.T) { ed, cancel := newTestEventDispatcher(sub) defer cancel() go ed.deliverEvents() + ed.eventPoller.offsetCommitted = make(chan int64, 3) mdi := ed.database.(*databasemocks.Plugin) mei := ed.transport.(*eventsmocks.PluginAll) + mdm := ed.data.(*datamocks.Manager) eventDeliveries := make(chan *fftypes.EventDelivery) deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -174,27 +178,19 @@ func TestEventDispatcherReadAheadOutOfOrderAcks(t *testing.T) { ref4 := fftypes.NewUUID() ev4 := fftypes.NewUUID() - // Capture offset commits - offsetUpdates := make(chan int64) - uof := mdi.On("UpdateOffset", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - uof.RunFn = func(a mock.Arguments) { - f, err := a.Get(2).(database.Update).Finalize() - assert.NoError(t, err) - v, _ := f.SetOperations[0].Value.Value() - offsetUpdates <- v.(int64) - } // Setup enrichment - mdi.On("GetMessages", mock.Anything, mock.MatchedBy(func(filter database.Filter) bool { - fi, err := filter.Finalize() - assert.NoError(t, err) - assert.Equal(t, fmt.Sprintf(`( id IN ['%s','%s','%s','%s'] ) && ( namespace == 'ns1' )`, ref1, ref2, ref3, ref4), fi.String()) - return true - })).Return([]*fftypes.Message{ - {Header: fftypes.MessageHeader{ID: ref1}}, - {Header: fftypes.MessageHeader{ID: ref2}}, - {Header: fftypes.MessageHeader{ID: ref3}}, - {Header: fftypes.MessageHeader{ID: ref4}}, - }, nil, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref1}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", 
mock.Anything, ref2).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref2}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref3).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref3}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref4).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref4}, + }, nil, true, nil) // Deliver a batch of messages batch1Done := make(chan struct{}) @@ -230,15 +226,16 @@ func TestEventDispatcherReadAheadOutOfOrderAcks(t *testing.T) { // Confirm we get the offset updates in the correct order, even though the confirmations // came in a different order from the app. - assert.Equal(t, int64(10000001), <-offsetUpdates) - assert.Equal(t, int64(10000003), <-offsetUpdates) - assert.Equal(t, int64(10000004), <-offsetUpdates) + assert.Equal(t, int64(10000001), <-ed.eventPoller.offsetCommitted) + assert.Equal(t, int64(10000003), <-ed.eventPoller.offsetCommitted) + assert.Equal(t, int64(10000004), <-ed.eventPoller.offsetCommitted) // This should complete the batch <-batch1Done mdi.AssertExpectations(t) mei.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { @@ -257,6 +254,7 @@ func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { go ed.deliverEvents() mdi := ed.database.(*databasemocks.Plugin) + mdm := ed.data.(*datamocks.Manager) mei := ed.transport.(*eventsmocks.PluginAll) eventDeliveries := make(chan *fftypes.EventDelivery) @@ -276,12 +274,18 @@ func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { ev4 := fftypes.NewUUID() // Setup enrichment - mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{ - {Header: fftypes.MessageHeader{ID: ref1}}, - {Header: fftypes.MessageHeader{ID: ref2}}, - {Header: fftypes.MessageHeader{ID: ref3}}, - {Header: fftypes.MessageHeader{ID: ref4}}, - }, nil, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref1}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref2).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref2}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref3).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref3}, + }, nil, true, nil) + mdm.On("GetMessageWithDataCached", mock.Anything, ref4).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref4}, + }, nil, true, nil) // Deliver a batch of messages batch1Done := make(chan struct{}) @@ -322,6 +326,7 @@ func TestEventDispatcherNoReadAheadInOrder(t *testing.T) { mdi.AssertExpectations(t) mei.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestEventDispatcherChangeEvents(t *testing.T) { @@ -398,16 +403,33 @@ func TestEnrichEventsFailGetMessages(t *testing.T) { ed, cancel := newTestEventDispatcher(sub) defer cancel() + mdm := ed.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(nil, nil, false, fmt.Errorf("pop")) + + id1 := fftypes.NewUUID() + _, err := ed.enrichEvents([]fftypes.LocallySequenced{&fftypes.Event{ID: id1, Type: fftypes.EventTypeMessageConfirmed}}) + + assert.EqualError(t, err, "pop") +} + +func TestEnrichEventsFailGetTransactions(t *testing.T) { + + sub := &subscription{ + definition: &fftypes.Subscription{}, + } + ed, cancel := newTestEventDispatcher(sub) + defer cancel() + mdi := ed.database.(*databasemocks.Plugin) - mdi.On("GetMessages", mock.Anything, 
mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + mdi.On("GetTransactionByID", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) id1 := fftypes.NewUUID() - _, err := ed.enrichEvents([]fftypes.LocallySequenced{&fftypes.Event{ID: id1}}) + _, err := ed.enrichEvents([]fftypes.LocallySequenced{&fftypes.Event{ID: id1, Type: fftypes.EventTypeTransactionSubmitted}}) assert.EqualError(t, err, "pop") } -func TestFilterEventsMatch(t *testing.T) { +func TestEnrichEventsFailGetBlockchainEvents(t *testing.T) { sub := &subscription{ definition: &fftypes.Subscription{}, @@ -415,68 +437,134 @@ func TestFilterEventsMatch(t *testing.T) { ed, cancel := newTestEventDispatcher(sub) defer cancel() + mdi := ed.database.(*databasemocks.Plugin) + mdi.On("GetBlockchainEventByID", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + + id1 := fftypes.NewUUID() + _, err := ed.enrichEvents([]fftypes.LocallySequenced{&fftypes.Event{ID: id1, Type: fftypes.EventTypeBlockchainEventReceived}}) + + assert.EqualError(t, err, "pop") +} + +func TestFilterEventsMatch(t *testing.T) { + + sub := &subscription{ + definition: &fftypes.Subscription{}, + messageFilter: &messageFilter{}, + transactionFilter: &transactionFilter{}, + blockchainFilter: &blockchainFilter{}, + } + ed, cancel := newTestEventDispatcher(sub) + defer cancel() + gid1 := fftypes.NewRandB32() id1 := fftypes.NewUUID() id2 := fftypes.NewUUID() id3 := fftypes.NewUUID() + id4 := fftypes.NewUUID() + id5 := fftypes.NewUUID() + id6 := fftypes.NewUUID() + lid := fftypes.NewUUID() events := ed.filterEvents([]*fftypes.EventDelivery{ { - Event: fftypes.Event{ - ID: id1, - Type: fftypes.EventTypeMessageConfirmed, - }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - Topics: fftypes.FFStringArray{"topic1"}, - Tag: "tag1", - Group: nil, - SignerRef: fftypes.SignerRef{ - Author: "signingOrg", - Key: "0x12345", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: id1, + Type: fftypes.EventTypeMessageConfirmed, + Topic: "topic1", + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + Topics: fftypes.FFStringArray{"topic1"}, + Tag: "tag1", + Group: nil, + SignerRef: fftypes.SignerRef{ + Author: "signingOrg", + Key: "0x12345", + }, }, }, }, }, { - Event: fftypes.Event{ - ID: id2, - Type: fftypes.EventTypeMessageConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: id2, + Type: fftypes.EventTypeMessageConfirmed, + Topic: "topic1", + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + Topics: fftypes.FFStringArray{"topic1"}, + Tag: "tag2", + Group: gid1, + SignerRef: fftypes.SignerRef{ + Author: "org2", + Key: "0x23456", + }, + }, + }, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - Topics: fftypes.FFStringArray{"topic1"}, - Tag: "tag2", - Group: gid1, - SignerRef: fftypes.SignerRef{ - Author: "org2", - Key: "0x23456", + }, + { + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: id3, + Type: fftypes.EventTypeMessageRejected, + Topic: "topic2", + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + Topics: fftypes.FFStringArray{"topic2"}, + Tag: "tag1", + Group: nil, + SignerRef: fftypes.SignerRef{ + Author: "signingOrg", + Key: "0x12345", + }, }, }, }, }, { - Event: fftypes.Event{ - ID: id3, - Type: fftypes.EventTypeMessageRejected, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: id4, + Type: fftypes.EventTypeBlockchainEventReceived, + }, + BlockchainEvent: &fftypes.BlockchainEvent{ + Name: "flapflip", + 
}, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - Topics: fftypes.FFStringArray{"topic2"}, - Tag: "tag1", - Group: nil, - SignerRef: fftypes.SignerRef{ - Author: "signingOrg", - Key: "0x12345", - }, + }, + { + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: id5, + Type: fftypes.EventTypeTransactionSubmitted, + }, + Transaction: &fftypes.Transaction{ + Type: fftypes.TransactionTypeBatchPin, + }, + }, + }, + { + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: id6, + Type: fftypes.EventTypeBlockchainEventReceived, + }, + BlockchainEvent: &fftypes.BlockchainEvent{ + Listener: lid, }, }, }, }) ed.subscription.eventMatcher = regexp.MustCompile(fmt.Sprintf("^%s$", fftypes.EventTypeMessageConfirmed)) - ed.subscription.topicsFilter = regexp.MustCompile(".*") - ed.subscription.tagFilter = regexp.MustCompile(".*") - ed.subscription.groupFilter = regexp.MustCompile(".*") + ed.subscription.topicFilter = regexp.MustCompile(".*") + ed.subscription.messageFilter.tagFilter = regexp.MustCompile(".*") + ed.subscription.messageFilter.groupFilter = regexp.MustCompile(".*") matched := ed.filterEvents(events) assert.Equal(t, 2, len(matched)) assert.Equal(t, *id1, *matched[0].ID) @@ -484,52 +572,255 @@ func TestFilterEventsMatch(t *testing.T) { // id three has the wrong event type ed.subscription.eventMatcher = nil - ed.subscription.topicsFilter = nil - ed.subscription.tagFilter = nil - ed.subscription.groupFilter = nil + ed.subscription.topicFilter = nil + ed.subscription.messageFilter.tagFilter = nil + ed.subscription.messageFilter.groupFilter = nil matched = ed.filterEvents(events) - assert.Equal(t, 3, len(matched)) + assert.Equal(t, 6, len(matched)) assert.Equal(t, *id1, *matched[0].ID) assert.Equal(t, *id2, *matched[1].ID) assert.Equal(t, *id3, *matched[2].ID) + assert.Equal(t, *id4, *matched[3].ID) + assert.Equal(t, *id5, *matched[4].ID) - ed.subscription.topicsFilter = regexp.MustCompile("topic1") + ed.subscription.topicFilter = regexp.MustCompile("topic1") matched = ed.filterEvents(events) assert.Equal(t, 2, len(matched)) assert.Equal(t, *id1, *matched[0].ID) assert.Equal(t, *id2, *matched[1].ID) - ed.subscription.topicsFilter = nil - ed.subscription.tagFilter = regexp.MustCompile("tag2") + ed.subscription.topicFilter = nil + ed.subscription.messageFilter.tagFilter = regexp.MustCompile("tag2") matched = ed.filterEvents(events) assert.Equal(t, 1, len(matched)) assert.Equal(t, *id2, *matched[0].ID) - ed.subscription.topicsFilter = nil - ed.subscription.authorFilter = nil - ed.subscription.groupFilter = regexp.MustCompile(gid1.String()) + ed.subscription.topicFilter = nil + ed.subscription.messageFilter.authorFilter = nil + ed.subscription.messageFilter.groupFilter = regexp.MustCompile(gid1.String()) matched = ed.filterEvents(events) assert.Equal(t, 1, len(matched)) assert.Equal(t, *id2, *matched[0].ID) - ed.subscription.groupFilter = regexp.MustCompile("^$") + ed.subscription.messageFilter.groupFilter = regexp.MustCompile("^$") matched = ed.filterEvents(events) assert.Equal(t, 0, len(matched)) - ed.subscription.groupFilter = nil - ed.subscription.topicsFilter = nil - ed.subscription.tagFilter = nil - ed.subscription.authorFilter = regexp.MustCompile("org2") + ed.subscription.messageFilter.groupFilter = nil + ed.subscription.topicFilter = nil + ed.subscription.messageFilter.tagFilter = nil + ed.subscription.messageFilter.authorFilter = regexp.MustCompile("org2") matched = ed.filterEvents(events) assert.Equal(t, 1, len(matched)) assert.Equal(t, *id2, 
*matched[0].ID)
+	ed.subscription.messageFilter = nil
+	ed.subscription.transactionFilter.typeFilter = regexp.MustCompile(fmt.Sprintf("^%s$", fftypes.TransactionTypeBatchPin))
+	matched = ed.filterEvents(events)
+	assert.Equal(t, 1, len(matched))
+	assert.Equal(t, *id5, *matched[0].ID)
+
+	ed.subscription.messageFilter = nil
+	ed.subscription.transactionFilter = nil
+	ed.subscription.blockchainFilter.nameFilter = regexp.MustCompile("flapflip")
+	matched = ed.filterEvents(events)
+	assert.Equal(t, 1, len(matched))
+	assert.Equal(t, *id4, *matched[0].ID)
+
+	ed.subscription.messageFilter = nil
+	ed.subscription.transactionFilter = nil
+	ed.subscription.blockchainFilter.nameFilter = nil
+	ed.subscription.blockchainFilter.listenerFilter = regexp.MustCompile(lid.String())
+	matched = ed.filterEvents(events)
+	assert.Equal(t, 1, len(matched))
+	assert.Equal(t, *id6, *matched[0].ID)
+}
+
+func TestEnrichTransactionEvents(t *testing.T) {
+	log.SetLevel("debug")
+	sub := &subscription{
+		dispatcherElection: make(chan bool, 1),
+		definition: &fftypes.Subscription{
+			SubscriptionRef: fftypes.SubscriptionRef{ID: fftypes.NewUUID(), Namespace: "ns1", Name: "sub1"},
+			Ephemeral:       true,
+			Options:         fftypes.SubscriptionOptions{},
+		},
+	}
+
+	ed, cancel := newTestEventDispatcher(sub)
+	defer cancel()
+	go ed.deliverEvents()
+
+	mdi := ed.database.(*databasemocks.Plugin)
+	mei := ed.transport.(*eventsmocks.PluginAll)
+
+	eventDeliveries := make(chan *fftypes.EventDelivery)
+	deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
+	deliveryRequestMock.RunFn = func(a mock.Arguments) {
+		eventDeliveries <- a.Get(2).(*fftypes.EventDelivery)
+	}
+
+	// Setup the IDs
+	ref1 := fftypes.NewUUID()
+	ev1 := fftypes.NewUUID()
+	ref2 := fftypes.NewUUID()
+	ev2 := fftypes.NewUUID()
+	ref3 := fftypes.NewUUID()
+	ev3 := fftypes.NewUUID()
+	ref4 := fftypes.NewUUID()
+	ev4 := fftypes.NewUUID()
+
+	// Setup enrichment
+	mdi.On("GetTransactionByID", mock.Anything, ref1).Return(&fftypes.Transaction{
+		ID: ref1,
+	}, nil)
+	mdi.On("GetTransactionByID", mock.Anything, ref2).Return(&fftypes.Transaction{
+		ID: ref2,
+	}, nil)
+	mdi.On("GetTransactionByID", mock.Anything, ref3).Return(&fftypes.Transaction{
+		ID: ref3,
+	}, nil)
+	mdi.On("GetTransactionByID", mock.Anything, ref4).Return(&fftypes.Transaction{
+		ID: ref4,
+	}, nil)
+
+	// Deliver a batch of messages
+	batch1Done := make(chan struct{})
+	go func() {
+		repoll, err := ed.bufferedDelivery([]fftypes.LocallySequenced{
+			&fftypes.Event{ID: ev1, Sequence: 10000001, Reference: ref1, Type: fftypes.EventTypeTransactionSubmitted}, // match
+			&fftypes.Event{ID: ev2, Sequence: 10000002, Reference: ref2, Type: fftypes.EventTypeTransactionSubmitted}, // match
+			&fftypes.Event{ID: ev3, Sequence: 10000003, Reference: ref3, Type: fftypes.EventTypeTransactionSubmitted}, // match
+			&fftypes.Event{ID: ev4, Sequence: 10000004, Reference: ref4, Type: fftypes.EventTypeTransactionSubmitted}, // match
+		})
+		assert.NoError(t, err)
+		assert.True(t, repoll)
+		close(batch1Done)
+	}()
+
+	// Wait for the events to be delivered to the client one at a time (no read ahead on this subscription)
+	event1 := <-eventDeliveries
+	assert.Equal(t, *ev1, *event1.ID)
+	assert.Equal(t, *ref1, *event1.Transaction.ID)
+	select {
+	case <-eventDeliveries:
+		assert.Fail(t, "should not have read ahead")
+	default:
+	}
+	ed.deliveryResponse(&fftypes.EventDeliveryResponse{ID: event1.ID})
+
+	event2 := <-eventDeliveries
+	ed.deliveryResponse(&fftypes.EventDeliveryResponse{ID: event2.ID})
+
+	event3 := <-eventDeliveries
+	ed.deliveryResponse(&fftypes.EventDeliveryResponse{ID: event3.ID})
+
+	event4 := <-eventDeliveries
+	ed.deliveryResponse(&fftypes.EventDeliveryResponse{ID: event4.ID})
+
+	// This should complete the batch
+	<-batch1Done
+
+	mdi.AssertExpectations(t)
+	mei.AssertExpectations(t)
+}
+
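TestFilterEventsMatch and the enrichment test above show how the old flat tag/topics/group/author filters become a topic filter plus three optional sub-filter groups. As a sketch of how the dispatcher-side filter tree consulted by filterEvents might be assembled (regex strings assumed to have been validated elsewhere; subDef assumed to be a resolved *fftypes.Subscription) - note that leaving a group nil, as the transaction-only case in the test does for messageFilter, skips that whole family of checks:

// Illustrative assembly of the parsed filter tree used by filterEvents;
// this fragment sits conceptually inside internal/events, alongside the
// unexported subscription/messageFilter/transactionFilter/blockchainFilter types.
sub := &subscription{
	definition:  subDef,
	topicFilter: regexp.MustCompile("^topic1$"),
	messageFilter: &messageFilter{
		tagFilter:   regexp.MustCompile("^tag2$"),
		groupFilter: regexp.MustCompile(".*"),
	},
	transactionFilter: &transactionFilter{
		typeFilter: regexp.MustCompile(fmt.Sprintf("^%s$", fftypes.TransactionTypeBatchPin)),
	},
	blockchainFilter: &blockchainFilter{
		nameFilter:     regexp.MustCompile("flapflip"),
		listenerFilter: nil, // a nil member places no constraint on that attribute
	},
}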
+func TestEnrichBlockchainEventEvents(t *testing.T) {
+	log.SetLevel("debug")
+	sub := &subscription{
+		dispatcherElection: make(chan bool, 1),
+		definition: &fftypes.Subscription{
+			SubscriptionRef: fftypes.SubscriptionRef{ID: fftypes.NewUUID(), Namespace: "ns1", Name: "sub1"},
+			Ephemeral:       true,
+			Options:         fftypes.SubscriptionOptions{},
+		},
+	}
+
+	ed, cancel := newTestEventDispatcher(sub)
+	defer cancel()
+	go ed.deliverEvents()
+
+	mdi := ed.database.(*databasemocks.Plugin)
+	mei := ed.transport.(*eventsmocks.PluginAll)
+
+	eventDeliveries := make(chan *fftypes.EventDelivery)
+	deliveryRequestMock := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
+	deliveryRequestMock.RunFn = func(a mock.Arguments) {
+		eventDeliveries <- a.Get(2).(*fftypes.EventDelivery)
+	}
+
+	// Setup the IDs
+	ref1 := fftypes.NewUUID()
+	ev1 := fftypes.NewUUID()
+	ref2 := fftypes.NewUUID()
+	ev2 := fftypes.NewUUID()
+	ref3 := fftypes.NewUUID()
+	ev3 := fftypes.NewUUID()
+	ref4 := fftypes.NewUUID()
+	ev4 := fftypes.NewUUID()
+
+	// Setup enrichment
+	mdi.On("GetBlockchainEventByID", mock.Anything, ref1).Return(&fftypes.BlockchainEvent{
+		ID: ref1,
+	}, nil)
+	mdi.On("GetBlockchainEventByID", mock.Anything, ref2).Return(&fftypes.BlockchainEvent{
+		ID: ref2,
+	}, nil)
+	mdi.On("GetBlockchainEventByID", mock.Anything, ref3).Return(&fftypes.BlockchainEvent{
+		ID: ref3,
+	}, nil)
+	mdi.On("GetBlockchainEventByID", mock.Anything, ref4).Return(&fftypes.BlockchainEvent{
+		ID: ref4,
+	}, nil)
+
+	// Deliver a batch of messages
+	batch1Done := make(chan struct{})
+	go func() {
+		repoll, err := ed.bufferedDelivery([]fftypes.LocallySequenced{
+			&fftypes.Event{ID: ev1, Sequence: 10000001, Reference: ref1, Type: fftypes.EventTypeBlockchainEventReceived}, // match
+			&fftypes.Event{ID: ev2, Sequence: 10000002, Reference: ref2, Type: fftypes.EventTypeBlockchainEventReceived}, // match
+			&fftypes.Event{ID: ev3, Sequence: 10000003, Reference: ref3, Type: fftypes.EventTypeBlockchainEventReceived}, // match
+			&fftypes.Event{ID: ev4, Sequence: 10000004, Reference: ref4, Type: fftypes.EventTypeBlockchainEventReceived}, // match
+		})
+		assert.NoError(t, err)
+		assert.True(t, repoll)
+		close(batch1Done)
+	}()
+
+	// Wait for the events to be delivered to the client one at a time (no read ahead on this subscription)
+	event1 := <-eventDeliveries
+	assert.Equal(t, *ev1, *event1.ID)
+	assert.Equal(t, *ref1, *event1.BlockchainEvent.ID)
+	select {
+	case <-eventDeliveries:
+		assert.Fail(t, "should not have read ahead")
+	default:
+	}
+	ed.deliveryResponse(&fftypes.EventDeliveryResponse{ID: event1.ID})
+
+	event2 := <-eventDeliveries
+	ed.deliveryResponse(&fftypes.EventDeliveryResponse{ID: event2.ID})
+
+	event3 := <-eventDeliveries
+	ed.deliveryResponse(&fftypes.EventDeliveryResponse{ID: event3.ID})
+
+	event4 := <-eventDeliveries
+	ed.deliveryResponse(&fftypes.EventDeliveryResponse{ID: event4.ID})
+
+	// This should complete the batch
+	<-batch1Done
+
+	mdi.AssertExpectations(t)
+	mei.AssertExpectations(t)
 }
 
 func TestBufferedDeliveryNoEvents(t *testing.T) {
 	sub := &subscription{
-		definition: &fftypes.Subscription{},
+
definition: &fftypes.Subscription{}, + messageFilter: &messageFilter{}, + transactionFilter: &transactionFilter{}, + blockchainFilter: &blockchainFilter{}, } ed, cancel := newTestEventDispatcher(sub) defer cancel() @@ -543,15 +834,18 @@ func TestBufferedDeliveryNoEvents(t *testing.T) { func TestBufferedDeliveryEnrichFail(t *testing.T) { sub := &subscription{ - definition: &fftypes.Subscription{}, + definition: &fftypes.Subscription{}, + messageFilter: &messageFilter{}, + transactionFilter: &transactionFilter{}, + blockchainFilter: &blockchainFilter{}, } ed, cancel := newTestEventDispatcher(sub) defer cancel() - mdi := ed.database.(*databasemocks.Plugin) - mdi.On("GetMessages", mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + mdm := ed.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", mock.Anything, mock.Anything).Return(nil, nil, false, fmt.Errorf("pop")) - repoll, err := ed.bufferedDelivery([]fftypes.LocallySequenced{&fftypes.Event{ID: fftypes.NewUUID()}}) + repoll, err := ed.bufferedDelivery([]fftypes.LocallySequenced{&fftypes.Event{ID: fftypes.NewUUID(), Type: fftypes.EventTypeMessageConfirmed}}) assert.False(t, repoll) assert.EqualError(t, err, "pop") @@ -560,7 +854,10 @@ func TestBufferedDeliveryEnrichFail(t *testing.T) { func TestBufferedDeliveryClosedContext(t *testing.T) { sub := &subscription{ - definition: &fftypes.Subscription{}, + messageFilter: &messageFilter{}, + transactionFilter: &transactionFilter{}, + blockchainFilter: &blockchainFilter{}, + definition: &fftypes.Subscription{}, } ed, cancel := newTestEventDispatcher(sub) go ed.deliverEvents() @@ -568,7 +865,6 @@ func TestBufferedDeliveryClosedContext(t *testing.T) { mdi := ed.database.(*databasemocks.Plugin) mei := ed.transport.(*eventsmocks.PluginAll) - mdi.On("GetMessages", mock.Anything, mock.Anything).Return(nil, nil, nil) mdi.On("GetDataRefs", mock.Anything, mock.Anything).Return(nil, nil, nil) mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -589,7 +885,6 @@ func TestBufferedDeliveryNackRewind(t *testing.T) { mdi := ed.database.(*databasemocks.Plugin) mei := ed.transport.(*eventsmocks.PluginAll) - mdi.On("GetMessages", mock.Anything, mock.Anything).Return(nil, nil, nil) mdi.On("GetDataRefs", mock.Anything, mock.Anything).Return(nil, nil, nil) mdi.On("UpdateOffset", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -616,61 +911,17 @@ func TestBufferedDeliveryNackRewind(t *testing.T) { }) <-bdDone - assert.Equal(t, int64(100001), ed.eventPoller.pollingOffset) -} - -func TestBufferedDeliveryAckFail(t *testing.T) { - - sub := &subscription{ - definition: &fftypes.Subscription{}, - } - ed, cancel := newTestEventDispatcher(sub) - defer cancel() - go ed.deliverEvents() - ed.readAhead = 50 - - mdi := ed.database.(*databasemocks.Plugin) - mei := ed.transport.(*eventsmocks.PluginAll) - mdi.On("GetMessages", mock.Anything, mock.Anything).Return(nil, nil, nil) - mdi.On("GetDataRefs", mock.Anything, mock.Anything).Return(nil, nil, nil) - mdi.On("UpdateOffset", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - - delivered := make(chan bool) - deliver := mei.On("DeliveryRequest", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - deliver.RunFn = func(a mock.Arguments) { - delivered <- true - } - - bdDone := make(chan struct{}) - ev1 := fftypes.NewUUID() - ev2 := fftypes.NewUUID() - ed.eventPoller.pollingOffset = 100000 - go func() { - repoll, err := 
ed.bufferedDelivery([]fftypes.LocallySequenced{ - &fftypes.Event{ID: ev1, Sequence: 100001}, - &fftypes.Event{ID: ev2, Sequence: 100002}, - }) - assert.EqualError(t, err, "pop") - assert.False(t, repoll) - close(bdDone) - }() - - <-delivered - <-delivered - ed.deliveryResponse(&fftypes.EventDeliveryResponse{ - ID: ev1, - }) - - <-bdDone - assert.Equal(t, int64(100001), ed.eventPoller.pollingOffset) - + assert.Equal(t, int64(100000), ed.eventPoller.pollingOffset) } func TestBufferedDeliveryFailNack(t *testing.T) { log.SetLevel("trace") sub := &subscription{ - definition: &fftypes.Subscription{}, + definition: &fftypes.Subscription{}, + messageFilter: &messageFilter{}, + transactionFilter: &transactionFilter{}, + blockchainFilter: &blockchainFilter{}, } ed, cancel := newTestEventDispatcher(sub) defer cancel() @@ -679,7 +930,6 @@ func TestBufferedDeliveryFailNack(t *testing.T) { mdi := ed.database.(*databasemocks.Plugin) mei := ed.transport.(*eventsmocks.PluginAll) - mdi.On("GetMessages", mock.Anything, mock.Anything).Return(nil, nil, nil) mdi.On("GetDataRefs", mock.Anything, mock.Anything).Return(nil, nil, nil) mdi.On("UpdateOffset", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) @@ -711,36 +961,6 @@ func TestBufferedDeliveryFailNack(t *testing.T) { } -func TestBufferedFinalAckFail(t *testing.T) { - - sub := &subscription{ - definition: &fftypes.Subscription{}, - topicsFilter: regexp.MustCompile("never matches"), - } - ed, cancel := newTestEventDispatcher(sub) - defer cancel() - go ed.deliverEvents() - ed.readAhead = 50 - - mdi := ed.database.(*databasemocks.Plugin) - mdi.On("GetMessages", mock.Anything, mock.Anything).Return(nil, nil, nil) - mdi.On("GetDataRefs", mock.Anything, mock.Anything).Return(nil, nil, nil) - mdi.On("UpdateOffset", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - - ev1 := fftypes.NewUUID() - ev2 := fftypes.NewUUID() - ed.eventPoller.pollingOffset = 100000 - repoll, err := ed.bufferedDelivery([]fftypes.LocallySequenced{ - &fftypes.Event{ID: ev1, Sequence: 100001}, - &fftypes.Event{ID: ev2, Sequence: 100002}, - }) - assert.EqualError(t, err, "pop") - assert.False(t, repoll) - - assert.Equal(t, int64(100002), ed.eventPoller.pollingOffset) - -} - func TestAckNotInFlightNoop(t *testing.T) { sub := &subscription{ @@ -794,7 +1014,7 @@ func TestGetEvents(t *testing.T) { {Sequence: 12345}, }, nil, nil) - lc, err := ed.getEvents(ag.ctx, database.EventQueryFactory.NewFilter(ag.ctx).Gte("sequence", 12345)) + lc, err := ed.getEvents(ag.ctx, database.EventQueryFactory.NewFilter(ag.ctx).Gte("sequence", 12345), 12345) assert.NoError(t, err) assert.Equal(t, int64(12345), lc[0].LocalSequence()) } @@ -815,19 +1035,21 @@ func TestDeliverEventsWithDataFail(t *testing.T) { defer cancel() mdm := ed.data.(*datamocks.Manager) - mdm.On("GetMessageData", ed.ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageDataCached", ed.ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) id1 := fftypes.NewUUID() ed.eventDelivery <- &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: id1, - }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: id1, }, - Data: fftypes.DataRefs{ - {ID: fftypes.NewUUID()}, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + }, + Data: fftypes.DataRefs{ + {ID: fftypes.NewUUID()}, + }, }, }, } diff --git 
a/internal/events/event_manager.go b/internal/events/event_manager.go index 605ab03147..c4992bf074 100644 --- a/internal/events/event_manager.go +++ b/internal/events/event_manager.go @@ -21,6 +21,7 @@ import ( "context" "encoding/json" "strconv" + "time" "github.com/hyperledger/firefly/internal/assets" "github.com/hyperledger/firefly/internal/broadcast" @@ -35,6 +36,7 @@ import ( "github.com/hyperledger/firefly/internal/metrics" "github.com/hyperledger/firefly/internal/privatemessaging" "github.com/hyperledger/firefly/internal/retry" + "github.com/hyperledger/firefly/internal/shareddownload" "github.com/hyperledger/firefly/internal/sysmessaging" "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/blockchain" @@ -43,6 +45,7 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" "github.com/hyperledger/firefly/pkg/sharedstorage" "github.com/hyperledger/firefly/pkg/tokens" + "github.com/karlseguin/ccache" ) type EventManager interface { @@ -64,9 +67,13 @@ type EventManager interface { // Bound dataexchange callbacks TransferResult(dx dataexchange.Plugin, trackingID string, status fftypes.OpStatus, update fftypes.TransportStatusUpdate) error - BLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error + PrivateBLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error MessageReceived(dx dataexchange.Plugin, peerID string, data []byte) (manifest string, err error) + // Bound sharedstorage callbacks + SharedStorageBatchDownloaded(ss sharedstorage.Plugin, ns, payloadRef string, data []byte) (*fftypes.UUID, error) + SharedStorageBLOBDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string) error + // Bound token callbacks TokenPoolCreated(ti tokens.Plugin, pool *tokens.TokenPool) error TokensTransferred(ti tokens.Plugin, transfer *tokens.TokenTransfer) error @@ -77,63 +84,69 @@ type EventManager interface { } type eventManager struct { - ctx context.Context - ni sysmessaging.LocalNodeInfo - sharedstorage sharedstorage.Plugin - database database.Plugin - txHelper txcommon.Helper - identity identity.Manager - definitions definitions.DefinitionHandlers - data data.Manager - subManager *subscriptionManager - retry retry.Retry - aggregator *aggregator - broadcast broadcast.Manager - messaging privatemessaging.Manager - assets assets.Manager - newEventNotifier *eventNotifier - newPinNotifier *eventNotifier - opCorrelationRetries int - defaultTransport string - internalEvents *system.Events - metrics metrics.Manager + ctx context.Context + ni sysmessaging.LocalNodeInfo + sharedstorage sharedstorage.Plugin + database database.Plugin + txHelper txcommon.Helper + identity identity.Manager + definitions definitions.DefinitionHandlers + data data.Manager + subManager *subscriptionManager + retry retry.Retry + aggregator *aggregator + broadcast broadcast.Manager + messaging privatemessaging.Manager + assets assets.Manager + sharedDownload shareddownload.Manager + newEventNotifier *eventNotifier + newPinNotifier *eventNotifier + opCorrelationRetries int + defaultTransport string + internalEvents *system.Events + metrics metrics.Manager + chainListenerCache *ccache.Cache + chainListenerCacheTTL time.Duration } -func NewEventManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, si sharedstorage.Plugin, di database.Plugin, bi blockchain.Plugin, im identity.Manager, dh definitions.DefinitionHandlers, dm data.Manager, bm broadcast.Manager, 
pm privatemessaging.Manager, am assets.Manager, mm metrics.Manager) (EventManager, error) { +func NewEventManager(ctx context.Context, ni sysmessaging.LocalNodeInfo, si sharedstorage.Plugin, di database.Plugin, bi blockchain.Plugin, im identity.Manager, dh definitions.DefinitionHandlers, dm data.Manager, bm broadcast.Manager, pm privatemessaging.Manager, am assets.Manager, sd shareddownload.Manager, mm metrics.Manager, txHelper txcommon.Helper) (EventManager, error) { if ni == nil || si == nil || di == nil || bi == nil || im == nil || dh == nil || dm == nil || bm == nil || pm == nil || am == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } newPinNotifier := newEventNotifier(ctx, "pins") newEventNotifier := newEventNotifier(ctx, "events") em := &eventManager{ - ctx: log.WithLogField(ctx, "role", "event-manager"), - ni: ni, - sharedstorage: si, - database: di, - txHelper: txcommon.NewTransactionHelper(di), - identity: im, - definitions: dh, - data: dm, - broadcast: bm, - messaging: pm, - assets: am, + ctx: log.WithLogField(ctx, "role", "event-manager"), + ni: ni, + sharedstorage: si, + database: di, + txHelper: txHelper, + identity: im, + definitions: dh, + data: dm, + broadcast: bm, + messaging: pm, + assets: am, + sharedDownload: sd, retry: retry.Retry{ InitialDelay: config.GetDuration(config.EventAggregatorRetryInitDelay), MaximumDelay: config.GetDuration(config.EventAggregatorRetryMaxDelay), Factor: config.GetFloat64(config.EventAggregatorRetryFactor), }, - defaultTransport: config.GetString(config.EventTransportsDefault), - opCorrelationRetries: config.GetInt(config.EventAggregatorOpCorrelationRetries), - newEventNotifier: newEventNotifier, - newPinNotifier: newPinNotifier, - aggregator: newAggregator(ctx, di, bi, dh, im, dm, newPinNotifier, mm), - metrics: mm, + defaultTransport: config.GetString(config.EventTransportsDefault), + opCorrelationRetries: config.GetInt(config.EventAggregatorOpCorrelationRetries), + newEventNotifier: newEventNotifier, + newPinNotifier: newPinNotifier, + aggregator: newAggregator(ctx, di, bi, dh, im, dm, newPinNotifier, mm), + metrics: mm, + chainListenerCache: ccache.New(ccache.Configure().MaxSize(config.GetByteSize(config.EventListenerTopicCacheSize))), + chainListenerCacheTTL: config.GetDuration(config.EventListenerTopicCacheTTL), } ie, _ := eifactory.GetPlugin(ctx, system.SystemEventsTransport) em.internalEvents = ie.(*system.Events) var err error - if em.subManager, err = newSubscriptionManager(ctx, di, dm, newEventNotifier, dh); err != nil { + if em.subManager, err = newSubscriptionManager(ctx, di, dm, newEventNotifier, dh, txHelper); err != nil { return nil, err } diff --git a/internal/events/event_manager_test.go b/internal/events/event_manager_test.go index 59dfebb00b..4ff3a71529 100644 --- a/internal/events/event_manager_test.go +++ b/internal/events/event_manager_test.go @@ -23,6 +23,7 @@ import ( "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/internal/events/system" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/assetmocks" "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/broadcastmocks" @@ -33,6 +34,7 @@ import ( "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/metricsmocks" "github.com/hyperledger/firefly/mocks/privatemessagingmocks" + "github.com/hyperledger/firefly/mocks/shareddownloadmocks" "github.com/hyperledger/firefly/mocks/sharedstoragemocks" 
"github.com/hyperledger/firefly/mocks/sysmessagingmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" @@ -65,7 +67,9 @@ func newTestEventManagerCommon(t *testing.T, metrics bool) (*eventManager, func( mpm := &privatemessagingmocks.Manager{} mam := &assetmocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} + mdd := &shareddownloadmocks.Manager{} mmi := &metricsmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mmi.On("IsMetricsEnabled").Return(metrics) if metrics { mmi.On("TransferConfirmed", mock.Anything) @@ -73,7 +77,7 @@ func newTestEventManagerCommon(t *testing.T, metrics bool) (*eventManager, func( mni.On("GetNodeUUID", mock.Anything).Return(testNodeID).Maybe() met.On("Name").Return("ut").Maybe() mbi.On("VerifierType").Return(fftypes.VerifierTypeEthAddress).Maybe() - emi, err := NewEventManager(ctx, mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, mmi) + emi, err := NewEventManager(ctx, mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, mdd, mmi, txHelper) em := emi.(*eventManager) em.txHelper = &txcommonmocks.Helper{} rag := mdi.On("RunAsGroup", em.ctx, mock.Anything).Maybe() @@ -104,7 +108,7 @@ func TestStartStop(t *testing.T) { } func TestStartStopBadDependencies(t *testing.T) { - _, err := NewEventManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + _, err := NewEventManager(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", err) } @@ -122,9 +126,11 @@ func TestStartStopBadTransports(t *testing.T) { mpm := &privatemessagingmocks.Manager{} mni := &sysmessagingmocks.LocalNodeInfo{} mam := &assetmocks.Manager{} + msd := &shareddownloadmocks.Manager{} mm := &metricsmocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) mbi.On("VerifierType").Return(fftypes.VerifierTypeEthAddress) - _, err := NewEventManager(context.Background(), mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, mm) + _, err := NewEventManager(context.Background(), mni, mpi, mdi, mbi, mim, msh, mdm, mbm, mpm, mam, msd, mm, txHelper) assert.Regexp(t, "FF10172", err) } diff --git a/internal/events/event_poller.go b/internal/events/event_poller.go index 88211b63b7..8dca0e1854 100644 --- a/internal/events/event_poller.go +++ b/internal/events/event_poller.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -29,15 +29,16 @@ import ( ) type eventPoller struct { - ctx context.Context - database database.Plugin - shoulderTaps chan bool - eventNotifier *eventNotifier - closed chan struct{} - offsetID int64 - pollingOffset int64 - mux sync.Mutex - conf *eventPollerConf + ctx context.Context + database database.Plugin + shoulderTaps chan bool + eventNotifier *eventNotifier + closed chan struct{} + offsetCommitted chan int64 + offsetID int64 + pollingOffset int64 + mux sync.Mutex + conf *eventPollerConf } type newEventsHandler func(events []fftypes.LocallySequenced) (bool, error) @@ -50,7 +51,7 @@ type eventPollerConf struct { firstEvent *fftypes.SubOptsFirstEvent queryFactory database.QueryFactory addCriteria func(database.AndFilter) database.AndFilter - getItems func(context.Context, database.Filter) ([]fftypes.LocallySequenced, error) + getItems func(context.Context, database.Filter, int64) ([]fftypes.LocallySequenced, error) maybeRewind func() (bool, int64) newEventsHandler newEventsHandler namespace string @@ -62,12 +63,13 @@ type eventPollerConf struct { func newEventPoller(ctx context.Context, di database.Plugin, en *eventNotifier, conf *eventPollerConf) *eventPoller { ep := &eventPoller{ - ctx: log.WithLogField(ctx, "role", fmt.Sprintf("ep[%s:%s]", conf.namespace, conf.offsetName)), - database: di, - shoulderTaps: make(chan bool, 1), - eventNotifier: en, - closed: make(chan struct{}), - conf: conf, + ctx: log.WithLogField(ctx, "role", fmt.Sprintf("ep[%s:%s]", conf.namespace, conf.offsetName)), + database: di, + shoulderTaps: make(chan bool, 1), + offsetCommitted: make(chan int64, 1), + eventNotifier: en, + closed: make(chan struct{}), + conf: conf, } if ep.conf.maybeRewind == nil { ep.conf.maybeRewind = func() (bool, int64) { return false, -1 } @@ -121,15 +123,17 @@ func (ep *eventPoller) start() { } go ep.newEventNotifications() go ep.eventLoop() + go ep.offsetCommitLoop() } -func (ep *eventPoller) rewindPollingOffset(offset int64) { +func (ep *eventPoller) rewindPollingOffset(offset int64) int64 { log.L(ep.ctx).Infof("Event polling rewind to: %d", offset) ep.mux.Lock() defer ep.mux.Unlock() if offset < ep.pollingOffset { - ep.pollingOffset = offset // this will be re-delivered + ep.pollingOffset = offset } + return ep.pollingOffset } func (ep *eventPoller) getPollingOffset() int64 { @@ -138,21 +142,20 @@ func (ep *eventPoller) getPollingOffset() int64 { return ep.pollingOffset } -func (ep *eventPoller) commitOffset(ctx context.Context, offset int64) error { +func (ep *eventPoller) commitOffset(offset int64) { // Next polling cycle should start one higher than this offset + ep.mux.Lock() ep.pollingOffset = offset + ep.mux.Unlock() - // Must be called from the event polling routine - l := log.L(ctx) // No persistence for ephemeral (non-durable) subscriptions if !ep.conf.ephemeral { - u := database.OffsetQueryFactory.NewUpdate(ep.ctx).Set("current", ep.pollingOffset) - if err := ep.database.UpdateOffset(ctx, ep.offsetID, u); err != nil { - return err + // We do this in the background, as it is an expensive full DB commit + select { + case ep.offsetCommitted <- offset: + default: } } - l.Debugf("Event polling offset committed %d", ep.pollingOffset) - return nil } func (ep *eventPoller) readPage() ([]fftypes.LocallySequenced, error) { @@ -163,7 +166,7 @@ func (ep *eventPoller) readPage() ([]fftypes.LocallySequenced, error) { // a rewind based on it. 
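A minimal standalone sketch of the pattern the commitOffset rework above introduces: the in-memory offset is updated under the mutex, and the expensive database commit is handed to a background goroutine through a one-deep channel, with a non-blocking send so the polling loop never waits on the DB. The poller type and the fmt-based "persist" below are illustrative stand-ins, not the FireFly types:

package main

import (
	"fmt"
	"sync"
)

// offsetPoller is a stripped-down illustration: a mutex-guarded in-memory
// offset, plus a 1-deep channel feeding a background committer goroutine.
type offsetPoller struct {
	mux             sync.Mutex
	pollingOffset   int64
	offsetCommitted chan int64
}

func (p *offsetPoller) commitOffset(offset int64) {
	p.mux.Lock()
	p.pollingOffset = offset
	p.mux.Unlock()
	// Non-blocking send: if a commit is already queued, the loop will read
	// the newest value from pollingOffset anyway, so dropping is safe.
	select {
	case p.offsetCommitted <- offset:
	default:
	}
}

func (p *offsetPoller) offsetCommitLoop() {
	for range p.offsetCommitted {
		p.mux.Lock()
		offset := p.pollingOffset
		p.mux.Unlock()
		fmt.Printf("persisting offset %d\n", offset) // stand-in for the DB update
	}
}

func main() {
	p := &offsetPoller{offsetCommitted: make(chan int64, 1)}
	done := make(chan struct{})
	go func() { p.offsetCommitLoop(); close(done) }()
	p.commitOffset(12345)
	p.commitOffset(12346) // does not block, even if the committer is busy
	close(p.offsetCommitted)
	<-done
}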
rewind, pollingOffset := ep.conf.maybeRewind() if rewind { - ep.rewindPollingOffset(pollingOffset) + pollingOffset = ep.rewindPollingOffset(pollingOffset) } else { // Ensure we go through the mutex to pickup rewinds that happened elsewhere pollingOffset = ep.getPollingOffset() @@ -175,7 +178,7 @@ func (ep *eventPoller) readPage() ([]fftypes.LocallySequenced, error) { fb.Gt("sequence", pollingOffset), ) filter = ep.conf.addCriteria(filter) - items, err = ep.conf.getItems(ep.ctx, filter.Sort("sequence").Limit(uint64(ep.conf.eventBatchSize))) + items, err = ep.conf.getItems(ep.ctx, filter.Sort("sequence").Limit(uint64(ep.conf.eventBatchSize)), pollingOffset) if err != nil { return true, err // Retry indefinitely, until context cancelled } @@ -187,7 +190,10 @@ func (ep *eventPoller) readPage() ([]fftypes.LocallySequenced, error) { func (ep *eventPoller) eventLoop() { l := log.L(ep.ctx) l.Debugf("Started event detector") - defer close(ep.closed) + defer func() { + close(ep.closed) + close(ep.offsetCommitted) + }() for { // Read messages from the DB - in an error condition we retry until success, or a closed context @@ -219,6 +225,23 @@ func (ep *eventPoller) eventLoop() { } } +func (ep *eventPoller) offsetCommitLoop() { + l := log.L(ep.ctx) + for range ep.offsetCommitted { + _ = ep.conf.retry.Do(ep.ctx, "process events", func(attempt int) (retry bool, err error) { + ep.mux.Lock() + pollingOffset := ep.pollingOffset + ep.mux.Unlock() + u := database.OffsetQueryFactory.NewUpdate(ep.ctx).Set("current", pollingOffset) + if err := ep.database.UpdateOffset(ep.ctx, ep.offsetID, u); err != nil { + return true, err + } + l.Debugf("Event polling offset committed %d", pollingOffset) + return false, nil + }) + } +} + func (ep *eventPoller) dispatchEventsRetry(events []fftypes.LocallySequenced) (repoll bool, err error) { err = ep.conf.retry.Do(ep.ctx, "process events", func(attempt int) (retry bool, err error) { repoll, err = ep.conf.newEventsHandler(events) diff --git a/internal/events/event_poller_test.go b/internal/events/event_poller_test.go index f6a5b60955..b1b56e1af9 100644 --- a/internal/events/event_poller_test.go +++ b/internal/events/event_poller_test.go @@ -47,7 +47,7 @@ func newTestEventPoller(t *testing.T, mdi *databasemocks.Plugin, neh newEventsHa namespace: "unit", offsetName: "test", queryFactory: database.EventQueryFactory, - getItems: func(c context.Context, f database.Filter) ([]fftypes.LocallySequenced, error) { + getItems: func(c context.Context, f database.Filter, o int64) ([]fftypes.LocallySequenced, error) { events, _, err := mdi.GetEvents(c, f) ls := make([]fftypes.LocallySequenced, len(events)) for i, e := range events { @@ -207,7 +207,7 @@ func TestReadPageSingleCommitEvent(t *testing.T) { return false, nil }, nil) cancel() - ev1 := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, "ns1", fftypes.NewUUID(), nil) + ev1 := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, "ns1", fftypes.NewUUID(), nil, "") mdi.On("GetEvents", mock.Anything, mock.Anything).Return([]*fftypes.Event{ev1}, nil, nil).Once() mdi.On("GetEvents", mock.Anything, mock.Anything).Return([]*fftypes.Event{}, nil, nil) ep.eventLoop() @@ -226,8 +226,9 @@ func TestReadPageRewind(t *testing.T) { }, func() (bool, int64) { return true, 12345 }) + ep.pollingOffset = 23456 cancel() - ev1 := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, "ns1", fftypes.NewUUID(), nil) + ev1 := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, "ns1", fftypes.NewUUID(), nil, "") mdi.On("GetEvents", mock.Anything, 
mock.MatchedBy(func(filter database.Filter) bool { f, err := filter.Finalize() assert.NoError(t, err) @@ -248,7 +249,7 @@ func TestReadPageProcessEventsRetryExit(t *testing.T) { mdi := &databasemocks.Plugin{} ep, cancel := newTestEventPoller(t, mdi, func(events []fftypes.LocallySequenced) (bool, error) { return false, fmt.Errorf("pop") }, nil) cancel() - ev1 := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, "ns1", fftypes.NewUUID(), nil) + ev1 := fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, "ns1", fftypes.NewUUID(), nil, "") mdi.On("GetEvents", mock.Anything, mock.Anything).Return([]*fftypes.Event{ev1}, nil, nil).Once() ep.eventLoop() @@ -262,7 +263,7 @@ func TestProcessEventsFail(t *testing.T) { }, nil) defer cancel() _, err := ep.conf.newEventsHandler([]fftypes.LocallySequenced{ - fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, "ns1", fftypes.NewUUID(), nil), + fftypes.NewEvent(fftypes.EventTypeMessageConfirmed, "ns1", fftypes.NewUUID(), nil, ""), }) assert.EqualError(t, err, "pop") mdi.AssertExpectations(t) @@ -311,3 +312,41 @@ func TestDoubleTap(t *testing.T) { ep.shoulderTap() ep.shoulderTap() // this should not block } + +func TestDoubleConfirm(t *testing.T) { + mdi := &databasemocks.Plugin{} + ep, cancel := newTestEventPoller(t, mdi, nil, nil) + defer cancel() + ep.commitOffset(12345) + ep.commitOffset(12346) // this should not block +} + +func TestOffsetCommitLoopOk(t *testing.T) { + mdi := &databasemocks.Plugin{} + + ep, cancel := newTestEventPoller(t, mdi, nil, nil) + cancel() + + mdi.On("UpdateOffset", mock.Anything, ep.offsetID, mock.Anything).Return(nil) + + ep.offsetCommitted <- int64(12345) + close(ep.offsetCommitted) + ep.offsetCommitLoop() + + mdi.AssertExpectations(t) +} + +func TestOffsetCommitLoopFail(t *testing.T) { + mdi := &databasemocks.Plugin{} + + ep, cancel := newTestEventPoller(t, mdi, nil, nil) + cancel() + + mdi.On("UpdateOffset", mock.Anything, ep.offsetID, mock.Anything).Return(fmt.Errorf("pop")) + + ep.offsetCommitted <- int64(12345) + close(ep.offsetCommitted) + ep.offsetCommitLoop() + + mdi.AssertExpectations(t) +} diff --git a/internal/events/operation_update.go b/internal/events/operation_update.go index a2be9cecfc..834bacc93b 100644 --- a/internal/events/operation_update.go +++ b/internal/events/operation_update.go @@ -27,7 +27,7 @@ import ( func (em *eventManager) operationUpdateCtx(ctx context.Context, operationID *fftypes.UUID, txState fftypes.OpStatus, blockchainTXID, errorMessage string, opOutput fftypes.JSONObject) error { op, err := em.database.GetOperationByID(ctx, operationID) if err != nil || op == nil { - log.L(em.ctx).Warnf("Operation update '%s' ignored, as it was not submitted by this node", operationID) + log.L(ctx).Warnf("Operation update '%s' ignored, as it was not submitted by this node", operationID) return nil } @@ -37,15 +37,18 @@ func (em *eventManager) operationUpdateCtx(ctx context.Context, operationID *fft // Special handling for OpTypeTokenTransfer, which writes an event when it fails if op.Type == fftypes.OpTypeTokenTransfer && txState == fftypes.OpStatusFailed { - event := fftypes.NewEvent(fftypes.EventTypeTransferOpFailed, op.Namespace, op.ID, op.Transaction) - var tokenTransfer fftypes.TokenTransfer - err = txcommon.RetrieveTokenTransferInputs(ctx, op, &tokenTransfer) - if err != nil { - log.L(em.ctx).Warnf("Could not determine token transfer: %s", err) + tokenTransfer, err := txcommon.RetrieveTokenTransferInputs(ctx, op) + topic := "" + if tokenTransfer != nil { + topic = tokenTransfer.Pool.String() + } + 
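The topic derivation just above guards against a nil transfer coming back from input retrieval before dereferencing its pool. A tiny self-contained illustration of that nil-safe pattern, with a hypothetical type standing in for fftypes.TokenTransfer:

package main

import "fmt"

type pool struct{ name string }

func (p *pool) String() string { return p.name }

type transfer struct {
	Pool *pool
}

// topicFor derives an event topic from a possibly-nil transfer, falling
// back to the empty string rather than panicking on a nil dereference.
func topicFor(t *transfer) string {
	topic := ""
	if t != nil && t.Pool != nil {
		topic = t.Pool.String()
	}
	return topic
}

func main() {
	fmt.Printf("%q\n", topicFor(nil))                             // ""
	fmt.Printf("%q\n", topicFor(&transfer{Pool: &pool{"pool1"}})) // "pool1"
}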
event := fftypes.NewEvent(fftypes.EventTypeTransferOpFailed, op.Namespace, op.ID, op.Transaction, topic) + if err != nil || tokenTransfer.LocalID == nil || tokenTransfer.Type == "" { + log.L(em.ctx).Warnf("Could not parse token transfer: %s", err) } else { event.Correlator = tokenTransfer.LocalID if em.metrics.IsMetricsEnabled() { - em.metrics.TransferConfirmed(&tokenTransfer) + em.metrics.TransferConfirmed(tokenTransfer) } } if err := em.database.InsertEvent(ctx, event); err != nil { @@ -55,11 +58,14 @@ func (em *eventManager) operationUpdateCtx(ctx context.Context, operationID *fft // Special handling for OpTypeTokenApproval, which writes an event when it fails if op.Type == fftypes.OpTypeTokenApproval && txState == fftypes.OpStatusFailed { - event := fftypes.NewEvent(fftypes.EventTypeApprovalOpFailed, op.Namespace, op.ID, op.Transaction) - var tokenApproval fftypes.TokenApproval - err = txcommon.RetrieveTokenApprovalInputs(ctx, op, &tokenApproval) - if err != nil { - log.L(em.ctx).Warnf("Could not determine token retrieval: %s", err) + tokenApproval, err := txcommon.RetrieveTokenApprovalInputs(ctx, op) + topic := "" + if tokenApproval != nil { + topic = tokenApproval.Pool.String() + } + event := fftypes.NewEvent(fftypes.EventTypeApprovalOpFailed, op.Namespace, op.ID, op.Transaction, topic) + if err != nil || tokenApproval.LocalID == nil { + log.L(em.ctx).Warnf("Could not parse token approval: %s", err) } else { event.Correlator = tokenApproval.LocalID } diff --git a/internal/events/operation_update_test.go b/internal/events/operation_update_test.go index 0451d94892..13b62606fe 100644 --- a/internal/events/operation_update_test.go +++ b/internal/events/operation_update_test.go @@ -154,6 +154,7 @@ func TestOperationUpdateTransferFail(t *testing.T) { Transaction: fftypes.NewUUID(), Input: fftypes.JSONObject{ "localId": localID.String(), + "type": "transfer", }, } info := fftypes.JSONObject{"some": "info"} diff --git a/internal/events/persist_batch.go b/internal/events/persist_batch.go index 3357e012d1..b429de995b 100644 --- a/internal/events/persist_batch.go +++ b/internal/events/persist_batch.go @@ -24,25 +24,24 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (em *eventManager) persistBatchFromBroadcast(ctx context.Context /* db TX context*/, batch *fftypes.Batch, onchainHash *fftypes.Bytes32) (valid bool, err error) { - - if !onchainHash.Equals(batch.Hash) { - log.L(ctx).Errorf("Invalid batch '%s'. Hash in batch '%s' does not match transaction hash '%s'", batch.ID, batch.Hash, onchainHash) - return false, nil // This is not retryable. skip this batch - } - - return em.persistBatch(ctx, batch) +type messageAndData struct { + message *fftypes.Message + data fftypes.DataArray } // persistBatch performs very simple validation on each message/data element (hashes) and either persists // or discards them. Errors are returned only in the case of database failures, which should be retried. -func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, batch *fftypes.Batch) (valid bool, err error) { +func (em *eventManager) persistBatch(ctx context.Context, batch *fftypes.Batch) (persistedBatch *fftypes.BatchPersisted, valid bool, err error) { l := log.L(ctx) - now := fftypes.Now() - if batch.ID == nil || batch.Payload.TX.ID == nil { - l.Errorf("Invalid batch '%s'. Missing ID or transaction ID (%v)", batch.ID, batch.Payload.TX.ID) - return false, nil // This is not retryable. 
skip this batch + if batch.ID == nil || batch.Payload.TX.ID == nil || batch.Hash == nil { + l.Errorf("Invalid batch. Missing ID (%v), transaction ID (%s) or hash (%s)", batch.ID, batch.Payload.TX.ID, batch.Hash) + return nil, false, nil // This is not retryable. skip this batch + } + + if len(batch.Payload.Messages) == 0 || len(batch.Payload.Data) == 0 { + l.Errorf("Invalid batch '%s'. Missing messages (%d) or data (%d)", batch.ID, len(batch.Payload.Messages), len(batch.Payload.Data)) + return nil, false, nil // This is not retryable. skip this batch } switch batch.Payload.TX.Type { @@ -50,135 +49,253 @@ func (em *eventManager) persistBatch(ctx context.Context /* db TX context*/, bat case fftypes.TransactionTypeUnpinned: default: l.Errorf("Invalid batch '%s'. Invalid transaction type: %s", batch.ID, batch.Payload.TX.Type) - return false, nil // This is not retryable. skip this batch - } - - // Verify the hash calculation - hash := batch.Payload.Hash() - if batch.Hash == nil || *batch.Hash != *hash { - l.Errorf("Invalid batch '%s'. Hash does not match payload. Found=%s Expected=%s", batch.ID, hash, batch.Hash) - return false, nil // This is not retryable. skip this batch + return nil, false, nil // This is not retryable. skip this batch } // Set confirmed on the batch (the messages should not be confirmed at this point - that's the aggregator's job) - batch.Confirmed = now + persistedBatch, manifest := batch.Confirmed() + manifestHash := fftypes.HashString(persistedBatch.Manifest.String()) + + // Verify the hash calculation. + if !manifestHash.Equals(batch.Hash) { + // To cope with existing batches written by v0.13 and older environments, we have to do a more expensive + // hashing of the whole payload before we reject. + if batch.Payload.Hash().Equals(batch.Hash) { + l.Infof("Persisting migrated batch '%s'. Hash is a payload hash: %s", batch.ID, batch.Hash) + } else { + l.Errorf("Invalid batch '%s'. Hash does not match payload. Found=%s Expected=%s", batch.ID, manifestHash, batch.Hash) + return nil, false, nil // This is not retryable. skip this batch + } + } - // Upsert the batch itself, ensuring the hash does not change - err = em.database.UpsertBatch(ctx, batch) + // Upsert the batch + err = em.database.UpsertBatch(ctx, persistedBatch) if err != nil { if err == database.HashMismatch { l.Errorf("Invalid batch '%s'. Batch hash mismatch with existing record", batch.ID) - return false, nil // This is not retryable. skip this batch + return nil, false, nil // This is not retryable. 
skip this batch
	}
	l.Errorf("Failed to insert batch '%s': %s", batch.ID, err)
-	return false, err // a persistence failure here is considered retryable (so returned)
+	return nil, false, err // a persistence failure here is considered retryable (so returned)
	}

-	optimization := em.getOptimization(ctx, batch)
+	valid, err = em.validateAndPersistBatchContent(ctx, batch)
+	if err != nil || !valid {
+		return nil, valid, err
+	}
+	em.aggregator.cacheBatch(em.aggregator.getBatchCacheKey(persistedBatch.ID, persistedBatch.Hash), persistedBatch, manifest)
+	return persistedBatch, true, err
+}
+
+func (em *eventManager) validateAndPersistBatchContent(ctx context.Context, batch *fftypes.Batch) (valid bool, err error) {

	// Insert the data entries
+	dataByID := make(map[fftypes.UUID]*fftypes.Data)
	for i, data := range batch.Payload.Data {
-		if err = em.persistBatchData(ctx, batch, i, data, optimization); err != nil {
+		if valid = em.validateBatchData(ctx, batch, i, data); !valid {
+			return false, nil
+		}
+		if valid, err = em.checkAndInitiateBlobDownloads(ctx, batch, i, data); !valid || err != nil {
			return false, err
		}
+		dataByID[*data.ID] = data
	}

	// Insert the message entries
	for i, msg := range batch.Payload.Messages {
-		if valid, err = em.persistBatchMessage(ctx, batch, i, msg, optimization); !valid || err != nil {
-			return valid, err
+		if valid = em.validateBatchMessage(ctx, batch, i, msg); !valid {
+			return false, nil
		}
	}

-	return true, nil
-}
-
-func (em *eventManager) getOptimization(ctx context.Context, batch *fftypes.Batch) database.UpsertOptimization {
-	localNode := em.ni.GetNodeUUID(ctx)
-	if batch.Node == nil {
-		// This is from a node that hasn't yet completed registration, so we can't optimize
-		return database.UpsertOptimizationSkip
-	} else if localNode != nil && localNode.Equals(batch.Node) {
-		// We sent the batch, so we should already have all the messages and data locally - optimize the DB operations for that
-		return database.UpsertOptimizationExisting
+	// We require that the batch contains exactly the set of data that is in the messages - no more or less.
+	// While this means an edge-case inefficiency of re-transmission of data when sent in multiple messages,
+	// that is outweighed by the efficiency it allows in the insertion logic in the majority case.
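As a simplified standalone illustration of the exact-set containment rule described in the comment above (the real matching logic follows in this hunk), with minimal hypothetical types standing in for the fftypes structures:

package main

import "fmt"

type dataEntry struct {
	id   string
	hash string
}

type message struct {
	refs []dataEntry // id+hash references to data entries in the same batch
}

// matchBatchData returns true only if every message ref resolves to an
// in-batch data entry with a matching hash, and every data entry is
// referenced by at least one message: "no more or less".
func matchBatchData(msgs []message, data []dataEntry) bool {
	byID := make(map[string]dataEntry, len(data))
	for _, d := range data {
		byID[d.id] = d
	}
	matched := make(map[string]bool)
	for _, m := range msgs {
		for _, ref := range m.refs {
			d, ok := byID[ref.id]
			if !ok || d.hash != ref.hash {
				return false // data missing from the batch, or hash mismatch
			}
			matched[ref.id] = true
		}
	}
	return len(matched) == len(byID) // reject unreferenced data too
}

func main() {
	d := dataEntry{id: "d1", hash: "h1"}
	fmt.Println(matchBatchData([]message{{refs: []dataEntry{d}}}, []dataEntry{d})) // true
	fmt.Println(matchBatchData(nil, []dataEntry{d}))                               // false: unreferenced data
}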
+ matchedData := make(map[fftypes.UUID]bool) + matchedMsgs := make([]*messageAndData, len(batch.Payload.Messages)) + for iMsg, msg := range batch.Payload.Messages { + msgData := make(fftypes.DataArray, len(msg.Data)) + for di, dataRef := range msg.Data { + msgData[di] = dataByID[*dataRef.ID] + if msgData[di] == nil || !msgData[di].Hash.Equals(dataRef.Hash) { + log.L(ctx).Errorf("Message '%s' in batch '%s' - data not in-line in batch id='%s' hash='%s'", msg.Header.ID, batch.ID, dataRef.ID, dataRef.Hash) + return false, nil + } + matchedData[*dataRef.ID] = true + } + matchedMsgs[iMsg] = &messageAndData{ + message: msg, + data: msgData, + } + } + if len(matchedData) != len(dataByID) { + log.L(ctx).Errorf("Batch '%s' contains %d unique data, but %d are referenced from messages", batch.ID, len(dataByID), len(matchedData)) + return false, nil } - // We didn't send the batch, so all the data should be new - optimize the DB operations for that - return database.UpsertOptimizationNew -} -func (em *eventManager) persistBatchData(ctx context.Context /* db TX context*/, batch *fftypes.Batch, i int, data *fftypes.Data, optimization database.UpsertOptimization) error { - _, err := em.persistReceivedData(ctx, i, data, "batch", batch.ID, optimization) - return err + return em.persistBatchContent(ctx, batch, matchedMsgs) } -func (em *eventManager) persistReceivedData(ctx context.Context /* db TX context*/, i int, data *fftypes.Data, mType string, mID *fftypes.UUID, optimization database.UpsertOptimization) (bool, error) { +func (em *eventManager) validateBatchData(ctx context.Context, batch *fftypes.Batch, i int, data *fftypes.Data) bool { l := log.L(ctx) - l.Tracef("%s '%s' data %d: %+v", mType, mID, i, data) + l.Tracef("Batch '%s' data %d: %+v", batch.ID, i, data) if data == nil { - l.Errorf("null data entry %d in %s '%s'", i, mType, mID) - return false, nil // skip data entry + l.Errorf("null data entry %d in batch '%s'", i, batch.ID) + return false } hash, err := data.CalcHash(ctx) if err != nil { - log.L(ctx).Errorf("Invalid data entry %d in %s '%s': %s", i, mType, mID, err) - return false, nil // + log.L(ctx).Errorf("Invalid data entry %d in batch '%s': %s", i, batch.ID, err) + return false } if data.Hash == nil || *data.Hash != *hash { - log.L(ctx).Errorf("Invalid data entry %d in %s '%s': Hash=%v Expected=%v", i, mType, mID, data.Hash, hash) - return false, nil // skip data entry + log.L(ctx).Errorf("Invalid data entry %d in batch '%s': Hash=%v Expected=%v", i, batch.ID, data.Hash, hash) + return false } - // Insert the data, ensuring the hash doesn't change - if err := em.database.UpsertData(ctx, data, optimization); err != nil { - if err == database.HashMismatch { - log.L(ctx).Errorf("Invalid data entry %d in %s '%s'. Hash mismatch with existing record with same UUID '%s' Hash=%s", i, mType, mID, data.ID, data.Hash) - return false, nil // This is not retryable. 
skip this data entry
+	return true
+}
+
+func (em *eventManager) checkAndInitiateBlobDownloads(ctx context.Context, batch *fftypes.Batch, i int, data *fftypes.Data) (bool, error) {
+
+	if data.Blob != nil && batch.Type == fftypes.BatchTypeBroadcast {
+		// Check whether we need to initiate a download
+		blob, err := em.database.GetBlobMatchingHash(ctx, data.Blob.Hash)
+		if err != nil {
+			return false, err
+		}
+		if blob == nil {
+			if data.Blob.Public == "" {
+				log.L(ctx).Errorf("Invalid data entry %d id=%s in batch '%s' - missing public blob reference", i, data.ID, batch.ID)
+				return false, nil
+			}
+			if err = em.sharedDownload.InitiateDownloadBlob(ctx, data.Namespace, batch.Payload.TX.ID, data.ID, data.Blob.Public); err != nil {
+				return false, err
+			}
		}
-		log.L(ctx).Errorf("Failed to insert data entry %d in %s '%s': %s", i, mType, mID, err)
-		return false, err // a persistence failure here is considered retryable (so returned)
+	}

	return true, nil
}

-func (em *eventManager) persistBatchMessage(ctx context.Context /* db TX context*/, batch *fftypes.Batch, i int, msg *fftypes.Message, optimization database.UpsertOptimization) (bool, error) {
-	if msg != nil && (msg.Header.Author != batch.Author || msg.Header.Key != batch.Key) {
+func (em *eventManager) validateBatchMessage(ctx context.Context, batch *fftypes.Batch, i int, msg *fftypes.Message) bool {
+
+	l := log.L(ctx)
+	if msg == nil {
+		l.Errorf("null message entry %d in batch '%s'", i, batch.ID)
+		return false
+	}
+
+	if msg.Header.Author != batch.Author || msg.Header.Key != batch.Key {
		log.L(ctx).Errorf("Mismatched key/author '%s'/'%s' on message entry %d in batch '%s'", msg.Header.Key, msg.Header.Author, i, batch.ID)
-		return false, nil // skip entry
+		return false
	}
+	msg.BatchID = batch.ID
+
+	l.Tracef("Batch '%s' message %d: %+v", batch.ID, i, msg)

-	return em.persistReceivedMessage(ctx, i, msg, "batch", batch.ID, optimization)
+	err := msg.Verify(ctx)
+	if err != nil {
+		l.Errorf("Invalid message entry %d in batch '%s': %s", i, batch.ID, err)
+		return false
+	}
+	// Set the state to pending, for the insertion stage
+	msg.State = fftypes.MessageStatePending
+
+	return true
}

-func (em *eventManager) persistReceivedMessage(ctx context.Context /* db TX context*/, i int, msg *fftypes.Message, mType string, mID *fftypes.UUID, optimization database.UpsertOptimization) (bool, error) {
-	l := log.L(ctx)
-	l.Tracef("%s '%s' message %d: %+v", mType, mID, i, msg)
+func (em *eventManager) sentByUs(ctx context.Context, batch *fftypes.Batch) bool {
+	localNode := em.ni.GetNodeUUID(ctx)
+	if batch.Node == nil {
+		// This is from a node that hasn't yet completed registration, so we can't optimize
+		return false
+	} else if batch.Node.Equals(localNode) {
+		// We sent the batch, so we should already have all the messages and data locally
+		return true
+	}
+	// We didn't send the batch
+	return false
+}

-	if msg == nil {
-		l.Errorf("null message entry %d in %s '%s'", i, mType, mID)
-		return false, nil // skip entry
+func (em *eventManager) verifyAlreadyStored(ctx context.Context, batch *fftypes.Batch) (valid bool, err error) {
+	for _, msg := range batch.Payload.Messages {
+		msgLocal, _, _, err := em.data.GetMessageWithDataCached(ctx, msg.Header.ID)
+		if err != nil {
+			return false, err
+		}
+		if msgLocal == nil {
+			log.L(ctx).Errorf("Message entry %s in batch sent by this node was not found", msg.Header.ID)
+			return false, nil
+		}
+		if !msgLocal.Hash.Equals(msg.Hash) {
+
log.L(ctx).Errorf("Message entry %s hash mismatch with already stored. Local=%s BatchMsg=%s", msg.Header.ID, msgLocal.Hash, msg.Hash) + return false, nil + } } + return true, nil +} - err := msg.Verify(ctx) +func (em *eventManager) persistBatchContent(ctx context.Context, batch *fftypes.Batch, matchedMsgs []*messageAndData) (valid bool, err error) { + + // We want to insert the messages and data in the most efficient way we can. + // If we are sure we wrote the batch, then we do a cached lookup of each in turn - which is efficient + // because all of those should be in the cache as we wrote them recently. + if em.sentByUs(ctx, batch) { + allStored, err := em.verifyAlreadyStored(ctx, batch) + if err != nil { + return false, err + } + if allStored { + return true, nil + } + // Fall through otherwise + log.L(ctx).Warnf("Batch %s was sent by our UUID, but the content was not already stored. Assuming node has been reset", batch.ID) + } + + // Otherwise try a one-shot insert of all the data, on the basis it's likely unique + err = em.database.InsertDataArray(ctx, batch.Payload.Data) if err != nil { - l.Errorf("Invalid message entry %d in %s '%s': %s", i, mType, mID, err) - return false, nil // skip message entry + log.L(ctx).Debugf("Batch data insert optimization failed for batch '%s': %s", batch.ID, err) + // Fall back to individual upserts + for i, data := range batch.Payload.Data { + if err := em.database.UpsertData(ctx, data, database.UpsertOptimizationExisting); err != nil { + if err == database.HashMismatch { + log.L(ctx).Errorf("Invalid data entry %d in batch '%s'. Hash mismatch with existing record with same UUID '%s' Hash=%s", i, batch.ID, data.ID, data.Hash) + return false, nil + } + log.L(ctx).Errorf("Failed to insert data entry %d in batch '%s': %s", i, batch.ID, err) + return false, err + } + } } - // Insert the message, ensuring the hash doesn't change. - // We do not mark it as confirmed at this point, that's the job of the aggregator. - msg.State = fftypes.MessageStatePending - if err = em.database.UpsertMessage(ctx, msg, optimization); err != nil { - if err == database.HashMismatch { - l.Errorf("Invalid message entry %d in %s '%s'. Hash mismatch with existing record with same UUID '%s' Hash=%s", i, mType, mID, msg.Header.ID, msg.Hash) - return false, nil // This is not retryable. skip this data entry + // Then the same one-shot insert of all the mesages, on the basis they are likely unique (even if + // one of the data elements wasn't unique). Likely reasons for exceptions here are idempotent replay, + // or a root broadcast where "em.sentByUs" returned false, but we actually sent it. + err = em.database.InsertMessages(ctx, batch.Payload.Messages) + if err != nil { + log.L(ctx).Debugf("Batch message insert optimization failed for batch '%s': %s", batch.ID, err) + // Fall back to individual upserts + for i, msg := range batch.Payload.Messages { + if err = em.database.UpsertMessage(ctx, msg, database.UpsertOptimizationExisting); err != nil { + if err == database.HashMismatch { + log.L(ctx).Errorf("Invalid message entry %d in batch'%s'. Hash mismatch with existing record with same UUID '%s' Hash=%s", i, batch.ID, msg.Header.ID, msg.Hash) + return false, nil // This is not retryable. 
skip this data entry + } + log.L(ctx).Errorf("Failed to insert message entry %d in batch '%s': %s", i, batch.ID, err) + return false, err // a persistence failure here is considered retryable (so returned) + } } - l.Errorf("Failed to insert message entry %d in %s '%s': %s", i, mType, mID, err) - return false, err // a persistence failure here is considered retryable (so returned) } + // If all is well, update the cache before we return + for _, mm := range matchedMsgs { + em.data.UpdateMessageCache(mm.message, mm.data) + } return true, nil } diff --git a/internal/events/persist_batch_test.go b/internal/events/persist_batch_test.go index bb90e00ddb..5889a2fd7b 100644 --- a/internal/events/persist_batch_test.go +++ b/internal/events/persist_batch_test.go @@ -22,12 +22,14 @@ import ( "testing" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" + "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) -func TestPersistBatchFromBroadcast(t *testing.T) { +func TestPersistBatch(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() @@ -45,10 +47,12 @@ func TestPersistBatchFromBroadcast(t *testing.T) { } batch := &fftypes.Batch{ - ID: fftypes.NewUUID(), - SignerRef: fftypes.SignerRef{ - Author: "did:firefly:org/12345", - Key: "0x12345", + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + SignerRef: fftypes.SignerRef{ + Author: "did:firefly:org/12345", + Key: "0x12345", + }, }, Payload: fftypes.BatchPayload{ TX: fftypes.TransactionRef{ @@ -73,31 +77,229 @@ func TestPersistBatchFromBroadcast(t *testing.T) { }, }, }, - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ data, }, }, } - batch.Hash = batch.Payload.Hash() + bp, _ := batch.Confirmed() + batch.Hash = fftypes.HashString(bp.Manifest.String()) - _, err = em.persistBatchFromBroadcast(em.ctx, batch, batch.Hash) + _, _, err = em.persistBatch(em.ctx, batch) assert.EqualError(t, err, "pop") // Confirms we got to upserting the batch } -func TestPersistBatchFromBroadcastBadHash(t *testing.T) { +func TestPersistBatchNoCacheDataNotInBatch(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() mdi := em.database.(*databasemocks.Plugin) - mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(fmt.Errorf(("pop"))) + mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil) + mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationSkip).Return(nil) + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + data.ID = fftypes.NewUUID() + _ = data.Seal(em.ctx, nil) + bp, _ := batch.Confirmed() + batch.Hash = fftypes.HashString(bp.Manifest.String()) + + _, valid, err := em.persistBatch(em.ctx, batch) + assert.False(t, valid) + assert.NoError(t, err) + +} + +func TestPersistBatchExtraDataInBatch(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil) + mdi.On("UpsertMessage", em.ctx, mock.Anything, database.UpsertOptimizationSkip).Return(nil) + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + data2 := &fftypes.Data{ID: fftypes.NewUUID(), Value: 
fftypes.JSONAnyPtr(`"test2"`)} + _ = data2.Seal(em.ctx, nil) + batch.Payload.Data = append(batch.Payload.Data, data2) + bp, _ := batch.Confirmed() + batch.Hash = fftypes.HashString(bp.Manifest.String()) + + _, valid, err := em.persistBatch(em.ctx, batch) + assert.False(t, valid) + assert.NoError(t, err) + +} + +func TestPersistBatchNilMessageEntryop(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + valid := em.validateBatchMessage(em.ctx, &fftypes.Batch{}, 0, nil) + assert.False(t, valid) + +} + +func TestPersistBatchContentSendByUsOK(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + batch.Node = testNodeID + + mdm := em.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", em.ctx, batch.Payload.Messages[0].Header.ID).Return(batch.Payload.Messages[0], batch.Payload.Data, true, nil) + + ok, err := em.persistBatchContent(em.ctx, batch, []*messageAndData{}) + assert.NoError(t, err) + assert.True(t, ok) + + mdm.AssertExpectations(t) +} + +func TestPersistBatchContentSentByNil(t *testing.T) { - batch := &fftypes.Batch{} - batch.Hash = fftypes.NewRandB32() + em, cancel := newTestEventManager(t) + defer cancel() + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + batch.Node = nil + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("InsertDataArray", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertMessages", mock.Anything, mock.Anything).Return(nil) + + ok, err := em.persistBatchContent(em.ctx, batch, []*messageAndData{}) + assert.NoError(t, err) + assert.True(t, ok) + + mdi.AssertExpectations(t) + +} + +func TestPersistBatchContentSentByUsNotFoundFallback(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + batch.Node = testNodeID + + mdm := em.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", em.ctx, batch.Payload.Messages[0].Header.ID).Return(nil, nil, false, nil) + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("InsertDataArray", mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertMessages", mock.Anything, mock.Anything).Return(nil) - ok, err := em.persistBatchFromBroadcast(em.ctx, batch, fftypes.NewRandB32()) + ok, err := em.persistBatchContent(em.ctx, batch, []*messageAndData{}) + assert.NoError(t, err) + assert.True(t, ok) + + mdm.AssertExpectations(t) + mdi.AssertExpectations(t) + +} + +func TestPersistBatchContentSentByUsFoundMismatch(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + batch.Node = testNodeID + + mdm := em.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", em.ctx, batch.Payload.Messages[0].Header.ID).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + }, + }, nil, true, nil) + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("InsertDataArray", 
mock.Anything, mock.Anything).Return(nil) + mdi.On("InsertMessages", mock.Anything, mock.Anything).Return(fmt.Errorf("optimization miss")) + mdi.On("UpsertMessage", mock.Anything, mock.Anything, database.UpsertOptimizationExisting).Return(database.HashMismatch) + + ok, err := em.persistBatchContent(em.ctx, batch, []*messageAndData{}) assert.NoError(t, err) assert.False(t, ok) + mdm.AssertExpectations(t) + mdi.AssertExpectations(t) + +} + +func TestPersistBatchContentSentByUsFoundError(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + batch.Node = testNodeID + + mdm := em.data.(*datamocks.Manager) + mdm.On("GetMessageWithDataCached", em.ctx, batch.Payload.Messages[0].Header.ID).Return(nil, nil, false, fmt.Errorf("pop")) + + ok, err := em.persistBatchContent(em.ctx, batch, []*messageAndData{}) + assert.Regexp(t, "pop", err) + assert.False(t, ok) + + mdm.AssertExpectations(t) + +} + +func TestPersistBatchContentDataHashMismatch(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}) + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("InsertDataArray", mock.Anything, mock.Anything).Return(fmt.Errorf("optimization miss")) + mdi.On("UpsertData", mock.Anything, mock.Anything, database.UpsertOptimizationExisting).Return(database.HashMismatch) + + ok, err := em.persistBatchContent(em.ctx, batch, []*messageAndData{}) + assert.NoError(t, err) + assert.False(t, ok) + + mdi.AssertExpectations(t) + +} + +func TestPersistBatchContentDataMissingBlobRef(t *testing.T) { + + em, cancel := newTestEventManager(t) + defer cancel() + + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + } + data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`), Blob: &fftypes.BlobRef{ + Hash: blob.Hash, + }} + batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data}, blob) + + mdi := em.database.(*databasemocks.Plugin) + mdi.On("GetBlobMatchingHash", mock.Anything, mock.Anything).Return(nil, nil) + + ok, err := em.validateAndPersistBatchContent(em.ctx, batch) + assert.NoError(t, err) + assert.False(t, ok) + + mdi.AssertExpectations(t) + } diff --git a/internal/events/ss_callbacks.go b/internal/events/ss_callbacks.go new file mode 100644 index 0000000000..11dc89ca3e --- /dev/null +++ b/internal/events/ss_callbacks.go @@ -0,0 +1,71 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
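The new ss_callbacks.go that follows binds shared storage download completions into the event manager: deserialize the batch, sanity-check its namespace, persist it under retry inside a database group, then nudge the aggregator to rewind. A condensed, hedged sketch of that control flow, with hypothetical stand-ins for the real plugin and database types:

package main

import (
	"encoding/json"
	"fmt"
)

type batch struct {
	ID        string `json:"id"`
	Namespace string `json:"namespace"`
}

// handleBatchDownload mirrors the shape of SharedStorageBatchDownloaded:
// parse and validation failures are swallowed (the batch is just skipped),
// while persistence errors are returned so the caller can retry.
func handleBatchDownload(ns string, payload []byte, persist func(*batch) error, rewind chan<- string) (string, error) {
	var b batch
	if err := json.Unmarshal(payload, &b); err != nil {
		return "", nil // invalid payload: not retryable, skip
	}
	if b.Namespace != ns {
		return "", nil // namespace mismatch: not retryable, skip
	}
	if err := persist(&b); err != nil {
		return "", err // retryable persistence failure
	}
	rewind <- b.ID // tell the aggregator to re-process from this batch
	return b.ID, nil
}

func main() {
	rewind := make(chan string, 1)
	id, err := handleBatchDownload("ns1",
		[]byte(`{"id":"b1","namespace":"ns1"}`),
		func(b *batch) error { return nil },
		rewind)
	fmt.Println(id, err, <-rewind)
}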
+
+package events
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/hyperledger/firefly/internal/log"
+	"github.com/hyperledger/firefly/pkg/fftypes"
+	"github.com/hyperledger/firefly/pkg/sharedstorage"
+)
+
+func (em *eventManager) SharedStorageBatchDownloaded(ss sharedstorage.Plugin, ns, payloadRef string, data []byte) (*fftypes.UUID, error) {
+
+	l := log.L(em.ctx)
+
+	// De-serialize the batch
+	var batch *fftypes.Batch
+	err := json.Unmarshal(data, &batch)
+	if err != nil {
+		l.Errorf("Invalid batch downloaded from %s '%s': %s", ss.Name(), payloadRef, err)
+		return nil, nil
+	}
+	l.Infof("Shared storage batch downloaded from %s '%s' id=%s (len=%d)", ss.Name(), payloadRef, batch.ID, len(data))
+
+	if batch.Namespace != ns {
+		l.Errorf("Invalid batch '%s'. Namespace in batch '%s' does not match pin namespace '%s'", batch.ID, batch.Namespace, ns)
+		return nil, nil // This is not retryable. skip this batch
+	}
+
+	err = em.retry.Do(em.ctx, "persist batch", func(attempt int) (bool, error) {
+		err := em.database.RunAsGroup(em.ctx, func(ctx context.Context) error {
+			_, _, err := em.persistBatch(ctx, batch)
+			return err
+		})
+		if err != nil {
+			return true, err
+		}
+		return false, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Rewind the aggregator to this batch - after the DB updates are complete
+	log.L(em.ctx).Infof("Rewinding for downloaded broadcast batch %s", batch.ID)
+	em.aggregator.rewindBatches <- *batch.ID
+	return batch.ID, nil
+}
+
+func (em *eventManager) SharedStorageBLOBDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string) error {
+	l := log.L(em.ctx)
+	l.Infof("Blob received event from public storage %s: Hash='%v'", ss.Name(), hash)
+
+	return em.blobReceivedCommon("", hash, size, payloadRef)
+}
diff --git a/internal/events/ss_callbacks_test.go b/internal/events/ss_callbacks_test.go
new file mode 100644
index 0000000000..9c46fac42c
--- /dev/null
+++ b/internal/events/ss_callbacks_test.go
@@ -0,0 +1,145 @@
+// Copyright © 2022 Kaleido, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
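The tests that follow drive these callbacks through testify mocks: each dependency records expectations with On(...).Return(...), and AssertExpectations fails the test if an expected call never happened. A minimal self-contained example of that mocking pattern (the Store type here is illustrative, not a FireFly interface):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// mockStore records calls so a test can assert on them afterwards.
type mockStore struct {
	mock.Mock
}

func (m *mockStore) UpsertBatch(id string) error {
	args := m.Called(id)
	return args.Error(0)
}

func TestUpsertBatchCalled(t *testing.T) {
	ms := &mockStore{}
	ms.On("UpsertBatch", "b1").Return(nil) // expect exactly this call

	err := ms.UpsertBatch("b1")

	assert.NoError(t, err)
	ms.AssertExpectations(t) // fails the test if UpsertBatch was never called
}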
+
+package events
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+
+	"github.com/hyperledger/firefly/mocks/databasemocks"
+	"github.com/hyperledger/firefly/mocks/datamocks"
+	"github.com/hyperledger/firefly/mocks/sharedstoragemocks"
+	"github.com/hyperledger/firefly/pkg/fftypes"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+)
+
+func TestSharedStorageBatchDownloadedOk(t *testing.T) {
+
+	em, cancel := newTestEventManager(t)
+	defer cancel()
+
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
+	b, _ := json.Marshal(&batch)
+
+	mdi := em.database.(*databasemocks.Plugin)
+	mss := em.sharedstorage.(*sharedstoragemocks.Plugin)
+	mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(nil, nil)
+	mdi.On("InsertDataArray", em.ctx, mock.Anything).Return(nil, nil)
+	mdi.On("InsertMessages", em.ctx, mock.Anything).Return(nil, nil)
+	mss.On("Name").Return("utdx").Maybe()
+	mdm := em.data.(*datamocks.Manager)
+	mdm.On("UpdateMessageCache", mock.Anything, mock.Anything).Return()
+
+	bid, err := em.SharedStorageBatchDownloaded(mss, batch.Namespace, "payload1", b)
+	assert.NoError(t, err)
+	assert.Equal(t, batch.ID, bid)
+
+	assert.Equal(t, *batch.ID, <-em.aggregator.rewindBatches)
+
+	mdi.AssertExpectations(t)
+	mss.AssertExpectations(t)
+	mdm.AssertExpectations(t)
+
+}
+
+func TestSharedStorageBatchDownloadedPersistFail(t *testing.T) {
+
+	em, cancel := newTestEventManager(t)
+	cancel()
+
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
+	b, _ := json.Marshal(&batch)
+
+	mdi := em.database.(*databasemocks.Plugin)
+	mss := em.sharedstorage.(*sharedstoragemocks.Plugin)
+	mdi.On("UpsertBatch", em.ctx, mock.Anything).Return(fmt.Errorf("pop"))
+	mss.On("Name").Return("utdx").Maybe()
+
+	_, err := em.SharedStorageBatchDownloaded(mss, batch.Namespace, "payload1", b)
+	assert.Regexp(t, "FF10158", err)
+
+	mdi.AssertExpectations(t)
+	mss.AssertExpectations(t)
+
+}
+
+func TestSharedStorageBatchDownloadedNSMismatch(t *testing.T) {
+
+	em, cancel := newTestEventManager(t)
+	defer cancel()
+
+	data := &fftypes.Data{ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"test"`)}
+	batch := sampleBatch(t, fftypes.BatchTypeBroadcast, fftypes.TransactionTypeBatchPin, fftypes.DataArray{data})
+	b, _ := json.Marshal(&batch)
+
+	mss := em.sharedstorage.(*sharedstoragemocks.Plugin)
+	mss.On("Name").Return("utdx").Maybe()
+
+	_, err := em.SharedStorageBatchDownloaded(mss, "wrong", "payload1", b)
+	assert.NoError(t, err)
+
+	mss.AssertExpectations(t)
+
+}
+
+func TestSharedStorageBatchDownloadedBadData(t *testing.T) {
+
+	em, cancel := newTestEventManager(t)
+	defer cancel()
+
+	mss := em.sharedstorage.(*sharedstoragemocks.Plugin)
+	mss.On("Name").Return("utdx").Maybe()
+
+	_, err := em.SharedStorageBatchDownloaded(mss, "wrong", "payload1", []byte("!json"))
+	assert.NoError(t, err)
+
+	mss.AssertExpectations(t)
+
+}
+
+func TestSharedStorageBLOBDownloadedOk(t *testing.T) {
+
+	em, cancel := newTestEventManager(t)
+	defer cancel()
+
+	dataID := fftypes.NewUUID()
+	batchID := fftypes.NewUUID()
+
+	mdi := em.database.(*databasemocks.Plugin)
+	mss := em.sharedstorage.(*sharedstoragemocks.Plugin)
+	mss.On("Name").Return("utsd")
+	mdi.On("InsertBlob", em.ctx, mock.Anything).Return(nil, nil)
+	mdi.On("GetDataRefs", 
em.ctx, mock.Anything).Return(fftypes.DataRefs{ + {ID: dataID}, + }, nil, nil) + mdi.On("GetMessagesForData", em.ctx, dataID, mock.Anything).Return([]*fftypes.Message{ + {Header: fftypes.MessageHeader{ID: fftypes.NewUUID()}, BatchID: batchID}, + }, nil, nil) + + err := em.SharedStorageBLOBDownloaded(mss, *fftypes.NewRandB32(), 12345, "payload1") + assert.NoError(t, err) + + assert.Equal(t, *batchID, <-em.aggregator.rewindBatches) + + mdi.AssertExpectations(t) + mss.AssertExpectations(t) + +} diff --git a/internal/events/subscription_manager.go b/internal/events/subscription_manager.go index c9f6382267..9c28dc4add 100644 --- a/internal/events/subscription_manager.go +++ b/internal/events/subscription_manager.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -29,6 +29,7 @@ import ( "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/retry" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/events" "github.com/hyperledger/firefly/pkg/fftypes" @@ -39,10 +40,25 @@ type subscription struct { dispatcherElection chan bool eventMatcher *regexp.Regexp - groupFilter *regexp.Regexp - tagFilter *regexp.Regexp - topicsFilter *regexp.Regexp - authorFilter *regexp.Regexp + messageFilter *messageFilter + blockchainFilter *blockchainFilter + transactionFilter *transactionFilter + topicFilter *regexp.Regexp +} + +type messageFilter struct { + groupFilter *regexp.Regexp + tagFilter *regexp.Regexp + authorFilter *regexp.Regexp +} + +type blockchainFilter struct { + nameFilter *regexp.Regexp + listenerFilter *regexp.Regexp +} + +type transactionFilter struct { + typeFilter *regexp.Regexp } type connection struct { @@ -57,6 +73,7 @@ type subscriptionManager struct { ctx context.Context database database.Plugin data data.Manager + txHelper txcommon.Helper eventNotifier *eventNotifier definitions definitions.DefinitionHandlers transports map[string]events.Plugin @@ -71,7 +88,7 @@ type subscriptionManager struct { retry retry.Retry } -func newSubscriptionManager(ctx context.Context, di database.Plugin, dm data.Manager, en *eventNotifier, sh definitions.DefinitionHandlers) (*subscriptionManager, error) { +func newSubscriptionManager(ctx context.Context, di database.Plugin, dm data.Manager, en *eventNotifier, sh definitions.DefinitionHandlers, txHelper txcommon.Helper) (*subscriptionManager, error) { ctx, cancelCtx := context.WithCancel(ctx) sm := &subscriptionManager{ ctx: ctx, @@ -86,6 +103,7 @@ func newSubscriptionManager(ctx context.Context, di database.Plugin, dm data.Man cancelCtx: cancelCtx, eventNotifier: en, definitions: sh, + txHelper: txHelper, retry: retry.Retry{ InitialDelay: config.GetDuration(config.SubscriptionsRetryInitialDelay), MaximumDelay: config.GetDuration(config.SubscriptionsRetryMaxDelay), @@ -276,34 +294,69 @@ func (sm *subscriptionManager) parseSubscriptionDef(ctx context.Context, subDef } var tagFilter *regexp.Regexp - if filter.Tag != "" { - tagFilter, err = regexp.Compile(filter.Tag) + if filter.DeprecatedTag != "" { + log.L(ctx).Warnf("Your subscription filter uses the deprecated 'tag' key - please change to 'message.tag' instead") + tagFilter, err = regexp.Compile(filter.DeprecatedTag) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.tag", filter.DeprecatedTag) + } + } + + if 
filter.Message.Tag != "" { + tagFilter, err = regexp.Compile(filter.Message.Tag) if err != nil { - return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.tag", filter.Tag) + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.message.tag", filter.Message.Tag) } } var groupFilter *regexp.Regexp - if filter.Group != "" { - groupFilter, err = regexp.Compile(filter.Group) + if filter.DeprecatedGroup != "" { + log.L(ctx).Warnf("Your subscription filter uses the deprecated 'group' key - please change to 'message.group' instead") + // set to group filter, will be overwritten by non-deprecated key if both are present + groupFilter, err = regexp.Compile(filter.DeprecatedGroup) if err != nil { - return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.group", filter.Group) + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.group", filter.DeprecatedGroup) } } - var topicsFilter *regexp.Regexp - if filter.Topics != "" { - topicsFilter, err = regexp.Compile(filter.Topics) + if filter.Message.Group != "" { + groupFilter, err = regexp.Compile(filter.Message.Group) if err != nil { - return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.topics", filter.Topics) + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.message.group", filter.Message.Group) + } + } + + var topicFilter *regexp.Regexp + if filter.DeprecatedTopics != "" { + log.L(ctx).Warnf("Your subscription filter uses the deprecated 'topics' key - please change to 'topic' instead") + // set to topics filter, will be overwritten by non-deprecated key if both are present + topicFilter, err = regexp.Compile(filter.DeprecatedTopics) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.topics", filter.DeprecatedTopics) + } + } + + if filter.Topic != "" { + topicFilter, err = regexp.Compile(filter.Topic) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.topic", filter.Topic) } } var authorFilter *regexp.Regexp - if filter.Author != "" { - authorFilter, err = regexp.Compile(filter.Author) + if filter.DeprecatedAuthor != "" { + log.L(ctx).Warnf("Your subscription filter uses the deprecated 'author' key - please change to 'message.author' instead") + // set to author filter, will be overwritten by non-deprecated key if both are present + authorFilter, err = regexp.Compile(filter.DeprecatedAuthor) if err != nil { - return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.author", filter.Author) + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.author", filter.DeprecatedAuthor) + } + } + + if filter.Message.Author != "" { + authorFilter, err = regexp.Compile(filter.Message.Author) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.message.author", filter.Message.Author) } } @@ -311,11 +364,53 @@ func (sm *subscriptionManager) parseSubscriptionDef(ctx context.Context, subDef dispatcherElection: make(chan bool, 1), definition: subDef, eventMatcher: eventFilter, - groupFilter: groupFilter, - tagFilter: tagFilter, - topicsFilter: topicsFilter, - authorFilter: authorFilter, + topicFilter: topicFilter, + messageFilter: &messageFilter{ + tagFilter: tagFilter, + groupFilter: groupFilter, + authorFilter: authorFilter, + }, } + + if (filter.BlockchainEvent != fftypes.BlockchainEventFilter{}) { + var nameFilter *regexp.Regexp + if filter.BlockchainEvent.Name != "" { + 
nameFilter, err = regexp.Compile(filter.BlockchainEvent.Name) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.blockchain.name", filter.BlockchainEvent.Name) + } + } + + var listenerFilter *regexp.Regexp + if filter.BlockchainEvent.Listener != "" { + listenerFilter, err = regexp.Compile(filter.BlockchainEvent.Listener) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.blockchain.listener", filter.BlockchainEvent.Listener) + } + } + + bf := &blockchainFilter{ + nameFilter: nameFilter, + listenerFilter: listenerFilter, + } + sub.blockchainFilter = bf + } + + if (filter.Transaction != fftypes.TransactionFilter{}) { + var typeFilter *regexp.Regexp + if filter.Transaction.Type != "" { + typeFilter, err = regexp.Compile(filter.Transaction.Type) + if err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgRegexpCompileFailed, "filter.transaction.type", filter.Transaction.Type) + } + } + + tf := &transactionFilter{ + typeFilter: typeFilter, + } + sub.transactionFilter = tf + } + return sub, err } @@ -381,7 +476,7 @@ func (sm *subscriptionManager) matchSubToConnLocked(conn *connection, sub *subsc } if conn.transport == sub.definition.Transport && conn.matcher(sub.definition.SubscriptionRef) { if _, ok := conn.dispatchers[*sub.definition.ID]; !ok { - dispatcher := newEventDispatcher(sm.ctx, conn.ei, sm.database, sm.data, sm.definitions, conn.id, sub, sm.eventNotifier, sm.cel) + dispatcher := newEventDispatcher(sm.ctx, conn.ei, sm.database, sm.data, sm.definitions, conn.id, sub, sm.eventNotifier, sm.cel, sm.txHelper) conn.dispatchers[*sub.definition.ID] = dispatcher dispatcher.start() } @@ -418,7 +513,7 @@ func (sm *subscriptionManager) ephemeralSubscription(ei events.Plugin, connID, n } // Create the dispatcher, and start immediately - dispatcher := newEventDispatcher(sm.ctx, ei, sm.database, sm.data, sm.definitions, connID, newSub, sm.eventNotifier, sm.cel) + dispatcher := newEventDispatcher(sm.ctx, ei, sm.database, sm.data, sm.definitions, connID, newSub, sm.eventNotifier, sm.cel, sm.txHelper) dispatcher.start() conn.dispatchers[*subID] = dispatcher diff --git a/internal/events/subscription_manager_test.go b/internal/events/subscription_manager_test.go index 51e5179080..3147732bb4 100644 --- a/internal/events/subscription_manager_test.go +++ b/internal/events/subscription_manager_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/definitionsmocks" @@ -39,6 +40,7 @@ func newTestSubManager(t *testing.T, mei *eventsmocks.PluginAll) (*subscriptionM mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} msh := &definitionsmocks.DefinitionHandlers{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) ctx, cancel := context.WithCancel(context.Background()) mei.On("Name").Return("ut") @@ -47,7 +49,7 @@ func newTestSubManager(t *testing.T, mei *eventsmocks.PluginAll) (*subscriptionM mei.On("Init", mock.Anything, mock.Anything, mock.Anything).Return(nil) mdi.On("GetEvents", mock.Anything, mock.Anything, mock.Anything).Return([]*fftypes.Event{}, nil, nil).Maybe() mdi.On("GetOffset", mock.Anything, mock.Anything, mock.Anything).Return(&fftypes.Offset{RowID: 3333333, Current: 0}, nil).Maybe() - sm, err := newSubscriptionManager(ctx, mdi, mdm, newEventNotifier(ctx, "ut"), msh) + sm, 
err := newSubscriptionManager(ctx, mdi, mdm, newEventNotifier(ctx, "ut"), msh, txHelper) assert.NoError(t, err) sm.transports = map[string]events.Plugin{ "ut": mei, @@ -123,7 +125,8 @@ func TestRegisterEphemeralSubscriptions(t *testing.T) { assert.NoError(t, err) be := &boundCallbacks{sm: sm, ei: mei} - err = be.EphemeralSubscription("conn1", "ns1", &fftypes.SubscriptionFilter{}, &fftypes.SubscriptionOptions{}) + // check with filter + err = be.EphemeralSubscription("conn1", "ns1", &fftypes.SubscriptionFilter{Message: fftypes.MessageFilter{Author: "flapflip"}}, &fftypes.SubscriptionOptions{}) assert.NoError(t, err) assert.Equal(t, 1, len(sm.connections["conn1"].dispatchers)) @@ -151,7 +154,9 @@ func TestRegisterEphemeralSubscriptionsFail(t *testing.T) { be := &boundCallbacks{sm: sm, ei: mei} err = be.EphemeralSubscription("conn1", "ns1", &fftypes.SubscriptionFilter{ - Topics: "[[[[[ !wrong", + Message: fftypes.MessageFilter{ + Author: "[[[[[ !wrong", + }, }, &fftypes.SubscriptionOptions{}) assert.Regexp(t, "FF10171", err) assert.Empty(t, sm.connections["conn1"].dispatchers) @@ -161,9 +166,10 @@ func TestRegisterEphemeralSubscriptionsFail(t *testing.T) { func TestSubManagerBadPlugin(t *testing.T) { mdi := &databasemocks.Plugin{} mdm := &datamocks.Manager{} + txHelper := txcommon.NewTransactionHelper(mdi, mdm) config.Reset() config.Set(config.EventTransportsEnabled, []string{"!unknown!"}) - _, err := newSubscriptionManager(context.Background(), mdi, mdm, newEventNotifier(context.Background(), "ut"), nil) + _, err := newSubscriptionManager(context.Background(), mdi, mdm, newEventNotifier(context.Background(), "ut"), nil, txHelper) assert.Regexp(t, "FF10172", err) } @@ -220,11 +226,19 @@ func TestStartSubRestoreOkSubsOK(t *testing.T) { ID: fftypes.NewUUID(), }, Filter: fftypes.SubscriptionFilter{ + Topic: ".*", Events: ".*", - Topics: ".*", - Tag: ".*", - Group: ".*", - Author: ".*", + Message: fftypes.MessageFilter{ + Tag: ".*", + Group: ".*", + Author: ".*", + }, + Transaction: fftypes.TransactionFilter{ + Type: ".*", + }, + BlockchainEvent: fftypes.BlockchainEventFilter{ + Name: ".*", + }, }}, }, nil, nil) err := sm.start() @@ -276,55 +290,232 @@ func TestCreateSubscriptionBadTopicFilter(t *testing.T) { mei.On("ValidateOptions", mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ Filter: fftypes.SubscriptionFilter{ - Topics: "[[[[! badness", + Topic: "[[[[! badness", }, Transport: "ut", }) assert.Regexp(t, "FF10171.*topic", err) } -func TestCreateSubscriptionBadContextFilter(t *testing.T) { +func TestCreateSubscriptionBadGroupFilter(t *testing.T) { mei := &eventsmocks.PluginAll{} sm, cancel := newTestSubManager(t, mei) defer cancel() mei.On("ValidateOptions", mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ Filter: fftypes.SubscriptionFilter{ - Tag: "[[[[! badness", + Message: fftypes.MessageFilter{ + Group: "[[[[! badness", + }, }, Transport: "ut", }) - assert.Regexp(t, "FF10171.*tag", err) + assert.Regexp(t, "FF10171.*group", err) } -func TestCreateSubscriptionBadGroupFilter(t *testing.T) { +func TestCreateSubscriptionBadAuthorFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + Message: fftypes.MessageFilter{ + Author: "[[[[! 
badness", + }, + }, + Transport: "ut", + }) + assert.Regexp(t, "FF10171.*author", err) +} + +func TestCreateSubscriptionBadTxTypeFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + Transaction: fftypes.TransactionFilter{ + Type: "[[[[! badness", + }, + }, + Transport: "ut", + }) + assert.Regexp(t, "FF10171.*type", err) +} + +func TestCreateSubscriptionBadBlockchainEventNameFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + BlockchainEvent: fftypes.BlockchainEventFilter{ + Name: "[[[[! badness", + }, + }, + Transport: "ut", + }) + assert.Regexp(t, "FF10171.*name", err) +} + +func TestCreateSubscriptionBadDeprecatedGroupFilter(t *testing.T) { mei := &eventsmocks.PluginAll{} sm, cancel := newTestSubManager(t, mei) defer cancel() mei.On("ValidateOptions", mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ Filter: fftypes.SubscriptionFilter{ - Group: "[[[[! badness", + DeprecatedGroup: "[[[[! badness", }, Transport: "ut", }) assert.Regexp(t, "FF10171.*group", err) } -func TestCreateSubscriptionBadAuthorFilter(t *testing.T) { +func TestCreateSubscriptionBadDeprecatedTagFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + DeprecatedTag: "[[[[! badness", + }, + Transport: "ut", + }) + assert.Regexp(t, "FF10171.*tag", err) +} + +func TestCreateSubscriptionBadMessageTagFilter(t *testing.T) { mei := &eventsmocks.PluginAll{} sm, cancel := newTestSubManager(t, mei) defer cancel() mei.On("ValidateOptions", mock.Anything).Return(nil) _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ Filter: fftypes.SubscriptionFilter{ - Author: "[[[[! badness", + Message: fftypes.MessageFilter{ + Tag: "[[[[! badness", + }, + }, + Transport: "ut", + }) + assert.Regexp(t, "FF10171.*message.tag", err) +} + +func TestCreateSubscriptionBadDeprecatedAuthorFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + DeprecatedAuthor: "[[[[! badness", }, Transport: "ut", }) assert.Regexp(t, "FF10171.*author", err) } +func TestCreateSubscriptionBadDeprecatedTopicsFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + DeprecatedTopics: "[[[[! 
badness", + }, + Transport: "ut", + }) + assert.Regexp(t, "FF10171.*topics", err) +} + +func TestCreateSubscriptionBadBlockchainEventListenerFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + BlockchainEvent: fftypes.BlockchainEventFilter{ + Listener: "[[[[! badness", + }, + }, + Transport: "ut", + }) + assert.Regexp(t, "FF10171.*listener", err) +} + +func TestCreateSubscriptionSuccessMessageFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + Message: fftypes.MessageFilter{ + Author: "flapflip", + }, + }, + Transport: "ut", + }) + assert.NoError(t, err) +} + +func TestCreateSubscriptionSuccessTxFilter(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + Transaction: fftypes.TransactionFilter{ + Type: "flapflip", + }, + }, + Transport: "ut", + }) + assert.NoError(t, err) +} + +func TestCreateSubscriptionSuccessBlockchainEvent(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + BlockchainEvent: fftypes.BlockchainEventFilter{ + Name: "flapflip", + }, + }, + Transport: "ut", + }) + assert.NoError(t, err) +} + +func TestCreateSubscriptionWithDeprecatedFilters(t *testing.T) { + mei := &eventsmocks.PluginAll{} + sm, cancel := newTestSubManager(t, mei) + defer cancel() + mei.On("ValidateOptions", mock.Anything).Return(nil) + _, err := sm.parseSubscriptionDef(sm.ctx, &fftypes.Subscription{ + Filter: fftypes.SubscriptionFilter{ + Topic: "flop", + DeprecatedTopics: "test", + DeprecatedTag: "flap", + DeprecatedAuthor: "flip", + DeprecatedGroup: "flapflip", + }, + Transport: "ut", + }) + assert.NoError(t, err) + +} + func TestDispatchDeliveryResponseOK(t *testing.T) { mei := &eventsmocks.PluginAll{} sm, cancel := newTestSubManager(t, mei) diff --git a/internal/events/system/events.go b/internal/events/system/events.go index e8fc53477b..9ca64c8ac4 100644 --- a/internal/events/system/events.go +++ b/internal/events/system/events.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -89,7 +89,7 @@ func (se *Events) AddListener(ns string, el EventListener) error { return nil } -func (se *Events) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (se *Events) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { se.mux.Lock() defer se.mux.Unlock() for ns, listeners := range se.listeners { diff --git a/internal/events/system/events_test.go b/internal/events/system/events_test.go index 403da047fb..f36ab37feb 100644 --- a/internal/events/system/events_test.go +++ b/internal/events/system/events_test.go @@ -66,15 +66,19 @@ func TestDeliveryRequestOk(t *testing.T) { assert.NoError(t, err) err = se.DeliveryRequest(se.connID, &fftypes.Subscription{}, &fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + }, }, }, nil) assert.NoError(t, err) err = se.DeliveryRequest(se.connID, &fftypes.Subscription{}, &fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns2", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns2", + }, }, }, nil) assert.NoError(t, err) @@ -98,8 +102,10 @@ func TestDeliveryRequestFail(t *testing.T) { assert.NoError(t, err) err = se.DeliveryRequest(mock.Anything, &fftypes.Subscription{}, &fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + }, }, }, nil) assert.EqualError(t, err, "pop") diff --git a/internal/events/token_pool_created.go b/internal/events/token_pool_created.go index 2c2714d584..0964d0a625 100644 --- a/internal/events/token_pool_created.go +++ b/internal/events/token_pool_created.go @@ -18,7 +18,9 @@ package events import ( "context" + "fmt" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/blockchain" @@ -27,17 +29,23 @@ import ( "github.com/hyperledger/firefly/pkg/tokens" ) -func addPoolDetailsFromPlugin(ffPool *fftypes.TokenPool, pluginPool *tokens.TokenPool) { +func addPoolDetailsFromPlugin(ffPool *fftypes.TokenPool, pluginPool *tokens.TokenPool) error { ffPool.Type = pluginPool.Type ffPool.ProtocolID = pluginPool.ProtocolID ffPool.Connector = pluginPool.Connector ffPool.Standard = pluginPool.Standard - if pluginPool.TransactionID != nil { - ffPool.TX = fftypes.TransactionRef{ - Type: fftypes.TransactionTypeTokenPool, - ID: pluginPool.TransactionID, + if pluginPool.TX.ID != nil { + ffPool.TX = pluginPool.TX + } + if pluginPool.Symbol != "" { + if ffPool.Symbol == "" { + ffPool.Symbol = pluginPool.Symbol + } else if ffPool.Symbol != pluginPool.Symbol { + return fmt.Errorf("token symbol '%s' from blockchain does not match stored symbol '%s'", pluginPool.Symbol, ffPool.Symbol) } } + ffPool.Info = pluginPool.Info + return nil } func (em *eventManager) confirmPool(ctx context.Context, pool *fftypes.TokenPool, ev *blockchain.Event, blockchainTXID string) error { @@ -61,7 +69,7 @@ func (em *eventManager) confirmPool(ctx context.Context, pool *fftypes.TokenPool return err } log.L(ctx).Infof("Token pool confirmed, id=%s", pool.ID) - event := fftypes.NewEvent(fftypes.EventTypePoolConfirmed, pool.Namespace, pool.ID, pool.TX.ID) + event := fftypes.NewEvent(fftypes.EventTypePoolConfirmed, pool.Namespace, pool.ID, 
pool.TX.ID, pool.ID.String()) return em.database.InsertEvent(ctx, event) } @@ -84,14 +92,16 @@ func (em *eventManager) shouldConfirm(ctx context.Context, pool *tokens.TokenPoo if existingPool, err = em.database.GetTokenPoolByProtocolID(ctx, pool.Connector, pool.ProtocolID); err != nil || existingPool == nil { return existingPool, err } - addPoolDetailsFromPlugin(existingPool, pool) + if err = addPoolDetailsFromPlugin(existingPool, pool); err != nil { + log.L(ctx).Errorf("Error processing pool for transaction '%s' (%s) - ignoring", pool.TX.ID, err) + return nil, nil + } if existingPool.State == fftypes.TokenPoolStateUnknown { // Unknown pool state - should only happen on first run after database migration // Activate the pool, then immediately confirm // TODO: can this state eventually be removed? - ev := buildBlockchainEvent(existingPool.Namespace, nil, &pool.Event, &existingPool.TX) - if err = em.assets.ActivateTokenPool(ctx, existingPool, ev); err != nil { + if err = em.assets.ActivateTokenPool(ctx, existingPool, pool.Event.Info); err != nil { log.L(ctx).Errorf("Failed to activate token pool '%s': %s", existingPool.ID, err) return nil, err } @@ -100,19 +110,23 @@ func (em *eventManager) shouldConfirm(ctx context.Context, pool *tokens.TokenPoo } func (em *eventManager) shouldAnnounce(ctx context.Context, pool *tokens.TokenPool) (announcePool *fftypes.TokenPool, err error) { - op, err := em.findTXOperation(ctx, pool.TransactionID, fftypes.OpTypeTokenCreatePool) + op, err := em.findTXOperation(ctx, pool.TX.ID, fftypes.OpTypeTokenCreatePool) if err != nil { return nil, err } else if op == nil { return nil, nil } - announcePool = &fftypes.TokenPool{} - if err = txcommon.RetrieveTokenPoolCreateInputs(ctx, op, announcePool); err != nil { - log.L(ctx).Errorf("Error loading pool info for transaction '%s' (%s) - ignoring: %v", pool.TransactionID, err, op.Input) + announcePool, err = txcommon.RetrieveTokenPoolCreateInputs(ctx, op) + if err != nil || announcePool.ID == nil || announcePool.Namespace == "" || announcePool.Name == "" { + log.L(ctx).Errorf("Error loading pool info for transaction '%s' (%s) - ignoring: %v", pool.TX.ID, err, op.Input) + return nil, nil + } + + if err = addPoolDetailsFromPlugin(announcePool, pool); err != nil { + log.L(ctx).Errorf("Error processing pool for transaction '%s' (%s) - ignoring", pool.TX.ID, err) return nil, nil } - addPoolDetailsFromPlugin(announcePool, pool) return announcePool, nil } @@ -135,13 +149,13 @@ func (em *eventManager) TokenPoolCreated(ti tokens.Plugin, pool *tokens.TokenPoo if existingPool.State == fftypes.TokenPoolStateConfirmed { return nil // already confirmed } - if msg, err := em.database.GetMessageByID(ctx, existingPool.Message); err != nil { + if msg, _, _, err := em.data.GetMessageWithDataCached(ctx, existingPool.Message, data.CRORequireBatchID); err != nil { return err } else if msg != nil { batchID = msg.BatchID // trigger rewind after completion of database transaction } return em.confirmPool(ctx, existingPool, &pool.Event, pool.Event.BlockchainTXID) - } else if pool.TransactionID == nil { + } else if pool.TX.ID == nil { // TransactionID is required if the pool doesn't exist yet // (but it may be omitted when activating a pool that was received via definition broadcast) log.L(em.ctx).Errorf("Invalid token pool transaction - ID is nil") @@ -166,7 +180,7 @@ func (em *eventManager) TokenPoolCreated(ti tokens.Plugin, pool *tokens.TokenPoo // Initiate a rewind if a batch was potentially completed by the arrival of this transaction if batchID 
!= nil { log.L(em.ctx).Infof("Batch '%s' contains reference to received pool '%s'", batchID, pool.ProtocolID) - em.aggregator.rewindBatches <- batchID + em.aggregator.rewindBatches <- *batchID } // Announce the details of the new token pool with the blockchain event details diff --git a/internal/events/token_pool_created_test.go b/internal/events/token_pool_created_test.go index 5a28068a07..8c020f25a8 100644 --- a/internal/events/token_pool_created_test.go +++ b/internal/events/token_pool_created_test.go @@ -20,9 +20,11 @@ import ( "fmt" "testing" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/mocks/assetmocks" "github.com/hyperledger/firefly/mocks/broadcastmocks" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/blockchain" @@ -42,10 +44,13 @@ func TestTokenPoolCreatedIgnore(t *testing.T) { operations := []*fftypes.Operation{} info := fftypes.JSONObject{"some": "info"} pool := &tokens.TokenPool{ - Type: fftypes.TokenTypeFungible, - ProtocolID: "123", - TransactionID: txID, - Connector: "erc1155", + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, + Connector: "erc1155", Event: blockchain.Event{ BlockchainTXID: "0xffffeeee", ProtocolID: "tx1", @@ -70,10 +75,9 @@ func TestTokenPoolCreatedIgnoreNoTX(t *testing.T) { info := fftypes.JSONObject{"some": "info"} pool := &tokens.TokenPool{ - Type: fftypes.TokenTypeFungible, - ProtocolID: "123", - TransactionID: nil, - Connector: "erc1155", + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + Connector: "erc1155", Event: blockchain.Event{ BlockchainTXID: "0xffffeeee", ProtocolID: "tx1", @@ -93,20 +97,28 @@ func TestTokenPoolCreatedConfirm(t *testing.T) { mdi := em.database.(*databasemocks.Plugin) mti := &tokenmocks.Plugin{} mth := em.txHelper.(*txcommonmocks.Helper) + mdm := em.data.(*datamocks.Manager) opID := fftypes.NewUUID() txID := fftypes.NewUUID() - info := fftypes.JSONObject{"some": "info"} + info1 := fftypes.JSONObject{"pool": "info"} + info2 := fftypes.JSONObject{"block": "info"} chainPool := &tokens.TokenPool{ - Type: fftypes.TokenTypeFungible, - ProtocolID: "123", - Connector: "erc1155", - TransactionID: txID, + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + Connector: "erc1155", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, + Standard: "ERC1155", + Symbol: "FFT", + Info: info1, Event: blockchain.Event{ BlockchainTXID: "0xffffeeee", Name: "TokenPool", ProtocolID: "tx1", - Info: info, + Info: info2, }, } storedPool := &fftypes.TokenPool{ @@ -140,13 +152,18 @@ func TestTokenPoolCreatedConfirm(t *testing.T) { mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { return e.Type == fftypes.EventTypePoolConfirmed && *e.Reference == *storedPool.ID })).Return(nil).Once() - mdi.On("GetMessageByID", em.ctx, storedPool.Message).Return(nil, fmt.Errorf("pop")).Once() - mdi.On("GetMessageByID", em.ctx, storedPool.Message).Return(storedMessage, nil).Once() + mdm.On("GetMessageWithDataCached", em.ctx, storedPool.Message, data.CRORequireBatchID).Return(nil, nil, false, fmt.Errorf("pop")).Once() + mdm.On("GetMessageWithDataCached", em.ctx, storedPool.Message, data.CRORequireBatchID).Return(storedMessage, nil, true, nil).Once() err := em.TokenPoolCreated(mti, 
chainPool) assert.NoError(t, err) + assert.Equal(t, "ERC1155", storedPool.Standard) + assert.Equal(t, "FFT", storedPool.Symbol) + assert.Equal(t, info1, storedPool.Info) + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestTokenPoolCreatedAlreadyConfirmed(t *testing.T) { @@ -158,10 +175,13 @@ func TestTokenPoolCreatedAlreadyConfirmed(t *testing.T) { txID := fftypes.NewUUID() info := fftypes.JSONObject{"some": "info"} chainPool := &tokens.TokenPool{ - Type: fftypes.TokenTypeFungible, - ProtocolID: "123", - Connector: "erc1155", - TransactionID: txID, + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + Connector: "erc1155", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, Event: blockchain.Event{ BlockchainTXID: "0xffffeeee", ProtocolID: "tx1", @@ -186,6 +206,52 @@ func TestTokenPoolCreatedAlreadyConfirmed(t *testing.T) { mdi.AssertExpectations(t) } +func TestTokenPoolCreatedConfirmFailBadSymbol(t *testing.T) { + em, cancel := newTestEventManager(t) + defer cancel() + mdi := em.database.(*databasemocks.Plugin) + mti := &tokenmocks.Plugin{} + + opID := fftypes.NewUUID() + txID := fftypes.NewUUID() + info := fftypes.JSONObject{"some": "info"} + chainPool := &tokens.TokenPool{ + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + Connector: "erc1155", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, + Symbol: "ETH", + Event: blockchain.Event{ + BlockchainTXID: "0xffffeeee", + ProtocolID: "tx1", + Info: info, + }, + } + storedPool := &fftypes.TokenPool{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + State: fftypes.TokenPoolStatePending, + Symbol: "FFT", + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeTokenPool, + ID: txID, + }, + } + + mdi.On("GetTokenPoolByProtocolID", em.ctx, "erc1155", "123").Return(storedPool, nil) + mdi.On("GetOperations", em.ctx, mock.Anything).Return([]*fftypes.Operation{{ + ID: opID, + }}, nil, nil) + + err := em.TokenPoolCreated(mti, chainPool) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + func TestTokenPoolCreatedMigrate(t *testing.T) { em, cancel := newTestEventManager(t) defer cancel() @@ -193,14 +259,18 @@ func TestTokenPoolCreatedMigrate(t *testing.T) { mam := em.assets.(*assetmocks.Manager) mti := &tokenmocks.Plugin{} mth := em.txHelper.(*txcommonmocks.Helper) + mdm := em.data.(*datamocks.Manager) txID := fftypes.NewUUID() info := fftypes.JSONObject{"some": "info"} chainPool := &tokens.TokenPool{ - Type: fftypes.TokenTypeFungible, - ProtocolID: "123", - Connector: "magic-tokens", - TransactionID: txID, + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + Connector: "magic-tokens", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, Event: blockchain.Event{ BlockchainTXID: "0xffffeeee", ProtocolID: "tx1", @@ -233,19 +303,16 @@ func TestTokenPoolCreatedMigrate(t *testing.T) { mdi.On("InsertEvent", em.ctx, mock.MatchedBy(func(e *fftypes.Event) bool { return e.Type == fftypes.EventTypePoolConfirmed && *e.Reference == *storedPool.ID })).Return(nil).Once() - mam.On("ActivateTokenPool", em.ctx, storedPool, mock.MatchedBy(func(e *fftypes.BlockchainEvent) bool { - return e.ProtocolID == chainPool.Event.ProtocolID - })).Return(fmt.Errorf("pop")).Once() - mam.On("ActivateTokenPool", em.ctx, storedPool, mock.MatchedBy(func(e *fftypes.BlockchainEvent) bool { - return e.ProtocolID == chainPool.Event.ProtocolID - })).Return(nil).Once() - mdi.On("GetMessageByID", em.ctx, 
storedPool.Message).Return(storedMessage, nil) + mam.On("ActivateTokenPool", em.ctx, storedPool, info).Return(fmt.Errorf("pop")).Once() + mam.On("ActivateTokenPool", em.ctx, storedPool, info).Return(nil).Once() + mdm.On("GetMessageWithDataCached", em.ctx, storedPool.Message, data.CRORequireBatchID).Return(storedMessage, nil, true, nil).Once() err := em.TokenPoolCreated(mti, chainPool) assert.NoError(t, err) mdi.AssertExpectations(t) mam.AssertExpectations(t) + mdm.AssertExpectations(t) } func TestConfirmPoolBlockchainEventFail(t *testing.T) { @@ -444,10 +511,13 @@ func TestTokenPoolCreatedAnnounce(t *testing.T) { } info := fftypes.JSONObject{"some": "info"} pool := &tokens.TokenPool{ - Type: fftypes.TokenTypeFungible, - ProtocolID: "123", - TransactionID: txID, - Connector: "erc1155", + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, + Connector: "erc1155", Event: blockchain.Event{ BlockchainTXID: "0xffffeeee", ProtocolID: "tx1", @@ -486,10 +556,13 @@ func TestTokenPoolCreatedAnnounceBadOpInputID(t *testing.T) { } info := fftypes.JSONObject{"some": "info"} pool := &tokens.TokenPool{ - Type: fftypes.TokenTypeFungible, - ProtocolID: "123", - TransactionID: txID, - Connector: "erc1155", + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, + Connector: "erc1155", Event: blockchain.Event{ BlockchainTXID: "0xffffeeee", ProtocolID: "tx1", @@ -524,10 +597,13 @@ func TestTokenPoolCreatedAnnounceBadOpInputNS(t *testing.T) { } info := fftypes.JSONObject{"some": "info"} pool := &tokens.TokenPool{ - Type: fftypes.TokenTypeFungible, - ProtocolID: "123", - TransactionID: txID, - Connector: "erc1155", + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, + Connector: "erc1155", Event: blockchain.Event{ BlockchainTXID: "0xffffeeee", ProtocolID: "tx1", @@ -543,3 +619,50 @@ func TestTokenPoolCreatedAnnounceBadOpInputNS(t *testing.T) { mdi.AssertExpectations(t) } + +func TestTokenPoolCreatedAnnounceBadSymbol(t *testing.T) { + em, cancel := newTestEventManager(t) + defer cancel() + mdi := em.database.(*databasemocks.Plugin) + mti := &tokenmocks.Plugin{} + + poolID := fftypes.NewUUID() + txID := fftypes.NewUUID() + operations := []*fftypes.Operation{ + { + ID: fftypes.NewUUID(), + Input: fftypes.JSONObject{ + "id": poolID.String(), + "namespace": "test-ns", + "name": "my-pool", + "symbol": "FFT", + }, + }, + } + info := fftypes.JSONObject{"some": "info"} + pool := &tokens.TokenPool{ + Type: fftypes.TokenTypeFungible, + ProtocolID: "123", + TX: fftypes.TransactionRef{ + ID: txID, + Type: fftypes.TransactionTypeTokenPool, + }, + Connector: "erc1155", + Symbol: "ETH", + Event: blockchain.Event{ + BlockchainTXID: "0xffffeeee", + ProtocolID: "tx1", + Info: info, + }, + } + + mdi.On("GetTokenPoolByProtocolID", em.ctx, "erc1155", "123").Return(nil, nil).Times(2) + mdi.On("GetOperations", em.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")).Once() + mdi.On("GetOperations", em.ctx, mock.Anything).Return(operations, nil, nil).Once() + + err := em.TokenPoolCreated(mti, pool) + assert.NoError(t, err) + + mti.AssertExpectations(t) + mdi.AssertExpectations(t) +} diff --git a/internal/events/tokens_approved.go b/internal/events/tokens_approved.go index 71a7fac079..f1cfabda42 100644 --- a/internal/events/tokens_approved.go +++ 
b/internal/events/tokens_approved.go @@ -40,8 +40,10 @@ func (em *eventManager) loadApprovalOperation(ctx context.Context, tx *fftypes.U return err } if len(operations) > 0 { - if err = txcommon.RetrieveTokenApprovalInputs(ctx, operations[0], approval); err != nil { + if origApproval, err := txcommon.RetrieveTokenApprovalInputs(ctx, operations[0]); err != nil { log.L(ctx).Warnf("Failed to read operation inputs for token approval '%s': %s", approval.ProtocolID, err) + } else if origApproval != nil { + approval.LocalID = origApproval.LocalID } } @@ -102,7 +104,7 @@ func (em *eventManager) TokensApproved(ti tokens.Plugin, approval *tokens.TokenA return err } - event := fftypes.NewEvent(fftypes.EventTypeApprovalConfirmed, approval.Namespace, approval.LocalID, approval.TX.ID) + event := fftypes.NewEvent(fftypes.EventTypeApprovalConfirmed, approval.Namespace, approval.LocalID, approval.TX.ID, approval.Pool.String()) return em.database.InsertEvent(ctx, event) }) return err != nil, err // retry indefinitely (until context closes) diff --git a/internal/events/tokens_transferred.go b/internal/events/tokens_transferred.go index dd61093ca5..d0031b4e7f 100644 --- a/internal/events/tokens_transferred.go +++ b/internal/events/tokens_transferred.go @@ -40,8 +40,10 @@ func (em *eventManager) loadTransferOperation(ctx context.Context, tx *fftypes.U return err } if len(operations) > 0 { - if err = txcommon.RetrieveTokenTransferInputs(ctx, operations[0], transfer); err != nil { + if origTransfer, err := txcommon.RetrieveTokenTransferInputs(ctx, operations[0]); err != nil { log.L(ctx).Warnf("Failed to read operation inputs for token transfer '%s': %s", transfer.ProtocolID, err) + } else if origTransfer != nil { + transfer.LocalID = origTransfer.LocalID } } @@ -144,7 +146,7 @@ func (em *eventManager) TokensTransferred(ti tokens.Plugin, transfer *tokens.Tok } em.emitBlockchainEventMetric(transfer.Event) - event := fftypes.NewEvent(fftypes.EventTypeTransferConfirmed, transfer.Namespace, transfer.LocalID, transfer.TX.ID) + event := fftypes.NewEvent(fftypes.EventTypeTransferConfirmed, transfer.Namespace, transfer.LocalID, transfer.TX.ID, transfer.Pool.String()) return em.database.InsertEvent(ctx, event) }) return err != nil, err // retry indefinitely (until context closes) @@ -153,7 +155,7 @@ func (em *eventManager) TokensTransferred(ti tokens.Plugin, transfer *tokens.Tok // Initiate a rewind if a batch was potentially completed by the arrival of this transfer if err == nil && batchID != nil { log.L(em.ctx).Infof("Batch '%s' contains reference to received transfer. 
Transfer='%s' Message='%s'", batchID, transfer.ProtocolID, transfer.Message) - em.aggregator.rewindBatches <- batchID + em.aggregator.rewindBatches <- *batchID } return err diff --git a/internal/events/webhooks/webhooks.go b/internal/events/webhooks/webhooks.go index 3375266aa0..65e8875e9a 100644 --- a/internal/events/webhooks/webhooks.go +++ b/internal/events/webhooks/webhooks.go @@ -261,7 +261,7 @@ func (wh *WebHooks) ValidateOptions(options *fftypes.SubscriptionOptions) error return err } -func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) (req *whRequest, res *whResponse, err error) { +func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) (req *whRequest, res *whResponse, err error) { withData := sub.Options.WithData != nil && *sub.Options.WithData allData := make([]*fftypes.JSONAny, 0, len(data)) var firstData fftypes.JSONObject @@ -350,7 +350,7 @@ func (wh *WebHooks) attemptRequest(sub *fftypes.Subscription, event *fftypes.Eve return req, res, nil } -func (wh *WebHooks) doDelivery(connID string, reply bool, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (wh *WebHooks) doDelivery(connID string, reply bool, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { req, res, gwErr := wh.attemptRequest(sub, event, data) if gwErr != nil { // Generate a bad-gateway error response - we always want to send something back, @@ -400,7 +400,7 @@ func (wh *WebHooks) doDelivery(connID string, reply bool, sub *fftypes.Subscript return nil } -func (wh *WebHooks) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (wh *WebHooks) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { if event.Message == nil && sub.Options.WithData != nil && *sub.Options.WithData { log.L(wh.ctx).Debugf("Webhook withData=true subscription called with non-message event '%s'", event.ID) return nil diff --git a/internal/events/webhooks/webhooks_test.go b/internal/events/webhooks/webhooks_test.go index 6c65bc3f56..5783981db5 100644 --- a/internal/events/webhooks/webhooks_test.go +++ b/internal/events/webhooks/webhooks_test.go @@ -170,22 +170,24 @@ func TestRequestWithBodyReplyEndToEnd(t *testing.T) { "replytx": "in_replytx", } event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + Group: groupHash, + Type: fftypes.MessageTypePrivate, + }, + Data: fftypes.DataRefs{ + {ID: dataID}, + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: msgID, - Group: groupHash, - Type: fftypes.MessageTypePrivate, - }, - Data: fftypes.DataRefs{ - {ID: dataID}, - }, - }, } data := &fftypes.Data{ ID: dataID, @@ -216,7 +218,7 @@ func TestRequestWithBodyReplyEndToEnd(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{data}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{data}) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -275,22 +277,24 @@ func TestRequestWithEmptyStringBodyReplyEndToEnd(t *testing.T) { "replytx": "in_replytx", } event := 
&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + Group: groupHash, + Type: fftypes.MessageTypePrivate, + }, + Data: fftypes.DataRefs{ + {ID: dataID}, + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: msgID, - Group: groupHash, - Type: fftypes.MessageTypePrivate, - }, - Data: fftypes.DataRefs{ - {ID: dataID}, - }, - }, } data := &fftypes.Data{ ID: dataID, @@ -321,7 +325,7 @@ func TestRequestWithEmptyStringBodyReplyEndToEnd(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{data}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{data}) assert.NoError(t, err) mcb.AssertExpectations(t) @@ -352,22 +356,24 @@ func TestRequestNoBodyNoReply(t *testing.T) { to := sub.Options.TransportOptions() to["url"] = fmt.Sprintf("http://%s/myapi", server.Listener.Addr()) event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + Group: groupHash, + Type: fftypes.MessageTypePrivate, + }, + Data: fftypes.DataRefs{ + {ID: dataID}, + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: msgID, - Group: groupHash, - Type: fftypes.MessageTypePrivate, - }, - Data: fftypes.DataRefs{ - {ID: dataID}, - }, - }, } data := &fftypes.Data{ ID: dataID, @@ -376,7 +382,7 @@ func TestRequestNoBodyNoReply(t *testing.T) { }`), } - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{data}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{data}) assert.NoError(t, err) assert.True(t, called) } @@ -411,18 +417,20 @@ func TestRequestReplyEmptyData(t *testing.T) { to["url"] = fmt.Sprintf("http://%s/myapi", server.Listener.Addr()) to["reply"] = true event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + Type: fftypes.MessageTypeBroadcast, + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: msgID, - Type: fftypes.MessageTypeBroadcast, - }, - }, } mcb := wh.callbacks.(*eventsmocks.Callbacks) @@ -433,7 +441,7 @@ func TestRequestReplyEmptyData(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{}) assert.NoError(t, err) assert.True(t, called) } @@ -458,18 +466,20 @@ func TestRequestReplyBadJSON(t *testing.T) { to["reply"] = true to["json"] = true event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + Type: fftypes.MessageTypeBroadcast, + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - 
ID: msgID, - Type: fftypes.MessageTypeBroadcast, - }, - }, } mcb := wh.callbacks.(*eventsmocks.Callbacks) @@ -479,7 +489,7 @@ func TestRequestReplyBadJSON(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{}) + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{}) assert.NoError(t, err) } func TestRequestReplyDataArrayBadStatusB64(t *testing.T) { @@ -516,18 +526,20 @@ func TestRequestReplyDataArrayBadStatusB64(t *testing.T) { to["url"] = fmt.Sprintf("http://%s/myapi", server.Listener.Addr()) to["reply"] = true event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + Type: fftypes.MessageTypeBroadcast, + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: msgID, - Type: fftypes.MessageTypeBroadcast, - }, - }, } mcb := wh.callbacks.(*eventsmocks.Callbacks) @@ -540,7 +552,7 @@ func TestRequestReplyDataArrayBadStatusB64(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) @@ -564,18 +576,20 @@ func TestRequestReplyDataArrayError(t *testing.T) { to["url"] = fmt.Sprintf("http://%s/myapi", server.Listener.Addr()) to["reply"] = true event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + Type: fftypes.MessageTypeBroadcast, + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: msgID, - Type: fftypes.MessageTypeBroadcast, - }, - }, } mcb := wh.callbacks.(*eventsmocks.Callbacks) @@ -588,7 +602,7 @@ func TestRequestReplyDataArrayError(t *testing.T) { return true })).Return(nil) - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) @@ -610,18 +624,20 @@ func TestRequestReplyBuildRequestFailFastAsk(t *testing.T) { sub.Options.TransportOptions()["reply"] = true sub.Options.TransportOptions()["fastack"] = true event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: msgID, + Type: fftypes.MessageTypeBroadcast, + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: msgID, - Type: fftypes.MessageTypeBroadcast, - }, - }, } waiter := make(chan struct{}) @@ -638,7 +654,7 @@ func TestRequestReplyBuildRequestFailFastAsk(t *testing.T) { close(waiter) } - err := wh.DeliveryRequest(mock.Anything, sub, event, []*fftypes.Data{ + err := wh.DeliveryRequest(mock.Anything, sub, event, fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: 
fftypes.JSONAnyPtr(`"value1"`)}, {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`"value2"`)}, }) @@ -662,8 +678,10 @@ func TestDeliveryRequestNilMessage(t *testing.T) { } sub.Options.TransportOptions()["reply"] = true event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, @@ -688,19 +706,21 @@ func TestDeliveryRequestReplyToReply(t *testing.T) { } sub.Options.TransportOptions()["reply"] = true event := &fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + }, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.MessageTypeBroadcast, + CID: fftypes.NewUUID(), + }, + }, }, Subscription: fftypes.SubscriptionRef{ ID: sub.ID, }, - Message: &fftypes.Message{ - Header: fftypes.MessageHeader{ - ID: fftypes.NewUUID(), - Type: fftypes.MessageTypeBroadcast, - CID: fftypes.NewUUID(), - }, - }, } mcb := wh.callbacks.(*eventsmocks.Callbacks) diff --git a/internal/events/websockets/websocket_connection.go b/internal/events/websockets/websocket_connection.go index e33dc74775..e87dafdf5d 100644 --- a/internal/events/websockets/websocket_connection.go +++ b/internal/events/websockets/websocket_connection.go @@ -81,17 +81,13 @@ func (wc *websocketConnection) processAutoStart(req *http.Request) { autoAck, hasAutoack := req.URL.Query()["autoack"] isAutoack := hasAutoack && (len(autoAck) == 0 || autoAck[0] != "false") if hasEphemeral || hasName { + filter := fftypes.NewSubscriptionFilterFromQuery(query) err := wc.handleStart(&fftypes.WSClientActionStartPayload{ - AutoAck: &isAutoack, - Ephemeral: isEphemeral, - Namespace: query.Get("namespace"), - Name: query.Get("name"), - Filter: fftypes.SubscriptionFilter{ - Events: query.Get("filter.events"), - Topics: query.Get("filter.topics"), - Group: query.Get("filter.group"), - Tag: query.Get("filter.tag"), - }, + AutoAck: &isAutoack, + Ephemeral: isEphemeral, + Namespace: query.Get("namespace"), + Name: query.Get("name"), + Filter: filter, ChangeEvents: query.Get("changeevents"), }) if err != nil { diff --git a/internal/events/websockets/websockets.go b/internal/events/websockets/websockets.go index cf2e63cf8b..9889db324c 100644 --- a/internal/events/websockets/websockets.go +++ b/internal/events/websockets/websockets.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
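The processAutoStart change above replaces the hand-built filter with fftypes.NewSubscriptionFilterFromQuery, so WebSocket auto-start picks up the same nested filter fields as the rest of the subscription API. A standalone sketch of what such a query-to-filter mapping can look like, using simplified stand-in types; the real implementation in pkg/fftypes may map more fields and differ in detail:

// Standalone sketch, assuming simplified types: mapping URL query
// parameters onto nested subscription filter fields, in the spirit of
// fftypes.NewSubscriptionFilterFromQuery used in processAutoStart above.
package main

import (
	"fmt"
	"net/url"
)

type MessageFilter struct{ Author, Group, Tag string }

type SubscriptionFilter struct {
	Events, Topic string
	Message       MessageFilter
}

func newSubscriptionFilterFromQuery(q url.Values) SubscriptionFilter {
	return SubscriptionFilter{
		Events: q.Get("filter.events"),
		Topic:  q.Get("filter.topic"),
		Message: MessageFilter{
			Author: q.Get("filter.message.author"),
			Group:  q.Get("filter.message.group"),
			Tag:    q.Get("filter.message.tag"),
		},
	}
}

func main() {
	q, _ := url.ParseQuery("filter.topic=orders&filter.message.tag=approved")
	fmt.Printf("%+v\n", newSubscriptionFilterFromQuery(q))
}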
// // SPDX-License-Identifier: Apache-2.0 // @@ -78,7 +78,7 @@ func (ws *WebSockets) ValidateOptions(options *fftypes.SubscriptionOptions) erro return nil } -func (ws *WebSockets) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (ws *WebSockets) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { ws.connMux.Lock() conn, ok := ws.connections[connID] ws.connMux.Unlock() diff --git a/internal/events/websockets/websockets_test.go b/internal/events/websockets/websockets_test.go index 10db21d4fc..5c14ac98ed 100644 --- a/internal/events/websockets/websockets_test.go +++ b/internal/events/websockets/websockets_test.go @@ -182,7 +182,9 @@ func TestStartReceiveAckEphemeral(t *testing.T) { <-waitSubscribed ws.DeliveryRequest(connID, nil, &fftypes.EventDelivery{ - Event: fftypes.Event{ID: fftypes.NewUUID()}, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ID: fftypes.NewUUID()}, + }, Subscription: fftypes.SubscriptionRef{ID: fftypes.NewUUID()}, }, nil) @@ -230,7 +232,9 @@ func TestStartReceiveDurable(t *testing.T) { <-waitSubscribed ws.DeliveryRequest(connID, nil, &fftypes.EventDelivery{ - Event: fftypes.Event{ID: fftypes.NewUUID()}, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ID: fftypes.NewUUID()}, + }, Subscription: fftypes.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -239,7 +243,9 @@ func TestStartReceiveDurable(t *testing.T) { }, nil) // Put a second in flight ws.DeliveryRequest(connID, nil, &fftypes.EventDelivery{ - Event: fftypes.Event{ID: fftypes.NewUUID()}, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ID: fftypes.NewUUID()}, + }, Subscription: fftypes.SubscriptionRef{ ID: fftypes.NewUUID(), Namespace: "ns1", @@ -299,7 +305,9 @@ func TestAutoStartReceiveAckEphemeral(t *testing.T) { <-waitSubscribed ws.DeliveryRequest(connID, nil, &fftypes.EventDelivery{ - Event: fftypes.Event{ID: fftypes.NewUUID()}, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ID: fftypes.NewUUID()}, + }, Subscription: fftypes.SubscriptionRef{ID: fftypes.NewUUID()}, }, nil) @@ -600,7 +608,9 @@ func TestDispatchAutoAck(t *testing.T) { } wsc.ws.connections[wsc.connID] = wsc err := wsc.ws.DeliveryRequest(wsc.connID, nil, &fftypes.EventDelivery{ - Event: fftypes.Event{ID: fftypes.NewUUID()}, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ID: fftypes.NewUUID()}, + }, Subscription: fftypes.SubscriptionRef{ID: fftypes.NewUUID(), Namespace: "ns1", Name: "sub1"}, }, nil) assert.NoError(t, err) diff --git a/internal/i18n/en_translations.go b/internal/i18n/en_translations.go index f08d36f953..639af16e4e 100644 --- a/internal/i18n/en_translations.go +++ b/internal/i18n/en_translations.go @@ -287,4 +287,12 @@ var ( MsgNilOrNullObject = ffm("FF10368", "Object is null") MsgTokenApprovalFailed = ffm("FF10369", "Token approval with ID '%s' failed. 
Please check the FireFly logs for more information") MsgEventNotFound = ffm("FF10370", "Event with name '%s' not found", 400) + MsgOperationNotSupported = ffm("FF10371", "Operation not supported: %s", 400) + MsgFailedToRetrieve = ffm("FF10372", "Failed to retrieve %s %s", 500) + MsgBlobMissingPublic = ffm("FF10373", "Blob for data %s missing public payload reference while flushing batch", 500) + MsgDBMultiRowConfigError = ffm("FF10374", "Database invalid configuration - using multi-row insert on DB plugin that does not support query syntax for input") + MsgDBNoSequence = ffm("FF10375", "Failed to retrieve sequence for insert row %d (could mean duplicate insert)", 500) + MsgDownloadSharedFailed = ffm("FF10376", "Error downloading data with reference '%s' from shared storage") + MsgDownloadBatchMaxBytes = ffm("FF10377", "Error downloading batch with reference '%s' from shared storage - maximum size limit reached") + MsgOperationDataIncorrect = ffm("FF10378", "Operation data type incorrect: %T", 400) ) diff --git a/internal/networkmap/data_query.go b/internal/networkmap/data_query.go index 4acf915895..f2572318d5 100644 --- a/internal/networkmap/data_query.go +++ b/internal/networkmap/data_query.go @@ -25,23 +25,26 @@ import ( "github.com/hyperledger/firefly/pkg/fftypes" ) -func (nm *networkMap) GetOrganizationByID(ctx context.Context, id string) (*fftypes.Identity, error) { - u, err := fftypes.ParseUUID(ctx, id) - if err != nil { - return nil, err - } - o, err := nm.database.GetIdentityByID(ctx, u) +func (nm *networkMap) GetOrganizationByNameOrID(ctx context.Context, nameOrID string) (org *fftypes.Identity, err error) { + u, err := fftypes.ParseUUID(ctx, nameOrID) if err != nil { + if err := fftypes.ValidateFFNameField(ctx, nameOrID, "name"); err != nil { + return nil, err + } + if org, err = nm.database.GetIdentityByName(ctx, fftypes.IdentityTypeOrg, fftypes.SystemNamespace, nameOrID); err != nil { + return nil, err + } + } else if org, err = nm.database.GetIdentityByID(ctx, u); err != nil { return nil, err } - if o == nil { + if org == nil { return nil, i18n.NewError(ctx, i18n.Msg404NotFound) } - if o.Type != fftypes.IdentityTypeOrg { - log.L(ctx).Warnf("Identity '%s' (%s) is not an org identity", o.DID, o.ID) + if org.Type != fftypes.IdentityTypeOrg { + log.L(ctx).Warnf("Identity '%s' (%s) is not an org identity", org.DID, org.ID) return nil, nil } - return o, nil + return org, nil } func (nm *networkMap) GetOrganizations(ctx context.Context, filter database.AndFilter) ([]*fftypes.Identity, *database.FilterResult, error) { @@ -50,23 +53,26 @@ func (nm *networkMap) GetOrganizations(ctx context.Context, filter database.AndF return nm.database.GetIdentities(ctx, filter) } -func (nm *networkMap) GetNodeByID(ctx context.Context, id string) (*fftypes.Identity, error) { - u, err := fftypes.ParseUUID(ctx, id) - if err != nil { - return nil, err - } - n, err := nm.database.GetIdentityByID(ctx, u) +func (nm *networkMap) GetNodeByNameOrID(ctx context.Context, nameOrID string) (node *fftypes.Identity, err error) { + u, err := fftypes.ParseUUID(ctx, nameOrID) if err != nil { + if err := fftypes.ValidateFFNameField(ctx, nameOrID, "name"); err != nil { + return nil, err + } + if node, err = nm.database.GetIdentityByName(ctx, fftypes.IdentityTypeNode, fftypes.SystemNamespace, nameOrID); err != nil { + return nil, err + } + } else if node, err = nm.database.GetIdentityByID(ctx, u); err != nil { return nil, err } - if n == nil { + if node == nil { return nil, i18n.NewError(ctx, i18n.Msg404NotFound) } - if 
n.Type != fftypes.IdentityTypeNode { - log.L(ctx).Warnf("Identity '%s' (%s) is not a node identity", n.DID, n.ID) + if node.Type != fftypes.IdentityTypeNode { + log.L(ctx).Warnf("Identity '%s' (%s) is not a node identity", node.DID, node.ID) return nil, nil } - return n, nil + return node, nil } func (nm *networkMap) GetNodes(ctx context.Context, filter database.AndFilter) ([]*fftypes.Identity, *database.FilterResult, error) { diff --git a/internal/networkmap/data_query_test.go b/internal/networkmap/data_query_test.go index 27cdb08b35..6644766a69 100644 --- a/internal/networkmap/data_query_test.go +++ b/internal/networkmap/data_query_test.go @@ -27,98 +27,114 @@ import ( "github.com/stretchr/testify/mock" ) -func TestGetOrganizationByIDOk(t *testing.T) { +func TestGetOrganizationByNameOrIDOk(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() id := fftypes.NewUUID() nm.database.(*databasemocks.Plugin).On("GetIdentityByID", nm.ctx, id). Return(&fftypes.Identity{IdentityBase: fftypes.IdentityBase{ID: id, Type: fftypes.IdentityTypeOrg}}, nil) - res, err := nm.GetOrganizationByID(nm.ctx, id.String()) + res, err := nm.GetOrganizationByNameOrID(nm.ctx, id.String()) assert.NoError(t, err) assert.Equal(t, *id, *res.ID) } -func TestGetOrganizationByIDNotOrg(t *testing.T) { +func TestGetOrganizationByNameOrIDNotOrg(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() id := fftypes.NewUUID() nm.database.(*databasemocks.Plugin).On("GetIdentityByID", nm.ctx, id). Return(&fftypes.Identity{IdentityBase: fftypes.IdentityBase{ID: id, Type: fftypes.IdentityTypeNode}}, nil) - res, err := nm.GetOrganizationByID(nm.ctx, id.String()) + res, err := nm.GetOrganizationByNameOrID(nm.ctx, id.String()) assert.NoError(t, err) assert.Nil(t, res) } -func TestGetOrganizationByIDNotFound(t *testing.T) { +func TestGetOrganizationByNameOrIDNotFound(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() id := fftypes.NewUUID() nm.database.(*databasemocks.Plugin).On("GetIdentityByID", nm.ctx, id).Return(nil, nil) - _, err := nm.GetOrganizationByID(nm.ctx, id.String()) + _, err := nm.GetOrganizationByNameOrID(nm.ctx, id.String()) assert.Regexp(t, "FF10109", err) } -func TestGetOrganizationByIDError(t *testing.T) { +func TestGetOrganizationByNameOrIDError(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() id := fftypes.NewUUID() nm.database.(*databasemocks.Plugin).On("GetIdentityByID", nm.ctx, id).Return(nil, fmt.Errorf("pop")) - _, err := nm.GetOrganizationByID(nm.ctx, id.String()) + _, err := nm.GetOrganizationByNameOrID(nm.ctx, id.String()) assert.Regexp(t, "pop", err) } -func TestGetOrganizationByIDBadUUID(t *testing.T) { +func TestGetOrganizationByNameBadName(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() - _, err := nm.GetOrganizationByID(nm.ctx, "bad") - assert.Regexp(t, "FF10142", err) + _, err := nm.GetOrganizationByNameOrID(nm.ctx, "!bad") + assert.Regexp(t, "FF10131", err) +} + +func TestGetOrganizationByNameError(t *testing.T) { + nm, cancel := newTestNetworkmap(t) + defer cancel() + nm.database.(*databasemocks.Plugin).On("GetIdentityByName", nm.ctx, fftypes.IdentityTypeOrg, fftypes.SystemNamespace, "bad").Return(nil, fmt.Errorf("pop")) + _, err := nm.GetOrganizationByNameOrID(nm.ctx, "bad") + assert.Regexp(t, "pop", err) } -func TestGetNodeByIDOk(t *testing.T) { +func TestGetNodeByNameOrIDOk(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() id := fftypes.NewUUID() nm.database.(*databasemocks.Plugin).On("GetIdentityByID", nm.ctx, 
id). Return(&fftypes.Identity{IdentityBase: fftypes.IdentityBase{ID: id, Type: fftypes.IdentityTypeNode}}, nil) - res, err := nm.GetNodeByID(nm.ctx, id.String()) + res, err := nm.GetNodeByNameOrID(nm.ctx, id.String()) assert.NoError(t, err) assert.Equal(t, *id, *res.ID) } -func TestGetNodeByIDWrongType(t *testing.T) { +func TestGetNodeByNameOrIDWrongType(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() id := fftypes.NewUUID() nm.database.(*databasemocks.Plugin).On("GetIdentityByID", nm.ctx, id). Return(&fftypes.Identity{IdentityBase: fftypes.IdentityBase{ID: id, Type: fftypes.IdentityTypeOrg}}, nil) - res, err := nm.GetNodeByID(nm.ctx, id.String()) + res, err := nm.GetNodeByNameOrID(nm.ctx, id.String()) assert.NoError(t, err) assert.Nil(t, res) } -func TestGetNodeByIDNotFound(t *testing.T) { +func TestGetNodeByNameOrIDNotFound(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() id := fftypes.NewUUID() nm.database.(*databasemocks.Plugin).On("GetIdentityByID", nm.ctx, id).Return(nil, nil) - _, err := nm.GetNodeByID(nm.ctx, id.String()) + _, err := nm.GetNodeByNameOrID(nm.ctx, id.String()) assert.Regexp(t, "FF10109", err) } -func TestGetNodeByIDError(t *testing.T) { +func TestGetNodeByNameOrIDError(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() id := fftypes.NewUUID() nm.database.(*databasemocks.Plugin).On("GetIdentityByID", nm.ctx, id).Return(nil, fmt.Errorf("pop")) - _, err := nm.GetNodeByID(nm.ctx, id.String()) + _, err := nm.GetNodeByNameOrID(nm.ctx, id.String()) assert.Regexp(t, "pop", err) } -func TestGetNodeByIDBadUUID(t *testing.T) { +func TestGetNodeByNameBadName(t *testing.T) { nm, cancel := newTestNetworkmap(t) defer cancel() - _, err := nm.GetNodeByID(nm.ctx, "bad") - assert.Regexp(t, "FF10142", err) + _, err := nm.GetNodeByNameOrID(nm.ctx, "!bad") + assert.Regexp(t, "FF10131", err) +} + +func TestGetNodeByNameError(t *testing.T) { + nm, cancel := newTestNetworkmap(t) + defer cancel() + nm.database.(*databasemocks.Plugin).On("GetIdentityByName", nm.ctx, fftypes.IdentityTypeNode, fftypes.SystemNamespace, "bad").Return(nil, fmt.Errorf("pop")) + _, err := nm.GetNodeByNameOrID(nm.ctx, "bad") + assert.Regexp(t, "pop", err) } func TestGetOrganizations(t *testing.T) { diff --git a/internal/networkmap/manager.go b/internal/networkmap/manager.go index 6570fd7736..875652eb94 100644 --- a/internal/networkmap/manager.go +++ b/internal/networkmap/manager.go @@ -35,9 +35,9 @@ type Manager interface { RegisterIdentity(ctx context.Context, ns string, dto *fftypes.IdentityCreateDTO, waitConfirm bool) (identity *fftypes.Identity, err error) UpdateIdentity(ctx context.Context, ns string, id string, dto *fftypes.IdentityUpdateDTO, waitConfirm bool) (identity *fftypes.Identity, err error) - GetOrganizationByID(ctx context.Context, id string) (*fftypes.Identity, error) + GetOrganizationByNameOrID(ctx context.Context, nameOrID string) (*fftypes.Identity, error) GetOrganizations(ctx context.Context, filter database.AndFilter) ([]*fftypes.Identity, *database.FilterResult, error) - GetNodeByID(ctx context.Context, id string) (*fftypes.Identity, error) + GetNodeByNameOrID(ctx context.Context, nameOrID string) (*fftypes.Identity, error) GetNodes(ctx context.Context, filter database.AndFilter) ([]*fftypes.Identity, *database.FilterResult, error) GetIdentityByID(ctx context.Context, ns string, id string) (*fftypes.Identity, error) GetIdentities(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Identity, *database.FilterResult, error) diff 
--git a/internal/operations/cache.go b/internal/operations/cache.go
new file mode 100644
index 0000000000..4384511fd7
--- /dev/null
+++ b/internal/operations/cache.go
@@ -0,0 +1,84 @@
+// Copyright © 2022 Kaleido, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package operations
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/hyperledger/firefly/internal/log"
+	"github.com/hyperledger/firefly/pkg/fftypes"
+)
+
+type operationCacheKey struct{}
+type operationCache map[string]*fftypes.Operation
+
+func getOperationCache(ctx context.Context) operationCache {
+	ctxKey := operationCacheKey{}
+	cacheVal := ctx.Value(ctxKey)
+	if cacheVal != nil {
+		if cache, ok := cacheVal.(operationCache); ok {
+			return cache
+		}
+	}
+	return nil
+}
+
+func getCacheKey(op *fftypes.Operation) (string, error) {
+	opCopy := &fftypes.Operation{
+		Namespace:   op.Namespace,
+		Transaction: op.Transaction,
+		Type:        op.Type,
+		Plugin:      op.Plugin,
+		Input:       op.Input,
+	}
+	key, err := json.Marshal(opCopy)
+	if err != nil {
+		return "", err
+	}
+	return string(key), nil
+}
+
+func CreateOperationRetryContext(ctx context.Context) (ctx1 context.Context) {
+	l := log.L(ctx).WithField("opcache", fftypes.ShortID())
+	ctx1 = log.WithLogger(ctx, l)
+	return context.WithValue(ctx1, operationCacheKey{}, operationCache{})
+}
+
+func RunWithOperationCache(ctx context.Context, fn func(ctx context.Context) error) error {
+	return fn(CreateOperationRetryContext(ctx))
+}
+
+func (om *operationsManager) AddOrReuseOperation(ctx context.Context, op *fftypes.Operation) error {
+	// If a cache has been created via RunWithOperationCache, detect duplicate operation inserts
+	cache := getOperationCache(ctx)
+	if cache != nil {
+		if cacheKey, err := getCacheKey(op); err == nil {
+			if cached, ok := cache[cacheKey]; ok {
+				// Identical operation already added in this context
+				*op = *cached
+				return nil
+			}
+			if err = om.database.InsertOperation(ctx, op); err != nil {
+				return err
+			}
+			cache[cacheKey] = op
+			return nil
+		}
+	}
+	return om.database.InsertOperation(ctx, op)
+}
diff --git a/internal/operations/cache_test.go b/internal/operations/cache_test.go
new file mode 100644
index 0000000000..bf361ae562
--- /dev/null
+++ b/internal/operations/cache_test.go
@@ -0,0 +1,129 @@
+// Copyright © 2021 Kaleido, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
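getCacheKey above marshals only the identity fields of an operation (namespace, transaction, type, plugin, input), so two operations that differ only in ID or status produce the same key, which is what lets AddOrReuseOperation reuse the first insert, as the tests that follow verify. A simplified standalone sketch of that keying idea, with stand-in types rather than the real fftypes structs:

// Standalone sketch: JSON-marshal the identity fields only, so duplicate
// submissions in one retry context hash to the same cache key. Types are
// simplified stand-ins for fftypes.Operation.
package main

import (
	"encoding/json"
	"fmt"
)

type op struct {
	Namespace, Type, Plugin string
	Input                   map[string]string
}

func cacheKey(o op) string {
	b, _ := json.Marshal(o) // map keys marshal sorted, so the key is deterministic; errors elided for brevity
	return string(b)
}

func main() {
	a := op{Namespace: "ns1", Type: "blockchain_pin_batch", Input: map[string]string{"batch": "1"}}
	b := a // same identity fields; in reality a distinct operation with its own ID
	fmt.Println(cacheKey(a) == cacheKey(b)) // true -> second insert is deduplicated
}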
+ +package operations + +import ( + "context" + "fmt" + "testing" + + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestRunWithOperationCache(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + op1 := &fftypes.Operation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: fftypes.OpStatusFailed, + } + op1Copy := &fftypes.Operation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: fftypes.OpStatusPending, + } + op2 := &fftypes.Operation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "2"}, + Status: fftypes.OpStatusFailed, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("InsertOperation", mock.Anything, op1).Return(nil).Once() + mdi.On("InsertOperation", mock.Anything, op2).Return(nil).Once() + + err := RunWithOperationCache(context.Background(), func(ctx context.Context) error { + if err := om.AddOrReuseOperation(ctx, op1); err != nil { + return err + } + if err := om.AddOrReuseOperation(ctx, op1Copy); err != nil { + return err + } + return om.AddOrReuseOperation(ctx, op2) + }) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestRunWithOperationCacheFail(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + op1 := &fftypes.Operation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: fftypes.OpStatusFailed, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("InsertOperation", mock.Anything, op1).Return(fmt.Errorf("pop")).Once() + + err := RunWithOperationCache(context.Background(), func(ctx context.Context) error { + return om.AddOrReuseOperation(ctx, op1) + }) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestAddOrReuseOperationNoCache(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + op1 := &fftypes.Operation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: fftypes.OpStatusFailed, + } + op2 := &fftypes.Operation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + Input: fftypes.JSONObject{"batch": "1"}, + Status: fftypes.OpStatusPending, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("InsertOperation", ctx, op1).Return(nil).Once() + mdi.On("InsertOperation", ctx, op2).Return(nil).Once() + + err := om.AddOrReuseOperation(ctx, op1) + assert.NoError(t, err) + err = om.AddOrReuseOperation(ctx, op2) + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestGetCacheKeyBadJSON(t *testing.T) { + op := &fftypes.Operation{ + Input: fftypes.JSONObject{ + "test": map[bool]bool{true: false}, + }, + } + _, err := getCacheKey(op) + assert.Error(t, err) +} diff --git a/internal/operations/manager.go b/internal/operations/manager.go new file mode 100644 index 0000000000..f59f1067bc --- /dev/null +++ b/internal/operations/manager.go @@ -0,0 +1,160 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package operations + +import ( + "context" + + "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/fftypes" +) + +type OperationHandler interface { + fftypes.Named + PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) + RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) +} + +type Manager interface { + RegisterHandler(ctx context.Context, handler OperationHandler, ops []fftypes.OpType) + PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) + RunOperation(ctx context.Context, op *fftypes.PreparedOperation, options ...RunOperationOption) error + RetryOperation(ctx context.Context, ns string, opID *fftypes.UUID) (*fftypes.Operation, error) + AddOrReuseOperation(ctx context.Context, op *fftypes.Operation) error +} + +type RunOperationOption int + +const ( + RemainPendingOnFailure RunOperationOption = iota +) + +type operationsManager struct { + ctx context.Context + database database.Plugin + handlers map[fftypes.OpType]OperationHandler +} + +func NewOperationsManager(ctx context.Context, di database.Plugin) (Manager, error) { + if di == nil { + return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) + } + om := &operationsManager{ + ctx: ctx, + database: di, + handlers: make(map[fftypes.OpType]OperationHandler), + } + return om, nil +} + +func (om *operationsManager) RegisterHandler(ctx context.Context, handler OperationHandler, ops []fftypes.OpType) { + for _, opType := range ops { + log.L(ctx).Debugf("OpType=%s registered to handler %s", opType, handler.Name()) + om.handlers[opType] = handler + } +} + +func (om *operationsManager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + handler, ok := om.handlers[op.Type] + if !ok { + return nil, i18n.NewError(ctx, i18n.MsgOperationNotSupported, op.Type) + } + return handler.PrepareOperation(ctx, op) +} + +func (om *operationsManager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation, options ...RunOperationOption) error { + failState := fftypes.OpStatusFailed + for _, o := range options { + if o == RemainPendingOnFailure { + failState = fftypes.OpStatusPending + } + } + + handler, ok := om.handlers[op.Type] + if !ok { + return i18n.NewError(ctx, i18n.MsgOperationNotSupported, op.Type) + } + log.L(ctx).Infof("Executing %s operation %s via handler %s", op.Type, op.ID, handler.Name()) + log.L(ctx).Tracef("Operation detail: %+v", op) + if outputs, complete, err := handler.RunOperation(ctx, op); err != nil { + om.writeOperationFailure(ctx, op.ID, outputs, err, failState) + return err + } else if complete { + om.writeOperationSuccess(ctx, op.ID, outputs) + } + return nil +} + +func (om *operationsManager) findLatestRetry(ctx context.Context, opID *fftypes.UUID) (op *fftypes.Operation, err error) { + op, err = om.database.GetOperationByID(ctx, opID) + if err != nil { + return 
nil, err + } + if op.Retry == nil { + return op, nil + } + return om.findLatestRetry(ctx, op.Retry) +} + +func (om *operationsManager) RetryOperation(ctx context.Context, ns string, opID *fftypes.UUID) (op *fftypes.Operation, err error) { + var po *fftypes.PreparedOperation + err = om.database.RunAsGroup(ctx, func(ctx context.Context) error { + op, err = om.findLatestRetry(ctx, opID) + if err != nil { + return err + } + + // Create a copy of the operation with a new ID + op.ID = fftypes.NewUUID() + op.Status = fftypes.OpStatusPending + op.Error = "" + op.Output = nil + op.Created = fftypes.Now() + op.Updated = op.Created + if err = om.database.InsertOperation(ctx, op); err != nil { + return err + } + + // Update the old operation to point to the new one + update := database.OperationQueryFactory.NewUpdate(ctx).Set("retry", op.ID) + if err = om.database.UpdateOperation(ctx, opID, update); err != nil { + return err + } + + po, err = om.PrepareOperation(ctx, op) + return err + }) + if err != nil { + return nil, err + } + + return op, om.RunOperation(ctx, po) +} + +func (om *operationsManager) writeOperationSuccess(ctx context.Context, opID *fftypes.UUID, outputs fftypes.JSONObject) { + if err := om.database.ResolveOperation(ctx, opID, fftypes.OpStatusSucceeded, "", outputs); err != nil { + log.L(ctx).Errorf("Failed to update operation %s: %s", opID, err) + } +} + +func (om *operationsManager) writeOperationFailure(ctx context.Context, opID *fftypes.UUID, outputs fftypes.JSONObject, err error, newState fftypes.OpStatus) { + if err := om.database.ResolveOperation(ctx, opID, newState, err.Error(), outputs); err != nil { + log.L(ctx).Errorf("Failed to update operation %s: %s", opID, err) + } +} diff --git a/internal/operations/manager_test.go b/internal/operations/manager_test.go new file mode 100644 index 0000000000..fcb816140c --- /dev/null +++ b/internal/operations/manager_test.go @@ -0,0 +1,383 @@ +// Copyright © 2021 Kaleido, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
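Before the unit tests below, a sketch of how a component plugs into the manager above (illustrative only; the handler is hypothetical, mirroring the mockHandler used in the tests): a type implements OperationHandler, is registered for one or more OpTypes, and RunOperation then dispatches to it, resolving the operation according to the returned complete flag.

package example // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/hyperledger/firefly/pkg/fftypes"
)

// noopHandler is a hypothetical OperationHandler used only for illustration.
type noopHandler struct{}

func (h *noopHandler) Name() string { return "NoopHandler" }

func (h *noopHandler) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) {
	// Translate the persisted operation into its runnable form
	return &fftypes.PreparedOperation{ID: op.ID, Type: op.Type}, nil
}

func (h *noopHandler) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (fftypes.JSONObject, bool, error) {
	// complete=true tells the manager to resolve the op as Succeeded;
	// returning an error would resolve it as Failed (or Pending, when the
	// caller passed the RemainPendingOnFailure option)
	return fftypes.JSONObject{"result": "ok"}, true, nil
}

Registration and dispatch then look like om.RegisterHandler(ctx, &noopHandler{}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) followed by om.RunOperation(ctx, po), as exercised by the tests that follow.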
+ +package operations + +import ( + "context" + "fmt" + "testing" + + "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +type mockHandler struct { + Complete bool + Err error + Prepared *fftypes.PreparedOperation + Outputs fftypes.JSONObject +} + +func (m *mockHandler) Name() string { + return "MockHandler" +} + +func (m *mockHandler) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + return m.Prepared, m.Err +} + +func (m *mockHandler) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { + return m.Outputs, m.Complete, m.Err +} + +func newTestOperations(t *testing.T) (*operationsManager, func()) { + config.Reset() + mdi := &databasemocks.Plugin{} + + rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() + rag.RunFn = func(a mock.Arguments) { + rag.ReturnArguments = mock.Arguments{ + a[1].(func(context.Context) error)(a[0].(context.Context)), + } + } + + ctx, cancel := context.WithCancel(context.Background()) + om, err := NewOperationsManager(ctx, mdi) + assert.NoError(t, err) + return om.(*operationsManager), cancel +} + +func TestInitFail(t *testing.T) { + _, err := NewOperationsManager(context.Background(), nil) + assert.Regexp(t, "FF10128", err) +} + +func TestPrepareOperationNotSupported(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + op := &fftypes.Operation{} + + _, err := om.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10371", err) +} + +func TestPrepareOperationSuccess(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + op := &fftypes.Operation{ + Type: fftypes.OpTypeBlockchainPinBatch, + } + + om.RegisterHandler(ctx, &mockHandler{}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + _, err := om.PrepareOperation(context.Background(), op) + + assert.NoError(t, err) +} + +func TestRunOperationNotSupported(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + op := &fftypes.PreparedOperation{} + + err := om.RunOperation(context.Background(), op) + assert.Regexp(t, "FF10371", err) +} + +func TestRunOperationSuccess(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + op := &fftypes.PreparedOperation{ + Type: fftypes.OpTypeBlockchainPinBatch, + } + + om.RegisterHandler(ctx, &mockHandler{}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + err := om.RunOperation(context.Background(), op) + + assert.NoError(t, err) +} + +func TestRunOperationSyncSuccess(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + op := &fftypes.PreparedOperation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("ResolveOperation", ctx, op.ID, fftypes.OpStatusSucceeded, "", mock.Anything).Return(nil) + + om.RegisterHandler(ctx, &mockHandler{Complete: true}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + err := om.RunOperation(ctx, op) + + assert.NoError(t, err) + + mdi.AssertExpectations(t) +} + +func TestRunOperationFail(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + op := 
&fftypes.PreparedOperation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("ResolveOperation", ctx, op.ID, fftypes.OpStatusFailed, "pop", mock.Anything).Return(nil) + + om.RegisterHandler(ctx, &mockHandler{Err: fmt.Errorf("pop")}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + err := om.RunOperation(ctx, op) + + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestRunOperationFailRemainPending(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + op := &fftypes.PreparedOperation{ + ID: fftypes.NewUUID(), + Type: fftypes.OpTypeBlockchainPinBatch, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("ResolveOperation", ctx, op.ID, fftypes.OpStatusPending, "pop", mock.Anything).Return(nil) + + om.RegisterHandler(ctx, &mockHandler{Err: fmt.Errorf("pop")}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + err := om.RunOperation(ctx, op, RemainPendingOnFailure) + + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestRetryOperationSuccess(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + op := &fftypes.Operation{ + ID: opID, + Plugin: "blockchain", + Type: fftypes.OpTypeBlockchainPinBatch, + Status: fftypes.OpStatusFailed, + } + po := &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("GetOperationByID", ctx, opID).Return(op, nil) + mdi.On("InsertOperation", ctx, mock.MatchedBy(func(newOp *fftypes.Operation) bool { + assert.NotEqual(t, opID, newOp.ID) + assert.Equal(t, "blockchain", newOp.Plugin) + assert.Equal(t, fftypes.OpStatusPending, newOp.Status) + assert.Equal(t, fftypes.OpTypeBlockchainPinBatch, newOp.Type) + return true + })).Return(nil) + mdi.On("UpdateOperation", ctx, op.ID, mock.MatchedBy(func(update database.Update) bool { + info, err := update.Finalize() + assert.NoError(t, err) + assert.Equal(t, 1, len(info.SetOperations)) + assert.Equal(t, "retry", info.SetOperations[0].Field) + val, err := info.SetOperations[0].Value.Value() + assert.NoError(t, err) + assert.Equal(t, op.ID.String(), val) + return true + })).Return(nil) + + om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + newOp, err := om.RetryOperation(ctx, "ns1", op.ID) + + assert.NoError(t, err) + assert.NotNil(t, newOp) + + mdi.AssertExpectations(t) +} + +func TestRetryOperationGetFail(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + op := &fftypes.Operation{ + ID: opID, + Plugin: "blockchain", + Type: fftypes.OpTypeBlockchainPinBatch, + Status: fftypes.OpStatusFailed, + } + po := &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("GetOperationByID", ctx, opID).Return(op, fmt.Errorf("pop")) + + om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + _, err := om.RetryOperation(ctx, "ns1", op.ID) + + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestRetryTwiceOperationInsertFail(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + opID2 := fftypes.NewUUID() + op := &fftypes.Operation{ + ID: opID, + Plugin: "blockchain", + Type: 
fftypes.OpTypeBlockchainPinBatch, + Status: fftypes.OpStatusFailed, + Retry: opID2, + } + op2 := &fftypes.Operation{ + ID: opID2, + Plugin: "blockchain", + Type: fftypes.OpTypeBlockchainPinBatch, + Status: fftypes.OpStatusFailed, + } + po := &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("GetOperationByID", ctx, opID).Return(op, nil) + mdi.On("GetOperationByID", ctx, opID2).Return(op2, nil) + mdi.On("InsertOperation", ctx, mock.Anything).Return(fmt.Errorf("pop")) + + om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + _, err := om.RetryOperation(ctx, "ns1", op.ID) + + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestRetryOperationInsertFail(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + op := &fftypes.Operation{ + ID: opID, + Plugin: "blockchain", + Type: fftypes.OpTypeBlockchainPinBatch, + Status: fftypes.OpStatusFailed, + } + po := &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("GetOperationByID", ctx, opID).Return(op, nil) + mdi.On("InsertOperation", ctx, mock.Anything).Return(fmt.Errorf("pop")) + + om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + _, err := om.RetryOperation(ctx, "ns1", op.ID) + + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestRetryOperationUpdateFail(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + op := &fftypes.Operation{ + ID: opID, + Plugin: "blockchain", + Type: fftypes.OpTypeBlockchainPinBatch, + Status: fftypes.OpStatusFailed, + } + po := &fftypes.PreparedOperation{ + ID: op.ID, + Type: op.Type, + } + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("GetOperationByID", ctx, opID).Return(op, nil) + mdi.On("InsertOperation", ctx, mock.Anything).Return(nil) + mdi.On("UpdateOperation", ctx, op.ID, mock.Anything).Return(fmt.Errorf("pop")) + + om.RegisterHandler(ctx, &mockHandler{Prepared: po}, []fftypes.OpType{fftypes.OpTypeBlockchainPinBatch}) + _, err := om.RetryOperation(ctx, "ns1", op.ID) + + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestWriteOperationSuccess(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("ResolveOperation", ctx, opID, fftypes.OpStatusSucceeded, "", mock.Anything).Return(fmt.Errorf("pop")) + + om.writeOperationSuccess(ctx, opID, nil) + + mdi.AssertExpectations(t) +} + +func TestWriteOperationFailure(t *testing.T) { + om, cancel := newTestOperations(t) + defer cancel() + + ctx := context.Background() + opID := fftypes.NewUUID() + + mdi := om.database.(*databasemocks.Plugin) + mdi.On("ResolveOperation", ctx, opID, fftypes.OpStatusFailed, "pop", mock.Anything).Return(fmt.Errorf("pop")) + + om.writeOperationFailure(ctx, opID, nil, fmt.Errorf("pop"), fftypes.OpStatusFailed) + + mdi.AssertExpectations(t) +} diff --git a/internal/orchestrator/bound_callbacks.go b/internal/orchestrator/bound_callbacks.go index 217d62eb0e..2e203669e5 100644 --- a/internal/orchestrator/bound_callbacks.go +++ b/internal/orchestrator/bound_callbacks.go @@ -21,12 +21,14 @@ import ( "github.com/hyperledger/firefly/pkg/blockchain" 
"github.com/hyperledger/firefly/pkg/dataexchange" "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/hyperledger/firefly/pkg/sharedstorage" "github.com/hyperledger/firefly/pkg/tokens" ) type boundCallbacks struct { bi blockchain.Plugin dx dataexchange.Plugin + ss sharedstorage.Plugin ei events.EventManager } @@ -46,8 +48,8 @@ func (bc *boundCallbacks) TransferResult(trackingID string, status fftypes.OpSta return bc.ei.TransferResult(bc.dx, trackingID, status, update) } -func (bc *boundCallbacks) BLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error { - return bc.ei.BLOBReceived(bc.dx, peerID, hash, size, payloadRef) +func (bc *boundCallbacks) PrivateBLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error { + return bc.ei.PrivateBLOBReceived(bc.dx, peerID, hash, size, payloadRef) } func (bc *boundCallbacks) MessageReceived(peerID string, data []byte) (manifest string, err error) { @@ -69,3 +71,11 @@ func (bc *boundCallbacks) BlockchainEvent(event *blockchain.EventWithSubscriptio func (bc *boundCallbacks) TokensApproved(plugin tokens.Plugin, approval *tokens.TokenApproval) error { return bc.ei.TokensApproved(plugin, approval) } + +func (bc *boundCallbacks) SharedStorageBatchDownloaded(ns, payloadRef string, data []byte) (*fftypes.UUID, error) { + return bc.ei.SharedStorageBatchDownloaded(bc.ss, ns, payloadRef, data) +} + +func (bc *boundCallbacks) SharedStorageBLOBDownloaded(hash fftypes.Bytes32, size int64, payloadRef string) error { + return bc.ei.SharedStorageBLOBDownloaded(bc.ss, hash, size, payloadRef) +} diff --git a/internal/orchestrator/bound_callbacks_test.go b/internal/orchestrator/bound_callbacks_test.go index 94e1dfa375..c524d5076b 100644 --- a/internal/orchestrator/bound_callbacks_test.go +++ b/internal/orchestrator/bound_callbacks_test.go @@ -23,6 +23,7 @@ import ( "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/dataexchangemocks" "github.com/hyperledger/firefly/mocks/eventmocks" + "github.com/hyperledger/firefly/mocks/sharedstoragemocks" "github.com/hyperledger/firefly/mocks/tokenmocks" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/fftypes" @@ -36,7 +37,8 @@ func TestBoundCallbacks(t *testing.T) { mbi := &blockchainmocks.Plugin{} mdx := &dataexchangemocks.Plugin{} mti := &tokenmocks.Plugin{} - bc := boundCallbacks{bi: mbi, dx: mdx, ei: mei} + mss := &sharedstoragemocks.Plugin{} + bc := boundCallbacks{bi: mbi, dx: mdx, ei: mei, ss: mss} info := fftypes.JSONObject{"hello": "world"} batch := &blockchain.BatchPin{TransactionID: fftypes.NewUUID()} @@ -64,8 +66,8 @@ func TestBoundCallbacks(t *testing.T) { }) assert.EqualError(t, err, "pop") - mei.On("BLOBReceived", mdx, "peer1", *hash, int64(12345), "ns1/id1").Return(fmt.Errorf("pop")) - err = bc.BLOBReceived("peer1", *hash, 12345, "ns1/id1") + mei.On("PrivateBLOBReceived", mdx, "peer1", *hash, int64(12345), "ns1/id1").Return(fmt.Errorf("pop")) + err = bc.PrivateBLOBReceived("peer1", *hash, 12345, "ns1/id1") assert.EqualError(t, err, "pop") mei.On("MessageReceived", mdx, "peer1", []byte{}).Return("manifest data", fmt.Errorf("pop")) @@ -87,4 +89,12 @@ func TestBoundCallbacks(t *testing.T) { mei.On("BlockchainEvent", mock.AnythingOfType("*blockchain.EventWithSubscription")).Return(fmt.Errorf("pop")) err = bc.BlockchainEvent(&blockchain.EventWithSubscription{}) assert.EqualError(t, err, "pop") + + mei.On("SharedStorageBatchDownloaded", mss, "ns1", "payload1", 
[]byte(`{}`)).Return(nil, fmt.Errorf("pop")) + _, err = bc.SharedStorageBatchDownloaded("ns1", "payload1", []byte(`{}`)) + assert.EqualError(t, err, "pop") + + mei.On("SharedStorageBLOBDownloaded", mss, *hash, int64(12345), "payload1").Return(fmt.Errorf("pop")) + err = bc.SharedStorageBLOBDownloaded(*hash, 12345, "payload1") + assert.EqualError(t, err, "pop") } diff --git a/internal/orchestrator/data_query.go b/internal/orchestrator/data_query.go index 2251262d69..e948cbe8a3 100644 --- a/internal/orchestrator/data_query.go +++ b/internal/orchestrator/data_query.go @@ -84,7 +84,7 @@ func (or *orchestrator) fetchMessageData(ctx context.Context, msg *fftypes.Messa Message: *msg, } // Lookup the full data - data, _, err := or.data.GetMessageData(ctx, msg, true) + data, _, err := or.data.GetMessageDataCached(ctx, msg) if err != nil { return nil, err } @@ -100,7 +100,7 @@ func (or *orchestrator) GetMessageByIDWithData(ctx context.Context, ns, id strin return or.fetchMessageData(ctx, msg) } -func (or *orchestrator) GetBatchByID(ctx context.Context, ns, id string) (*fftypes.Batch, error) { +func (or *orchestrator) GetBatchByID(ctx context.Context, ns, id string) (*fftypes.BatchPersisted, error) { u, err := or.verifyIDAndNamespace(ctx, ns, id) if err != nil { return nil, err @@ -183,12 +183,12 @@ func (or *orchestrator) GetMessagesWithData(ctx context.Context, ns string, filt return msgsData, fr, err } -func (or *orchestrator) GetMessageData(ctx context.Context, ns, id string) ([]*fftypes.Data, error) { +func (or *orchestrator) GetMessageData(ctx context.Context, ns, id string) (fftypes.DataArray, error) { msg, err := or.getMessageByID(ctx, ns, id) if err != nil || msg == nil { return nil, err } - data, _, err := or.data.GetMessageData(ctx, msg, true) + data, _, err := or.data.GetMessageDataCached(ctx, msg) return data, err } @@ -209,7 +209,7 @@ func (or *orchestrator) getMessageTransactionID(ctx context.Context, ns, id stri if batch == nil { return nil, i18n.NewError(ctx, i18n.MsgBatchNotFound, msg.BatchID) } - txID = batch.Payload.TX.ID + txID = batch.TX.ID if txID == nil { return nil, i18n.NewError(ctx, i18n.MsgBatchTXNotSet, msg.BatchID) } @@ -253,12 +253,12 @@ func (or *orchestrator) GetMessageEvents(ctx context.Context, ns, id string, fil return or.database.GetEvents(ctx, filter) } -func (or *orchestrator) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Batch, *database.FilterResult, error) { +func (or *orchestrator) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) { filter = or.scopeNS(ns, filter) return or.database.GetBatches(ctx, filter) } -func (or *orchestrator) GetData(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Data, *database.FilterResult, error) { +func (or *orchestrator) GetData(ctx context.Context, ns string, filter database.AndFilter) (fftypes.DataArray, *database.FilterResult, error) { filter = or.scopeNS(ns, filter) return or.database.GetData(ctx, filter) } @@ -307,3 +307,25 @@ func (or *orchestrator) GetTransactionBlockchainEvents(ctx context.Context, ns, ) return or.database.GetBlockchainEvents(ctx, filter) } + +func (or *orchestrator) GetPins(ctx context.Context, filter database.AndFilter) ([]*fftypes.Pin, *database.FilterResult, error) { + return or.database.GetPins(ctx, filter) +} + +func (or *orchestrator) GetEventsWithReferences(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.EnrichedEvent, 
*database.FilterResult, error) { + filter = or.scopeNS(ns, filter) + events, fr, err := or.database.GetEvents(ctx, filter) + if err != nil { + return nil, nil, err + } + + enriched := make([]*fftypes.EnrichedEvent, len(events)) + for i, event := range events { + enrichedEvent, err := or.txHelper.EnrichEvent(or.ctx, event) + if err != nil { + return nil, nil, err + } + enriched[i] = enrichedEvent + } + return enriched, fr, err +} diff --git a/internal/orchestrator/data_query_test.go b/internal/orchestrator/data_query_test.go index 16218dd6c8..e8fa4614fa 100644 --- a/internal/orchestrator/data_query_test.go +++ b/internal/orchestrator/data_query_test.go @@ -121,7 +121,7 @@ func TestGetMessageByIDWithDataOk(t *testing.T) { }, } or.mdi.On("GetMessageByID", mock.Anything, mock.MatchedBy(func(u *fftypes.UUID) bool { return u.Equals(msgID) })).Return(msg, nil) - or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{ + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Value: fftypes.JSONAnyPtr("{}")}, {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Value: fftypes.JSONAnyPtr("{}")}, }, true, nil) @@ -158,7 +158,7 @@ func TestGetMessageByIDWithDataFail(t *testing.T) { }, } or.mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil) - or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, false, fmt.Errorf("pop")) _, err := or.GetMessageByIDWithData(context.Background(), "ns1", msgID.String()) assert.EqualError(t, err, "pop") @@ -195,7 +195,7 @@ func TestGetMessagesWithDataOk(t *testing.T) { } or.mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil) fb := database.MessageQueryFactory.NewFilter(context.Background()) - or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return([]*fftypes.Data{}, true, nil) + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{}, true, nil) f := fb.And(fb.Eq("id", u)) _, _, err := or.GetMessagesWithData(context.Background(), "ns1", f) assert.NoError(t, err) @@ -213,7 +213,7 @@ func TestGetMessagesWithDataFail(t *testing.T) { } or.mdi.On("GetMessages", mock.Anything, mock.Anything).Return([]*fftypes.Message{msg}, nil, nil) fb := database.MessageQueryFactory.NewFilter(context.Background()) - or.mdm.On("GetMessageData", mock.Anything, mock.Anything, true).Return(nil, true, fmt.Errorf("pop")) + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(nil, true, fmt.Errorf("pop")) f := fb.And(fb.Eq("id", u)) _, _, err := or.GetMessagesWithData(context.Background(), "ns1", f) assert.EqualError(t, err, "pop") @@ -247,12 +247,10 @@ func TestGetMessageTransactionOk(t *testing.T) { TxType: fftypes.TransactionTypeBatchPin, }, }, nil) - or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.Batch{ - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: txID, - }, + or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.BatchPersisted{ + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeBatchPin, + ID: txID, }, }, nil) or.mdi.On("GetTransactionByID", mock.Anything, txID).Return(&fftypes.Transaction{ @@ -275,12 +273,10 @@ func TestGetMessageTransactionOperations(t *testing.T) { TxType: fftypes.TransactionTypeBatchPin, }, }, nil) - 
or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.Batch{ - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - Type: fftypes.TransactionTypeBatchPin, - ID: txID, - }, + or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.BatchPersisted{ + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeBatchPin, + ID: txID, }, }, nil) or.mdi.On("GetOperations", mock.Anything, mock.Anything).Return([]*fftypes.Operation{}, nil, nil) @@ -309,7 +305,7 @@ func TestGetMessageTransactionNoBatchTX(t *testing.T) { TxType: fftypes.TransactionTypeBatchPin, }, }, nil) - or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.Batch{}, nil) + or.mdi.On("GetBatchByID", mock.Anything, batchID).Return(&fftypes.BatchPersisted{}, nil) _, err := or.GetMessageTransaction(context.Background(), "ns1", msgID.String()) assert.Regexp(t, "FF10210", err) } @@ -384,7 +380,7 @@ func TestGetMessageData(t *testing.T) { }, } or.mdi.On("GetMessageByID", mock.Anything, mock.Anything).Return(msg, nil) - or.mdm.On("GetMessageData", mock.Anything, msg, true).Return([]*fftypes.Data{}, true, nil) + or.mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).Return(fftypes.DataArray{}, true, nil) _, err := or.GetMessageData(context.Background(), "ns1", fftypes.NewUUID().String()) assert.NoError(t, err) } @@ -449,7 +445,7 @@ func TestGetBatchByIDBadID(t *testing.T) { func TestGetBatches(t *testing.T) { or := newTestOrchestrator() u := fftypes.NewUUID() - or.mdi.On("GetBatches", mock.Anything, mock.Anything).Return([]*fftypes.Batch{}, nil, nil) + or.mdi.On("GetBatches", mock.Anything, mock.Anything).Return([]*fftypes.BatchPersisted{}, nil, nil) fb := database.BatchQueryFactory.NewFilter(context.Background()) f := fb.And(fb.Eq("id", u)) _, _, err := or.GetBatches(context.Background(), "ns1", f) @@ -473,7 +469,7 @@ func TestGetDataByIDBadID(t *testing.T) { func TestGetData(t *testing.T) { or := newTestOrchestrator() u := fftypes.NewUUID() - or.mdi.On("GetData", mock.Anything, mock.Anything).Return([]*fftypes.Data{}, nil, nil) + or.mdi.On("GetData", mock.Anything, mock.Anything).Return(fftypes.DataArray{}, nil, nil) fb := database.DataQueryFactory.NewFilter(context.Background()) f := fb.And(fb.Eq("id", u)) _, _, err := or.GetData(context.Background(), "ns1", f) @@ -571,6 +567,95 @@ func TestGetEvents(t *testing.T) { assert.NoError(t, err) } +func TestGetEventsWithReferencesFail(t *testing.T) { + or := newTestOrchestrator() + u := fftypes.NewUUID() + or.mdi.On("GetEvents", mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + fb := database.EventQueryFactory.NewFilter(context.Background()) + f := fb.And(fb.Eq("id", u)) + _, _, err := or.GetEventsWithReferences(context.Background(), "ns1", f) + assert.EqualError(t, err, "pop") +} + +func TestGetEventsWithReferences(t *testing.T) { + or := newTestOrchestrator() + u := fftypes.NewUUID() + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + ref2 := fftypes.NewUUID() + ev2 := fftypes.NewUUID() + ref3 := fftypes.NewUUID() + ev3 := fftypes.NewUUID() + + blockchainEvent := &fftypes.Event{ + ID: ev1, + Sequence: 10000001, + Reference: ref1, + Type: fftypes.EventTypeBlockchainEventReceived, + } + + txEvent := &fftypes.Event{ + ID: ev2, + Sequence: 10000002, + Reference: ref2, + Type: fftypes.EventTypeTransactionSubmitted, + } + + msgEvent := &fftypes.Event{ + ID: ev3, + Sequence: 10000003, + Reference: ref3, + Type: fftypes.EventTypeMessageConfirmed, + } + + or.mth.On("EnrichEvent", mock.Anything, 
blockchainEvent).Return(&fftypes.EnrichedEvent{ + Event: *blockchainEvent, + BlockchainEvent: &fftypes.BlockchainEvent{ + ID: ref1, + }, + }, nil) + + or.mth.On("EnrichEvent", mock.Anything, txEvent).Return(&fftypes.EnrichedEvent{ + Event: *txEvent, + Transaction: &fftypes.Transaction{ + ID: ref2, + }, + }, nil) + + or.mth.On("EnrichEvent", mock.Anything, msgEvent).Return(&fftypes.EnrichedEvent{ + Event: *msgEvent, + Message: &fftypes.Message{ + Header: fftypes.MessageHeader{ + ID: ref3, + }, + }, + }, nil) + + or.mdi.On("GetEvents", mock.Anything, mock.Anything).Return([]*fftypes.Event{ + blockchainEvent, + txEvent, + msgEvent, + }, nil, nil) + fb := database.EventQueryFactory.NewFilter(context.Background()) + f := fb.And(fb.Eq("id", u)) + _, _, err := or.GetEventsWithReferences(context.Background(), "ns1", f) + assert.NoError(t, err) +} + +func TestGetEventsWithReferencesEnrichFail(t *testing.T) { + or := newTestOrchestrator() + u := fftypes.NewUUID() + + or.mdi.On("GetEvents", mock.Anything, mock.Anything).Return([]*fftypes.Event{{ID: fftypes.NewUUID()}}, nil, nil) + or.mth.On("EnrichEvent", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) + fb := database.EventQueryFactory.NewFilter(context.Background()) + f := fb.And(fb.Eq("id", u)) + _, _, err := or.GetEventsWithReferences(context.Background(), "ns1", f) + assert.EqualError(t, err, "pop") +} + func TestGetBlockchainEventByID(t *testing.T) { or := newTestOrchestrator() @@ -603,3 +688,13 @@ func TestGetTransactionBlockchainEventsBadID(t *testing.T) { _, _, err := or.GetTransactionBlockchainEvents(context.Background(), "ns1", "") assert.Regexp(t, "FF10142", err) } + +func TestGetPins(t *testing.T) { + or := newTestOrchestrator() + u := fftypes.NewUUID() + or.mdi.On("GetPins", mock.Anything, mock.Anything).Return([]*fftypes.Pin{}, nil, nil) + fb := database.PinQueryFactory.NewFilter(context.Background()) + f := fb.And(fb.Eq("hash", u)) + _, _, err := or.GetPins(context.Background(), f) + assert.NoError(t, err) +} diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go index 14e82855c6..79ddc478cb 100644 --- a/internal/orchestrator/orchestrator.go +++ b/internal/orchestrator/orchestrator.go @@ -38,10 +38,13 @@ import ( "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/metrics" "github.com/hyperledger/firefly/internal/networkmap" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/privatemessaging" + "github.com/hyperledger/firefly/internal/shareddownload" "github.com/hyperledger/firefly/internal/sharedstorage/ssfactory" "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/internal/tokens/tifactory" + "github.com/hyperledger/firefly/internal/txcommon" "github.com/hyperledger/firefly/pkg/blockchain" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/dataexchange" @@ -76,6 +79,7 @@ type Orchestrator interface { Contracts() contracts.Manager Metrics() metrics.Manager BatchManager() batch.Manager + Operations() operations.Manager IsPreInit() bool // Status @@ -103,12 +107,12 @@ type Orchestrator interface { GetMessageTransaction(ctx context.Context, ns, id string) (*fftypes.Transaction, error) GetMessageOperations(ctx context.Context, ns, id string) ([]*fftypes.Operation, *database.FilterResult, error) GetMessageEvents(ctx context.Context, ns, id string, filter database.AndFilter) ([]*fftypes.Event, *database.FilterResult, error) - GetMessageData(ctx 
context.Context, ns, id string) ([]*fftypes.Data, error) + GetMessageData(ctx context.Context, ns, id string) (fftypes.DataArray, error) GetMessagesForData(ctx context.Context, ns, dataID string, filter database.AndFilter) ([]*fftypes.Message, *database.FilterResult, error) - GetBatchByID(ctx context.Context, ns, id string) (*fftypes.Batch, error) - GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Batch, *database.FilterResult, error) + GetBatchByID(ctx context.Context, ns, id string) (*fftypes.BatchPersisted, error) + GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) GetDataByID(ctx context.Context, ns, id string) (*fftypes.Data, error) - GetData(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Data, *database.FilterResult, error) + GetData(ctx context.Context, ns string, filter database.AndFilter) (fftypes.DataArray, *database.FilterResult, error) GetDatatypeByID(ctx context.Context, ns, id string) (*fftypes.Datatype, error) GetDatatypeByName(ctx context.Context, ns, name, version string) (*fftypes.Datatype, error) GetDatatypes(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Datatype, *database.FilterResult, error) @@ -116,8 +120,10 @@ type Orchestrator interface { GetOperations(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Operation, *database.FilterResult, error) GetEventByID(ctx context.Context, ns, id string) (*fftypes.Event, error) GetEvents(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Event, *database.FilterResult, error) + GetEventsWithReferences(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.EnrichedEvent, *database.FilterResult, error) GetBlockchainEventByID(ctx context.Context, id *fftypes.UUID) (*fftypes.BlockchainEvent, error) GetBlockchainEvents(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BlockchainEvent, *database.FilterResult, error) + GetPins(ctx context.Context, filter database.AndFilter) ([]*fftypes.Pin, *database.FilterResult, error) // Charts GetChartHistogram(ctx context.Context, ns string, startTime int64, endTime int64, buckets int64, tableName database.CollectionName) ([]*fftypes.ChartHistogram, error) @@ -160,6 +166,9 @@ type orchestrator struct { contracts contracts.Manager node *fftypes.UUID metrics metrics.Manager + operations operations.Manager + sharedDownload shareddownload.Manager + txHelper txcommon.Helper } func NewOrchestrator() Orchestrator { @@ -194,6 +203,7 @@ func (or *orchestrator) Init(ctx context.Context, cancelCtx context.CancelFunc) or.bc.bi = or.blockchain or.bc.ei = or.events or.bc.dx = or.dataexchange + or.bc.ss = or.sharedstorage return err } @@ -215,6 +225,9 @@ func (or *orchestrator) Start() error { if err == nil { err = or.messaging.Start() } + if err == nil { + err = or.sharedDownload.Start() + } if err == nil { for _, el := range or.tokens { if err = el.Start(); err != nil { @@ -241,6 +254,14 @@ func (or *orchestrator) WaitStop() { or.broadcast.WaitStop() or.broadcast = nil } + if or.data != nil { + or.data.WaitStop() + or.data = nil + } + if or.sharedDownload != nil { + or.sharedDownload.WaitStop() + or.sharedDownload = nil + } or.started = false } @@ -284,6 +305,10 @@ func (or *orchestrator) Metrics() metrics.Manager { return or.metrics } +func (or *orchestrator) Operations() operations.Manager { + return or.operations +} + func (or *orchestrator) initDatabaseCheckPreinit(ctx 
context.Context) (err error) { if or.database == nil { diType := config.GetString(config.DatabaseType) @@ -445,6 +470,10 @@ func (or *orchestrator) initComponents(ctx context.Context) (err error) { } } + if or.txHelper == nil { + or.txHelper = txcommon.NewTransactionHelper(or.database, or.data) + } + if or.identity == nil { or.identity, err = identity.NewIdentityManager(ctx, or.database, or.identityPlugin, or.blockchain, or.data) if err != nil { @@ -453,36 +482,47 @@ func (or *orchestrator) initComponents(ctx context.Context) (err error) { } if or.batch == nil { - or.batch, err = batch.NewBatchManager(ctx, or, or.database, or.data) + or.batch, err = batch.NewBatchManager(ctx, or, or.database, or.data, or.txHelper) if err != nil { return err } } + if or.operations == nil { + if or.operations, err = operations.NewOperationsManager(ctx, or.database); err != nil { + return err + } + } + or.syncasync = syncasync.NewSyncAsyncBridge(ctx, or.database, or.data) - or.batchpin = batchpin.NewBatchPinSubmitter(or.database, or.identity, or.blockchain, or.metrics) + + if or.batchpin == nil { + if or.batchpin, err = batchpin.NewBatchPinSubmitter(ctx, or.database, or.identity, or.blockchain, or.metrics, or.operations); err != nil { + return err + } + } if or.messaging == nil { - if or.messaging, err = privatemessaging.NewPrivateMessaging(ctx, or.database, or.identity, or.dataexchange, or.blockchain, or.batch, or.data, or.syncasync, or.batchpin, or.metrics); err != nil { + if or.messaging, err = privatemessaging.NewPrivateMessaging(ctx, or.database, or.identity, or.dataexchange, or.blockchain, or.batch, or.data, or.syncasync, or.batchpin, or.metrics, or.operations); err != nil { return err } } if or.broadcast == nil { - if or.broadcast, err = broadcast.NewBroadcastManager(ctx, or.database, or.identity, or.data, or.blockchain, or.dataexchange, or.sharedstorage, or.batch, or.syncasync, or.batchpin, or.metrics); err != nil { + if or.broadcast, err = broadcast.NewBroadcastManager(ctx, or.database, or.identity, or.data, or.blockchain, or.dataexchange, or.sharedstorage, or.batch, or.syncasync, or.batchpin, or.metrics, or.operations); err != nil { return err } } if or.assets == nil { - or.assets, err = assets.NewAssetManager(ctx, or.database, or.identity, or.data, or.syncasync, or.broadcast, or.messaging, or.tokens, or.metrics) + or.assets, err = assets.NewAssetManager(ctx, or.database, or.identity, or.data, or.syncasync, or.broadcast, or.messaging, or.tokens, or.metrics, or.operations, or.txHelper) if err != nil { return err } } if or.contracts == nil { - or.contracts, err = contracts.NewContractManager(ctx, or.database, or.broadcast, or.identity, or.blockchain) + or.contracts, err = contracts.NewContractManager(ctx, or.database, or.broadcast, or.identity, or.blockchain, or.operations, or.txHelper) if err != nil { return err } @@ -490,8 +530,15 @@ func (or *orchestrator) initComponents(ctx context.Context) (err error) { or.definitions = definitions.NewDefinitionHandlers(or.database, or.blockchain, or.dataexchange, or.data, or.identity, or.broadcast, or.messaging, or.assets, or.contracts) + if or.sharedDownload == nil { + or.sharedDownload, err = shareddownload.NewDownloadManager(ctx, or.database, or.sharedstorage, or.dataexchange, or.operations, &or.bc) + if err != nil { + return err + } + } + if or.events == nil { - or.events, err = events.NewEventManager(ctx, or, or.sharedstorage, or.database, or.blockchain, or.identity, or.definitions, or.data, or.broadcast, or.messaging, or.assets, or.metrics) + or.events, 
err = events.NewEventManager(ctx, or, or.sharedstorage, or.database, or.blockchain, or.identity, or.definitions, or.data, or.broadcast, or.messaging, or.assets, or.sharedDownload, or.metrics, or.txHelper) if err != nil { return err } diff --git a/internal/orchestrator/orchestrator_test.go b/internal/orchestrator/orchestrator_test.go index 7f09835f88..4fb4e3c6cd 100644 --- a/internal/orchestrator/orchestrator_test.go +++ b/internal/orchestrator/orchestrator_test.go @@ -27,6 +27,7 @@ import ( "github.com/hyperledger/firefly/internal/tokens/tifactory" "github.com/hyperledger/firefly/mocks/assetmocks" "github.com/hyperledger/firefly/mocks/batchmocks" + "github.com/hyperledger/firefly/mocks/batchpinmocks" "github.com/hyperledger/firefly/mocks/blockchainmocks" "github.com/hyperledger/firefly/mocks/broadcastmocks" "github.com/hyperledger/firefly/mocks/contractmocks" @@ -38,9 +39,12 @@ import ( "github.com/hyperledger/firefly/mocks/identitymocks" "github.com/hyperledger/firefly/mocks/metricsmocks" "github.com/hyperledger/firefly/mocks/networkmapmocks" + "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/mocks/privatemessagingmocks" + "github.com/hyperledger/firefly/mocks/shareddownloadmocks" "github.com/hyperledger/firefly/mocks/sharedstoragemocks" "github.com/hyperledger/firefly/mocks/tokenmocks" + "github.com/hyperledger/firefly/mocks/txcommonmocks" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/hyperledger/firefly/pkg/tokens" "github.com/stretchr/testify/assert" @@ -68,6 +72,10 @@ type testOrchestrator struct { mti *tokenmocks.Plugin mcm *contractmocks.Manager mmi *metricsmocks.Manager + mom *operationmocks.Manager + mbp *batchpinmocks.Submitter + mth *txcommonmocks.Helper + msd *shareddownloadmocks.Manager } func newTestOrchestrator() *testOrchestrator { @@ -94,6 +102,10 @@ func newTestOrchestrator() *testOrchestrator { mti: &tokenmocks.Plugin{}, mcm: &contractmocks.Manager{}, mmi: &metricsmocks.Manager{}, + mom: &operationmocks.Manager{}, + mbp: &batchpinmocks.Submitter{}, + mth: &txcommonmocks.Helper{}, + msd: &shareddownloadmocks.Manager{}, } tor.orchestrator.database = tor.mdi tor.orchestrator.data = tor.mdm @@ -111,6 +123,10 @@ func newTestOrchestrator() *testOrchestrator { tor.orchestrator.contracts = tor.mcm tor.orchestrator.tokens = map[string]tokens.Plugin{"token": tor.mti} tor.orchestrator.metrics = tor.mmi + tor.orchestrator.operations = tor.mom + tor.orchestrator.batchpin = tor.mbp + tor.orchestrator.sharedDownload = tor.msd + tor.orchestrator.txHelper = tor.mth tor.mdi.On("Name").Return("mock-di").Maybe() tor.mem.On("Name").Return("mock-ei").Maybe() tor.mps.On("Name").Return("mock-ps").Maybe() @@ -470,6 +486,14 @@ func TestInitNetworkMapComponentFail(t *testing.T) { assert.Regexp(t, "FF10128", err) } +func TestInitSharedStorageDownloadComponentFail(t *testing.T) { + or := newTestOrchestrator() + or.database = nil + or.sharedDownload = nil + err := or.initComponents(context.Background()) + assert.Regexp(t, "FF10128", err) +} + func TestInitBatchComponentFail(t *testing.T) { or := newTestOrchestrator() or.database = nil @@ -508,6 +532,7 @@ func TestInitIdentityComponentFail(t *testing.T) { or := newTestOrchestrator() or.database = nil or.identity = nil + or.txHelper = nil err := or.initComponents(context.Background()) assert.Regexp(t, "FF10128", err) } @@ -528,6 +553,22 @@ func TestInitContractsComponentFail(t *testing.T) { assert.Regexp(t, "FF10128", err) } +func TestInitBatchPinComponentFail(t *testing.T) { + or := newTestOrchestrator() 
+ or.database = nil + or.batchpin = nil + err := or.initComponents(context.Background()) + assert.Regexp(t, "FF10128", err) +} + +func TestInitOperationsComponentFail(t *testing.T) { + or := newTestOrchestrator() + or.database = nil + or.operations = nil + err := or.initComponents(context.Background()) + assert.Regexp(t, "FF10128", err) +} + func TestStartBatchFail(t *testing.T) { config.Reset() or := newTestOrchestrator() @@ -546,6 +587,7 @@ func TestStartTokensFail(t *testing.T) { or.mbm.On("Start").Return(nil) or.mpm.On("Start").Return(nil) or.mam.On("Start").Return(nil) + or.msd.On("Start").Return(nil) or.mti.On("Start").Return(fmt.Errorf("pop")) err := or.Start() assert.EqualError(t, err, "pop") @@ -562,12 +604,15 @@ func TestStartStopOk(t *testing.T) { or.mam.On("Start").Return(nil) or.mti.On("Start").Return(nil) or.mmi.On("Start").Return(nil) + or.msd.On("Start").Return(nil) or.mbi.On("WaitStop").Return(nil) or.mba.On("WaitStop").Return(nil) or.mem.On("WaitStop").Return(nil) or.mbm.On("WaitStop").Return(nil) or.mam.On("WaitStop").Return(nil) or.mti.On("WaitStop").Return(nil) + or.mdm.On("WaitStop").Return(nil) + or.msd.On("WaitStop").Return(nil) err := or.Start() assert.NoError(t, err) or.WaitStop() @@ -660,6 +705,7 @@ func TestInitOK(t *testing.T) { assert.Equal(t, or.mam, or.Assets()) assert.Equal(t, or.mcm, or.Contracts()) assert.Equal(t, or.mmi, or.Metrics()) + assert.Equal(t, or.mom, or.Operations()) } func TestInitDataExchangeGetNodesFail(t *testing.T) { diff --git a/internal/orchestrator/txn_status.go b/internal/orchestrator/txn_status.go index c41161c5fa..40721b2f26 100644 --- a/internal/orchestrator/txn_status.go +++ b/internal/orchestrator/txn_status.go @@ -38,6 +38,29 @@ func pendingPlaceholder(t fftypes.TransactionStatusType) *fftypes.TransactionSta } } +func txOperationStatus(op *fftypes.Operation) *fftypes.TransactionStatusDetails { + return &fftypes.TransactionStatusDetails{ + Status: op.Status, + Type: fftypes.TransactionStatusTypeOperation, + SubType: op.Type.String(), + Timestamp: op.Updated, + ID: op.ID, + Error: op.Error, + Info: op.Output, + } +} + +func txBlockchainEventStatus(event *fftypes.BlockchainEvent) *fftypes.TransactionStatusDetails { + return &fftypes.TransactionStatusDetails{ + Status: fftypes.OpStatusSucceeded, + Type: fftypes.TransactionStatusTypeBlockchainEvent, + SubType: event.Name, + Timestamp: event.Timestamp, + ID: event.ID, + Info: event.Info, + } +} + func (or *orchestrator) GetTransactionStatus(ctx context.Context, ns, id string) (*fftypes.TransactionStatus, error) { result := &fftypes.TransactionStatus{ Status: fftypes.OpStatusSucceeded, @@ -56,16 +79,10 @@ func (or *orchestrator) GetTransactionStatus(ctx context.Context, ns, id string) return nil, err } for _, op := range ops { - result.Details = append(result.Details, &fftypes.TransactionStatusDetails{ - Status: op.Status, - Type: fftypes.TransactionStatusTypeOperation, - SubType: op.Type.String(), - Timestamp: op.Updated, - ID: op.ID, - Error: op.Error, - Info: op.Output, - }) - updateStatus(result, op.Status) + result.Details = append(result.Details, txOperationStatus(op)) + if op.Retry == nil { + updateStatus(result, op.Status) + } } events, _, err := or.GetTransactionBlockchainEvents(ctx, ns, id) @@ -73,14 +90,7 @@ func (or *orchestrator) GetTransactionStatus(ctx context.Context, ns, id string) return nil, err } for _, event := range events { - result.Details = append(result.Details, &fftypes.TransactionStatusDetails{ - Status: fftypes.OpStatusSucceeded, - Type: 
fftypes.TransactionStatusTypeBlockchainEvent, - SubType: event.Name, - Timestamp: event.Timestamp, - ID: event.ID, - Info: event.Info, - }) + result.Details = append(result.Details, txBlockchainEventStatus(event)) } switch tx.Type { diff --git a/internal/orchestrator/txn_status_test.go b/internal/orchestrator/txn_status_test.go index 92b26a5066..340da5974a 100644 --- a/internal/orchestrator/txn_status_test.go +++ b/internal/orchestrator/txn_status_test.go @@ -47,7 +47,7 @@ func TestGetTransactionStatusBatchPinSuccess(t *testing.T) { { Status: fftypes.OpStatusSucceeded, ID: fftypes.NewUUID(), - Type: fftypes.OpTypeBlockchainBatchPin, + Type: fftypes.OpTypeBlockchainPinBatch, Updated: fftypes.UnixTime(0), Output: fftypes.JSONObject{"transactionHash": "0x100"}, }, @@ -60,10 +60,12 @@ func TestGetTransactionStatusBatchPinSuccess(t *testing.T) { Info: fftypes.JSONObject{"transactionHash": "0x100"}, }, } - batches := []*fftypes.Batch{ + batches := []*fftypes.BatchPersisted{ { - ID: fftypes.NewUUID(), - Type: fftypes.MessageTypeBroadcast, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Type: fftypes.BatchTypeBroadcast, + }, Confirmed: fftypes.UnixTime(2), }, } @@ -96,7 +98,7 @@ func TestGetTransactionStatusBatchPinSuccess(t *testing.T) { }, { "type": "Operation", - "subtype": "blockchain_batch_pin", + "subtype": "blockchain_pin_batch", "status": "Succeeded", "timestamp": "1970-01-01T00:00:00Z", "id": "` + ops[0].ID.String() + `", @@ -121,12 +123,12 @@ func TestGetTransactionStatusBatchPinFail(t *testing.T) { { Status: fftypes.OpStatusFailed, ID: fftypes.NewUUID(), - Type: fftypes.OpTypeBlockchainBatchPin, + Type: fftypes.OpTypeBlockchainPinBatch, Error: "complete failure", }, } events := []*fftypes.BlockchainEvent{} - batches := []*fftypes.Batch{} + batches := []*fftypes.BatchPersisted{} or.mdi.On("GetTransactionByID", mock.Anything, txID).Return(tx, nil) or.mdi.On("GetOperations", mock.Anything, mock.Anything).Return(ops, nil, nil) @@ -141,7 +143,7 @@ func TestGetTransactionStatusBatchPinFail(t *testing.T) { "details": [ { "type": "Operation", - "subtype": "blockchain_batch_pin", + "subtype": "blockchain_pin_batch", "status": "Failed", "id": "` + ops[0].ID.String() + `", "error": "complete failure" @@ -173,12 +175,12 @@ func TestGetTransactionStatusBatchPinPending(t *testing.T) { { Status: fftypes.OpStatusSucceeded, ID: fftypes.NewUUID(), - Type: fftypes.OpTypeBlockchainBatchPin, + Type: fftypes.OpTypeBlockchainPinBatch, Updated: fftypes.UnixTime(0), }, } events := []*fftypes.BlockchainEvent{} - batches := []*fftypes.Batch{} + batches := []*fftypes.BatchPersisted{} or.mdi.On("GetTransactionByID", mock.Anything, txID).Return(tx, nil) or.mdi.On("GetOperations", mock.Anything, mock.Anything).Return(ops, nil, nil) @@ -201,7 +203,7 @@ func TestGetTransactionStatusBatchPinPending(t *testing.T) { }, { "type": "Operation", - "subtype": "blockchain_batch_pin", + "subtype": "blockchain_pin_batch", "status": "Succeeded", "timestamp": "1970-01-01T00:00:00Z", "id": "` + ops[0].ID.String() + `" @@ -598,6 +600,70 @@ func TestGetTransactionStatusTokenTransferPending(t *testing.T) { or.mdi.AssertExpectations(t) } +func TestGetTransactionStatusTokenTransferRetry(t *testing.T) { + or := newTestOrchestrator() + + txID := fftypes.NewUUID() + tx := &fftypes.Transaction{ + Type: fftypes.TransactionTypeTokenTransfer, + } + op1ID := fftypes.NewUUID() + op2ID := fftypes.NewUUID() + ops := []*fftypes.Operation{ + { + Status: fftypes.OpStatusFailed, + ID: op1ID, + Type: fftypes.OpTypeTokenTransfer, + Retry: 
op2ID, + }, + { + Status: fftypes.OpStatusPending, + ID: op2ID, + Type: fftypes.OpTypeTokenTransfer, + }, + } + events := []*fftypes.BlockchainEvent{} + transfers := []*fftypes.TokenTransfer{} + + or.mdi.On("GetTransactionByID", mock.Anything, txID).Return(tx, nil) + or.mdi.On("GetOperations", mock.Anything, mock.Anything).Return(ops, nil, nil) + or.mdi.On("GetBlockchainEvents", mock.Anything, mock.Anything).Return(events, nil, nil) + or.mdi.On("GetTokenTransfers", mock.Anything, mock.Anything).Return(transfers, nil, nil) + + status, err := or.GetTransactionStatus(context.Background(), "ns1", txID.String()) + assert.NoError(t, err) + + expectedStatus := compactJSON(`{ + "status": "Pending", + "details": [ + { + "type": "Operation", + "subtype": "token_transfer", + "status": "Failed", + "id": "` + op1ID.String() + `" + }, + { + "type": "Operation", + "subtype": "token_transfer", + "status": "Pending", + "id": "` + op2ID.String() + `" + }, + { + "type": "BlockchainEvent", + "status": "Pending" + }, + { + "type": "TokenTransfer", + "status": "Pending" + } + ] + }`) + statusJSON, _ := json.Marshal(status) + assert.Equal(t, expectedStatus, string(statusJSON)) + + or.mdi.AssertExpectations(t) +} + func TestGetTransactionStatusTokenApprovalPending(t *testing.T) { or := newTestOrchestrator() diff --git a/internal/privatemessaging/groupmanager.go b/internal/privatemessaging/groupmanager.go index 4b46e2fa9e..fdc251b57d 100644 --- a/internal/privatemessaging/groupmanager.go +++ b/internal/privatemessaging/groupmanager.go @@ -157,7 +157,7 @@ func (gm *groupManager) GetGroups(ctx context.Context, filter database.AndFilter return gm.database.GetGroups(ctx, filter) } -func (gm *groupManager) getGroupNodes(ctx context.Context, groupHash *fftypes.Bytes32) (*fftypes.Group, []*fftypes.Identity, error) { +func (gm *groupManager) getGroupNodes(ctx context.Context, groupHash *fftypes.Bytes32, allowNil bool) (*fftypes.Group, []*fftypes.Identity, error) { if cached := gm.groupCache.Get(groupHash.String()); cached != nil { cached.Extend(gm.groupCacheTTL) @@ -166,7 +166,7 @@ func (gm *groupManager) getGroupNodes(ctx context.Context, groupHash *fftypes.By } group, err := gm.database.GetGroupByHash(ctx, groupHash) - if err != nil { + if err != nil || (allowNil && group == nil) { return nil, nil, err } if group == nil { @@ -206,7 +206,7 @@ func (gm *groupManager) getGroupNodes(ctx context.Context, groupHash *fftypes.By func (gm *groupManager) ResolveInitGroup(ctx context.Context, msg *fftypes.Message) (*fftypes.Group, error) { if msg.Header.Tag == fftypes.SystemTagDefineGroup { // Store the new group - data, foundAll, err := gm.data.GetMessageData(ctx, msg, true) + data, foundAll, err := gm.data.GetMessageDataCached(ctx, msg) if err != nil || !foundAll || len(data) == 0 { log.L(ctx).Warnf("Group %s definition in message %s invalid: missing data", msg.Header.Group, msg.Header.ID) return nil, err diff --git a/internal/privatemessaging/groupmanager_test.go b/internal/privatemessaging/groupmanager_test.go index 7f2dc26ccf..993f647ca2 100644 --- a/internal/privatemessaging/groupmanager_test.go +++ b/internal/privatemessaging/groupmanager_test.go @@ -86,7 +86,7 @@ func TestResolveInitGroupMissingData(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{}, false, nil) + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{}, false, nil) _, err := pm.ResolveInitGroup(pm.ctx, &fftypes.Message{ Header: 
fftypes.MessageHeader{ @@ -109,7 +109,7 @@ func TestResolveInitGroupBadData(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`!json`)}, }, true, nil) @@ -134,7 +134,7 @@ func TestResolveInitGroupBadValidation(t *testing.T) { defer cancel() mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr(`{}`)}, }, true, nil) @@ -172,7 +172,7 @@ func TestResolveInitGroupBadGroupID(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) @@ -210,7 +210,7 @@ func TestResolveInitGroupUpsertFail(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) mdi := pm.database.(*databasemocks.Plugin) @@ -250,7 +250,7 @@ func TestResolveInitGroupNewOk(t *testing.T) { b, _ := json.Marshal(&group) mdm := pm.data.(*datamocks.Manager) - mdm.On("GetMessageData", pm.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", pm.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtrBytes(b)}, }, true, nil) mdi := pm.database.(*databasemocks.Plugin) @@ -416,13 +416,13 @@ func TestGetGroupNodesCache(t *testing.T) { }, }, nil).Once() - g, nodes, err := pm.getGroupNodes(pm.ctx, group.Hash) + g, nodes, err := pm.getGroupNodes(pm.ctx, group.Hash, false) assert.NoError(t, err) assert.Equal(t, *node1, *nodes[0].ID) assert.Equal(t, *group.Hash, *g.Hash) // Note this validates the cache as we only mocked the calls once - g, nodes, err = pm.getGroupNodes(pm.ctx, group.Hash) + g, nodes, err = pm.getGroupNodes(pm.ctx, group.Hash, false) assert.NoError(t, err) assert.Equal(t, *node1, *nodes[0].ID) assert.Equal(t, *group.Hash, *g.Hash) @@ -436,7 +436,7 @@ func TestGetGroupNodesGetGroupFail(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - _, _, err := pm.getGroupNodes(pm.ctx, groupID) + _, _, err := pm.getGroupNodes(pm.ctx, groupID, false) assert.EqualError(t, err, "pop") } @@ -448,7 +448,7 @@ func TestGetGroupNodesGetGroupNotFound(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, nil) - _, _, err := pm.getGroupNodes(pm.ctx, groupID) + _, _, err := pm.getGroupNodes(pm.ctx, groupID, false) assert.Regexp(t, "FF10226", err) } @@ -470,7 +470,7 @@ func TestGetGroupNodesNodeLookupFail(t *testing.T) { mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(group, nil).Once() mdi.On("GetIdentityByID", pm.ctx, node1).Return(nil, fmt.Errorf("pop")).Once() - _, _, err := pm.getGroupNodes(pm.ctx, group.Hash) + _, _, err := pm.getGroupNodes(pm.ctx, group.Hash, false) assert.EqualError(t, err, "pop") } @@ 
-491,7 +491,7 @@ func TestGetGroupNodesNodeLookupNotFound(t *testing.T) {
 	mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(group, nil).Once()
 	mdi.On("GetIdentityByID", pm.ctx, node1).Return(nil, nil).Once()
 
-	_, _, err := pm.getGroupNodes(pm.ctx, group.Hash)
+	_, _, err := pm.getGroupNodes(pm.ctx, group.Hash, false)
 	assert.Regexp(t, "FF10224", err)
 }
 
diff --git a/internal/privatemessaging/message.go b/internal/privatemessaging/message.go
index b7a82ed685..e2526c1ce6 100644
--- a/internal/privatemessaging/message.go
+++ b/internal/privatemessaging/message.go
@@ -19,10 +19,10 @@ package privatemessaging
 import (
 	"context"
 
+	"github.com/hyperledger/firefly/internal/data"
 	"github.com/hyperledger/firefly/internal/i18n"
 	"github.com/hyperledger/firefly/internal/log"
 	"github.com/hyperledger/firefly/internal/sysmessaging"
-	"github.com/hyperledger/firefly/pkg/database"
 	"github.com/hyperledger/firefly/pkg/fftypes"
 )
 
@@ -30,7 +30,9 @@ func (pm *privateMessaging) NewMessage(ns string, in *fftypes.MessageInOut) sysm
 	message := &messageSender{
 		mgr:       pm,
 		namespace: ns,
-		msg:       in,
+		msg: &data.NewMessage{
+			Message: in,
+		},
 	}
 	message.setDefaults()
 	return message
@@ -65,7 +67,7 @@ func (pm *privateMessaging) RequestReply(ctx context.Context, ns string, in *fft
 type messageSender struct {
 	mgr       *privateMessaging
 	namespace string
-	msg       *fftypes.MessageInOut
+	msg       *data.NewMessage
 	resolved  bool
 }
 
@@ -93,48 +95,34 @@ func (s *messageSender) SendAndWait(ctx context.Context) error {
 }
 
 func (s *messageSender) setDefaults() {
-	s.msg.Header.ID = fftypes.NewUUID()
-	s.msg.Header.Namespace = s.namespace
-	s.msg.State = fftypes.MessageStateReady
-	if s.msg.Header.Type == "" {
-		s.msg.Header.Type = fftypes.MessageTypePrivate
+	msg := s.msg.Message
+	msg.Header.ID = fftypes.NewUUID()
+	msg.Header.Namespace = s.namespace
+	msg.State = fftypes.MessageStateReady
+	if msg.Header.Type == "" {
+		msg.Header.Type = fftypes.MessageTypePrivate
 	}
-	switch s.msg.Header.TxType {
+	switch msg.Header.TxType {
 	case fftypes.TransactionTypeUnpinned, fftypes.TransactionTypeNone:
 		// "unpinned" used to be called "none" (before we introduced batching + a TX on unpinned sends)
-		s.msg.Header.TxType = fftypes.TransactionTypeUnpinned
+		msg.Header.TxType = fftypes.TransactionTypeUnpinned
 	default:
 		// the only other valid option is "batch_pin"
-		s.msg.Header.TxType = fftypes.TransactionTypeBatchPin
+		msg.Header.TxType = fftypes.TransactionTypeBatchPin
 	}
 }
 
 func (s *messageSender) resolveAndSend(ctx context.Context, method sendMethod) error {
-	sent := false
-
-	// We optimize the DB storage of all the parts of the message using transaction semantics (assuming those are supported by the DB plugin)
-	err := s.mgr.database.RunAsGroup(ctx, func(ctx context.Context) (err error) {
-		if !s.resolved {
-			if err := s.resolve(ctx); err != nil {
-				return err
-			}
-			msgSizeEstimate := s.msg.EstimateSize(true)
-			if msgSizeEstimate > s.mgr.maxBatchPayloadLength {
-				return i18n.NewError(ctx, i18n.MsgTooLargePrivate, float64(msgSizeEstimate)/1024, float64(s.mgr.maxBatchPayloadLength)/1024)
-			}
-			s.resolved = true
-		}
-		// If we aren't waiting for blockchain confirmation, insert the local message immediately within the same DB transaction.
- if method != methodSendAndWait { - err = s.sendInternal(ctx, method) - sent = true + if !s.resolved { + if err := s.resolve(ctx); err != nil { + return err } - return err - }) - - if err != nil || sent { - return err + msgSizeEstimate := s.msg.Message.EstimateSize(true) + if msgSizeEstimate > s.mgr.maxBatchPayloadLength { + return i18n.NewError(ctx, i18n.MsgTooLargePrivate, float64(msgSizeEstimate)/1024, float64(s.mgr.maxBatchPayloadLength)/1024) + } + s.resolved = true } return s.sendInternal(ctx, method) @@ -142,34 +130,36 @@ func (s *messageSender) resolveAndSend(ctx context.Context, method sendMethod) e func (s *messageSender) resolve(ctx context.Context) error { // Resolve the sending identity - if err := s.mgr.identity.ResolveInputSigningIdentity(ctx, s.msg.Header.Namespace, &s.msg.Header.SignerRef); err != nil { + msg := s.msg.Message + if err := s.mgr.identity.ResolveInputSigningIdentity(ctx, msg.Header.Namespace, &msg.Header.SignerRef); err != nil { return i18n.WrapError(ctx, err, i18n.MsgAuthorInvalid) } // Resolve the member list into a group - if err := s.mgr.resolveRecipientList(ctx, s.msg); err != nil { + if err := s.mgr.resolveRecipientList(ctx, s.msg.Message); err != nil { return err } // The data manager is responsible for the heavy lifting of storing/validating all our in-line data elements - dataRefs, err := s.mgr.data.ResolveInlineDataPrivate(ctx, s.namespace, s.msg.InlineData) - s.msg.Message.Data = dataRefs + err := s.mgr.data.ResolveInlineData(ctx, s.msg) return err } func (s *messageSender) sendInternal(ctx context.Context, method sendMethod) error { + msg := &s.msg.Message.Message + if method == methodSendAndWait { // Pass it to the sync-async handler to wait for the confirmation to come back in. // NOTE: Our caller makes sure we are not in a RunAsGroup (which would be bad) - out, err := s.mgr.syncasync.WaitForMessage(ctx, s.namespace, s.msg.Header.ID, s.Send) + out, err := s.mgr.syncasync.WaitForMessage(ctx, s.namespace, msg.Header.ID, s.Send) if out != nil { - s.msg.Message = *out + *msg = *out } return err } // Seal the message - if err := s.msg.Seal(ctx); err != nil { + if err := s.msg.Message.Seal(ctx); err != nil { return err } if method == methodPrepare { @@ -177,10 +167,10 @@ func (s *messageSender) sendInternal(ctx context.Context, method sendMethod) err } // Store the message - this asynchronously triggers the next step in process - if err := s.mgr.database.UpsertMessage(ctx, &s.msg.Message, database.UpsertOptimizationNew); err != nil { + if err := s.mgr.data.WriteNewMessage(ctx, s.msg); err != nil { return err } - log.L(ctx).Infof("Sent private message %s:%s sequence=%d", s.msg.Header.Namespace, s.msg.Header.ID, s.msg.Sequence) + log.L(ctx).Infof("Sent private message %s:%s sequence=%d", msg.Header.Namespace, msg.Header.ID, msg.Sequence) return nil } diff --git a/internal/privatemessaging/message_test.go b/internal/privatemessaging/message_test.go index 5901e14bf8..ff8f49cb8c 100644 --- a/internal/privatemessaging/message_test.go +++ b/internal/privatemessaging/message_test.go @@ -21,13 +21,15 @@ import ( "fmt" "testing" + "github.com/hyperledger/firefly/internal/batch" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/mocks/databasemocks" "github.com/hyperledger/firefly/mocks/dataexchangemocks" "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" + "github.com/hyperledger/firefly/mocks/operationmocks" 
"github.com/hyperledger/firefly/mocks/syncasyncmocks" - "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -82,18 +84,14 @@ func TestSendConfirmMessageE2EOk(t *testing.T) { mim.On("CachedIdentityLookup", pm.ctx, "org1").Return(intermediateOrg, false, nil) mim.On("CachedIdentityLookupByID", pm.ctx, rootOrg.ID).Return(rootOrg, nil) - dataID := fftypes.NewUUID() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: dataID, Hash: fftypes.NewRandB32()}, - }, nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) + mdm.On("WriteNewMessage", pm.ctx, mock.Anything).Return(nil).Once() mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{}, nil, nil).Once() mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil).Once() - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil).Once() + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() retMsg := &fftypes.Message{ Header: fftypes.MessageHeader{ @@ -107,7 +105,6 @@ func TestSendConfirmMessageE2EOk(t *testing.T) { send(pm.ctx) }). Return(retMsg, nil).Once() - mdi.On("UpsertMessage", pm.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil).Once() msg, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ InlineData: fftypes.InlineData{ @@ -139,15 +136,12 @@ func TestSendUnpinnedMessageE2EOk(t *testing.T) { identity.Key = "localkey" }).Return(nil) - dataID := fftypes.NewUUID() groupID := fftypes.NewRandB32() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: dataID, Hash: fftypes.NewRandB32()}, - }, nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) + mdm.On("WriteNewMessage", pm.ctx, mock.Anything).Return(nil).Once() mdi := pm.database.(*databasemocks.Plugin) - mdi.On("UpsertMessage", pm.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil).Once() mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{Hash: groupID}, nil) msg, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ @@ -167,7 +161,6 @@ func TestSendUnpinnedMessageE2EOk(t *testing.T) { }, }, false) assert.NoError(t, err) - assert.Equal(t, *dataID, *msg.Data[0].ID) assert.NotNil(t, msg.Header.Group) mdm.AssertExpectations(t) @@ -220,54 +213,6 @@ func TestSendMessageBadIdentity(t *testing.T) { } -func TestSendMessageFail(t *testing.T) { - - pm, cancel := newTestPrivateMessaging(t) - defer cancel() - - mim := pm.identity.(*identitymanagermocks.Manager) - localOrg := newTestOrg("localorg") - localNode := newTestNode("node1", localOrg) - mim.On("ResolveInputSigningIdentity", pm.ctx, "ns1", mock.Anything).Return(nil) - mim.On("GetNodeOwnerOrg", pm.ctx).Return(localOrg, nil) - mim.On("ResolveInputSigningIdentity", pm.ctx, "ns1", mock.Anything).Run(func(args mock.Arguments) { - identity := args[2].(*fftypes.SignerRef) - identity.Author = "localorg" - identity.Key = "localkey" - }).Return(nil) - mim.On("CachedIdentityLookup", pm.ctx, "localorg").Return(localOrg, false, nil) - - mdi := pm.database.(*databasemocks.Plugin) - mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, 
nil) - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil) - mdi.On("UpsertMessage", pm.ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")) - - dataID := fftypes.NewUUID() - mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: dataID, Hash: fftypes.NewRandB32()}, - }, nil) - - _, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ - InlineData: fftypes.InlineData{ - {Value: fftypes.JSONAnyPtr(`{"some": "data"}`)}, - }, - Group: &fftypes.InputGroup{ - Members: []fftypes.MemberInput{ - {Identity: "localorg"}, - }, - }, - }, false) - assert.EqualError(t, err, "pop") - - mim.AssertExpectations(t) - mdi.AssertExpectations(t) - mdm.AssertExpectations(t) - -} - func TestResolveAndSendBadInlineData(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) @@ -287,21 +232,21 @@ func TestResolveAndSendBadInlineData(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil).Once() - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil).Once() + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(nil, fmt.Errorf("pop")) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) message := &messageSender{ mgr: pm, namespace: "ns1", - msg: &fftypes.MessageInOut{ - Message: fftypes.Message{Header: fftypes.MessageHeader{Namespace: "ns1"}}, - Group: &fftypes.InputGroup{ - Members: []fftypes.MemberInput{ - {Identity: "localorg"}, + msg: &data.NewMessage{ + Message: &fftypes.MessageInOut{ + Message: fftypes.Message{Header: fftypes.MessageHeader{Namespace: "ns1"}}, + Group: &fftypes.InputGroup{ + Members: []fftypes.MemberInput{ + {Identity: "localorg"}, + }, }, }, }, @@ -332,9 +277,12 @@ func TestSendUnpinnedMessageTooLarge(t *testing.T) { dataID := fftypes.NewUUID() groupID := fftypes.NewRandB32() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: dataID, Hash: fftypes.NewRandB32(), ValueSize: 100001}, - }, nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Run(func(args mock.Arguments) { + newMsg := args[1].(*data.NewMessage) + newMsg.Message.Data = fftypes.DataRefs{ + {ID: dataID, Hash: fftypes.NewRandB32(), ValueSize: 100001}, + } + }).Return(nil) mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{Hash: groupID}, nil) @@ -401,14 +349,10 @@ func TestMessagePrepare(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil).Once() - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil).Once() + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, - }, nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) 
message := pm.NewMessage("ns1", &fftypes.MessageInOut{ Message: fftypes.Message{ @@ -441,23 +385,25 @@ func TestSendUnpinnedMessageGroupLookupFail(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, groupID).Return(nil, fmt.Errorf("pop")).Once() - err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, - Payload: fftypes.BatchPayload{ - Messages: []*fftypes.Message{ - { - Header: fftypes.MessageHeader{ - SignerRef: fftypes.SignerRef{ - Author: "org1", - }, - TxType: fftypes.TransactionTypeUnpinned, - Group: groupID, + err := pm.dispatchUnpinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, + }, + Messages: []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + SignerRef: fftypes.SignerRef{ + Author: "org1", }, + TxType: fftypes.TransactionTypeUnpinned, + Group: groupID, }, }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -475,15 +421,12 @@ func TestSendUnpinnedMessageInsertFail(t *testing.T) { return true })).Return(nil) - dataID := fftypes.NewUUID() groupID := fftypes.NewRandB32() mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: dataID, Hash: fftypes.NewRandB32()}, - }, nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) + mdm.On("WriteNewMessage", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")).Once() mdi := pm.database.(*databasemocks.Plugin) - mdi.On("UpsertMessage", pm.ctx, mock.Anything, database.UpsertOptimizationNew).Return(fmt.Errorf("pop")).Once() mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{Hash: groupID}, nil) _, err := pm.SendMessage(pm.ctx, "ns1", &fftypes.MessageInOut{ @@ -661,14 +604,12 @@ func TestRequestReplySuccess(t *testing.T) { Return(nil, nil) mdm := pm.data.(*datamocks.Manager) - mdm.On("ResolveInlineDataPrivate", pm.ctx, "ns1", mock.Anything).Return(fftypes.DataRefs{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32()}, - }, nil) + mdm.On("ResolveInlineData", pm.ctx, mock.Anything).Return(nil) + mdm.On("WriteNewMessage", pm.ctx, mock.Anything).Return(nil).Once() groupID := fftypes.NewRandB32() mdi := pm.database.(*databasemocks.Plugin) - mdi.On("UpsertMessage", pm.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil).Once() mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{Hash: groupID}, nil) _, err := pm.RequestReply(pm.ctx, "ns1", &fftypes.MessageInOut{ @@ -685,49 +626,6 @@ func TestRequestReplySuccess(t *testing.T) { assert.NoError(t, err) } -func TestDispatchedUnpinnedMessageMarshalFail(t *testing.T) { - - pm, cancel := newTestPrivateMessaging(t) - defer cancel() - - mim := pm.identity.(*identitymanagermocks.Manager) - mim.On("ResolveInputSigningIdentity", pm.ctx, "ns1", mock.MatchedBy(func(identity *fftypes.SignerRef) bool { - assert.Equal(t, "localorg", identity.Author) - return true - })).Return(nil) - - groupID := fftypes.NewRandB32() - node1 := newTestNode("node1", newTestOrg("localorg")) - node2 := newTestNode("node2", newTestOrg("remoteorg")) - - mdi := pm.database.(*databasemocks.Plugin) - mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{ - Hash: groupID, - GroupIdentity: fftypes.GroupIdentity{ - Members: fftypes.Members{ - {Node: node1.ID, Identity: "localorg"}, - {Node: node1.ID, Identity: "remoteorg"}, - }, - }, - }, nil).Once() - 
mdi.On("GetIdentityByID", pm.ctx, node1.ID).Return(node1, nil).Once() - mdi.On("GetIdentityByID", pm.ctx, node1.ID).Return(node2, nil).Once() - - err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, - Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ - {Value: fftypes.JSONAnyPtr("!Bad JSON")}, - }, - }, - }, []*fftypes.Bytes32{}) - assert.Regexp(t, "FF10137", err) - - mdi.AssertExpectations(t) - -} - func TestDispatchedUnpinnedMessageOK(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) @@ -749,6 +647,7 @@ func TestDispatchedUnpinnedMessageOK(t *testing.T) { mdx.On("SendMessage", pm.ctx, mock.Anything, "node2-peer", mock.Anything).Return(nil) mdi := pm.database.(*databasemocks.Plugin) + mom := pm.operations.(*operationmocks.Manager) mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{ Hash: groupID, GroupIdentity: fftypes.GroupIdentity{ @@ -761,32 +660,35 @@ func TestDispatchedUnpinnedMessageOK(t *testing.T) { mdi.On("GetIdentityByID", pm.ctx, node1.ID).Return(node1, nil).Once() mdi.On("GetIdentityByID", pm.ctx, node2.ID).Return(node2, nil).Once() - mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(nil) - - err := pm.dispatchUnpinnedBatch(pm.ctx, &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: fftypes.NewUUID(), - Type: fftypes.TransactionTypeUnpinned, - }, - Messages: []*fftypes.Message{ - { - Header: fftypes.MessageHeader{ - Tag: "mytag", - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", - }, + mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(nil) + mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(batchSendData) + return op.Type == fftypes.OpTypeDataExchangeSendBatch && *data.Node.ID == *node2.ID + })).Return(nil) + + err := pm.dispatchUnpinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, + }, + Messages: []*fftypes.Message{ + { + Header: fftypes.MessageHeader{ + Tag: "mytag", + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", }, }, }, }, - }, []*fftypes.Bytes32{}) + }) assert.NoError(t, err) mdi.AssertExpectations(t) + mom.AssertExpectations(t) } @@ -812,8 +714,10 @@ func TestSendDataTransferBlobsFail(t *testing.T) { err := pm.sendData(pm.ctx, &fftypes.TransportWrapper{ Batch: &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { @@ -826,7 +730,7 @@ func TestSendDataTransferBlobsFail(t *testing.T) { }, }, }, - Data: []*fftypes.Data{ + Data: fftypes.DataArray{ {ID: fftypes.NewUUID(), Value: fftypes.JSONAnyPtr("{}"), Blob: &fftypes.BlobRef{ Hash: fftypes.NewRandB32(), }}, @@ -851,22 +755,21 @@ func TestSendDataTransferFail(t *testing.T) { nodes := []*fftypes.Identity{node2} mim := pm.identity.(*identitymanagermocks.Manager) - mim.On("ResolveInputIdentity", pm.ctx, mock.MatchedBy(func(identity *fftypes.SignerRef) bool { - assert.Equal(t, "localorg", identity.Author) - return true - })).Return(nil) mim.On("GetNodeOwnerOrg", pm.ctx).Return(localOrg, nil) - mdi := pm.database.(*databasemocks.Plugin) - mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(nil) - - mdx := pm.exchange.(*dataexchangemocks.Plugin) - mdx.On("SendMessage", pm.ctx, mock.Anything, "node2-peer", 
mock.Anything).Return(fmt.Errorf("pop")) + mom := pm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(nil) + mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(batchSendData) + return op.Type == fftypes.OpTypeDataExchangeSendBatch && *data.Node.ID == *node2.ID + })).Return(fmt.Errorf("pop")) err := pm.sendData(pm.ctx, &fftypes.TransportWrapper{ Batch: &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { @@ -884,7 +787,8 @@ func TestSendDataTransferFail(t *testing.T) { }, nodes) assert.Regexp(t, "pop", err) - mdx.AssertExpectations(t) + mim.AssertExpectations(t) + mom.AssertExpectations(t) } @@ -905,13 +809,15 @@ func TestSendDataTransferInsertOperationFail(t *testing.T) { })).Return(nil) mim.On("GetNodeOwnerOrg", pm.ctx).Return(localOrg, nil) - mdi := pm.database.(*databasemocks.Plugin) - mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) + mom := pm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) err := pm.sendData(pm.ctx, &fftypes.TransportWrapper{ Batch: &fftypes.Batch{ - ID: fftypes.NewUUID(), - Group: groupID, + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + Group: groupID, + }, Payload: fftypes.BatchPayload{ Messages: []*fftypes.Message{ { diff --git a/internal/privatemessaging/operations.go b/internal/privatemessaging/operations.go new file mode 100644 index 0000000000..9e62a0c04e --- /dev/null +++ b/internal/privatemessaging/operations.go @@ -0,0 +1,158 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package privatemessaging + +import ( + "context" + "encoding/json" + + "github.com/hyperledger/firefly/internal/i18n" + "github.com/hyperledger/firefly/pkg/fftypes" +) + +type transferBlobData struct { + Node *fftypes.Identity `json:"node"` + Blob *fftypes.Blob `json:"blob"` +} + +type batchSendData struct { + Node *fftypes.Identity `json:"node"` + Transport *fftypes.TransportWrapper `json:"transport"` +} + +func addTransferBlobInputs(op *fftypes.Operation, nodeID *fftypes.UUID, blobHash *fftypes.Bytes32) { + op.Input = fftypes.JSONObject{ + "node": nodeID.String(), + "hash": blobHash.String(), + } +} + +func retrieveSendBlobInputs(ctx context.Context, op *fftypes.Operation) (nodeID *fftypes.UUID, blobHash *fftypes.Bytes32, err error) { + nodeID, err = fftypes.ParseUUID(ctx, op.Input.GetString("node")) + if err == nil { + blobHash, err = fftypes.ParseBytes32(ctx, op.Input.GetString("hash")) + } + return nodeID, blobHash, err +} + +func addBatchSendInputs(op *fftypes.Operation, nodeID *fftypes.UUID, groupHash *fftypes.Bytes32, batchID *fftypes.UUID) { + op.Input = fftypes.JSONObject{ + "node": nodeID.String(), + "group": groupHash.String(), + "batch": batchID.String(), + } +} + +func retrieveBatchSendInputs(ctx context.Context, op *fftypes.Operation) (nodeID *fftypes.UUID, groupHash *fftypes.Bytes32, batchID *fftypes.UUID, err error) { + nodeID, err = fftypes.ParseUUID(ctx, op.Input.GetString("node")) + if err == nil { + groupHash, err = fftypes.ParseBytes32(ctx, op.Input.GetString("group")) + } + if err == nil { + batchID, err = fftypes.ParseUUID(ctx, op.Input.GetString("batch")) + } + return nodeID, groupHash, batchID, err +} + +func (pm *privateMessaging) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + switch op.Type { + case fftypes.OpTypeDataExchangeSendBlob: + nodeID, blobHash, err := retrieveSendBlobInputs(ctx, op) + if err != nil { + return nil, err + } + node, err := pm.database.GetIdentityByID(ctx, nodeID) + if err != nil { + return nil, err + } else if node == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + blob, err := pm.database.GetBlobMatchingHash(ctx, blobHash) + if err != nil { + return nil, err + } else if blob == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + return opSendBlob(op, node, blob), nil + + case fftypes.OpTypeDataExchangeSendBatch: + nodeID, groupHash, batchID, err := retrieveBatchSendInputs(ctx, op) + if err != nil { + return nil, err + } + node, err := pm.database.GetIdentityByID(ctx, nodeID) + if err != nil { + return nil, err + } else if node == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + group, err := pm.database.GetGroupByHash(ctx, groupHash) + if err != nil { + return nil, err + } else if group == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + bp, err := pm.database.GetBatchByID(ctx, batchID) + if err != nil { + return nil, err + } else if bp == nil { + return nil, i18n.NewError(ctx, i18n.Msg404NotFound) + } + batch, err := pm.data.HydrateBatch(ctx, bp) + if err != nil { + return nil, err + } + transport := &fftypes.TransportWrapper{Group: group, Batch: batch} + return opSendBatch(op, node, transport), nil + + default: + return nil, i18n.NewError(ctx, i18n.MsgOperationNotSupported, op.Type) + } +} + +func (pm *privateMessaging) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) { + switch data := op.Data.(type) { + case transferBlobData: + return nil, 
false, pm.exchange.TransferBLOB(ctx, op.ID, data.Node.Profile.GetString("id"), data.Blob.PayloadRef)
+
+	case batchSendData:
+		payload, err := json.Marshal(data.Transport)
+		if err != nil {
+			return nil, false, i18n.WrapError(ctx, err, i18n.MsgSerializationFailed)
+		}
+		return nil, false, pm.exchange.SendMessage(ctx, op.ID, data.Node.Profile.GetString("id"), payload)
+
+	default:
+		return nil, false, i18n.NewError(ctx, i18n.MsgOperationDataIncorrect, op.Data)
+	}
+}
+
+func opSendBlob(op *fftypes.Operation, node *fftypes.Identity, blob *fftypes.Blob) *fftypes.PreparedOperation {
+	return &fftypes.PreparedOperation{
+		ID:   op.ID,
+		Type: op.Type,
+		Data: transferBlobData{Node: node, Blob: blob},
+	}
+}
+
+func opSendBatch(op *fftypes.Operation, node *fftypes.Identity, transport *fftypes.TransportWrapper) *fftypes.PreparedOperation {
+	return &fftypes.PreparedOperation{
+		ID:   op.ID,
+		Type: op.Type,
+		Data: batchSendData{Node: node, Transport: transport},
+	}
+}
diff --git a/internal/privatemessaging/operations_test.go b/internal/privatemessaging/operations_test.go
new file mode 100644
index 0000000000..51b35dcddf
--- /dev/null
+++ b/internal/privatemessaging/operations_test.go
@@ -0,0 +1,534 @@
+// Copyright © 2021 Kaleido, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package privatemessaging + +import ( + "context" + "fmt" + "testing" + + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/dataexchangemocks" + "github.com/hyperledger/firefly/mocks/datamocks" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestPrepareAndRunTransferBlob(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBlob, + ID: fftypes.NewUUID(), + } + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + IdentityProfile: fftypes.IdentityProfile{ + Profile: fftypes.JSONObject{ + "id": "peer1", + }, + }, + } + blob := &fftypes.Blob{ + Hash: fftypes.NewRandB32(), + PayloadRef: "payload", + } + addTransferBlobInputs(op, node.ID, blob.Hash) + + mdi := pm.database.(*databasemocks.Plugin) + mdx := pm.exchange.(*dataexchangemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetBlobMatchingHash", context.Background(), blob.Hash).Return(blob, nil) + mdx.On("TransferBLOB", context.Background(), op.ID, "peer1", "payload").Return(nil) + + po, err := pm.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, node, po.Data.(transferBlobData).Node) + assert.Equal(t, blob, po.Data.(transferBlobData).Blob) + + _, complete, err := pm.RunOperation(context.Background(), po) + + assert.False(t, complete) + assert.NoError(t, err) + + mdi.AssertExpectations(t) + mdx.AssertExpectations(t) +} + +func TestPrepareAndRunBatchSend(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + ID: fftypes.NewUUID(), + } + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + IdentityProfile: fftypes.IdentityProfile{ + Profile: fftypes.JSONObject{ + "id": "peer1", + }, + }, + } + group := &fftypes.Group{ + Hash: fftypes.NewRandB32(), + } + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + batch := &fftypes.Batch{ + BatchHeader: bp.BatchHeader, + } + addBatchSendInputs(op, node.ID, group.Hash, batch.ID) + + mdi := pm.database.(*databasemocks.Plugin) + mdx := pm.exchange.(*dataexchangemocks.Plugin) + mdm := pm.data.(*datamocks.Manager) + mdm.On("HydrateBatch", context.Background(), bp).Return(batch, nil) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetGroupByHash", context.Background(), group.Hash).Return(group, nil) + mdi.On("GetBatchByID", context.Background(), batch.ID).Return(bp, nil) + mdx.On("SendMessage", context.Background(), op.ID, "peer1", mock.Anything).Return(nil) + + po, err := pm.PrepareOperation(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, node, po.Data.(batchSendData).Node) + assert.Equal(t, group, po.Data.(batchSendData).Transport.Group) + assert.Equal(t, batch, po.Data.(batchSendData).Transport.Batch) + + _, complete, err := pm.RunOperation(context.Background(), po) + + assert.False(t, complete) + assert.NoError(t, err) + + mdi.AssertExpectations(t) + mdx.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestPrepareAndRunBatchSendHydrateFail(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + ID: 
fftypes.NewUUID(), + } + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + IdentityProfile: fftypes.IdentityProfile{ + Profile: fftypes.JSONObject{ + "id": "peer1", + }, + }, + } + group := &fftypes.Group{ + Hash: fftypes.NewRandB32(), + } + bp := &fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: fftypes.NewUUID(), + }, + } + batch := &fftypes.Batch{ + BatchHeader: bp.BatchHeader, + } + addBatchSendInputs(op, node.ID, group.Hash, batch.ID) + + mdi := pm.database.(*databasemocks.Plugin) + mdm := pm.data.(*datamocks.Manager) + mdm.On("HydrateBatch", context.Background(), bp).Return(nil, fmt.Errorf("pop")) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetGroupByHash", context.Background(), group.Hash).Return(group, nil) + mdi.On("GetBatchByID", context.Background(), batch.ID).Return(bp, nil) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) + mdm.AssertExpectations(t) +} + +func TestPrepareOperationNotSupported(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + po, err := pm.PrepareOperation(context.Background(), &fftypes.Operation{}) + + assert.Nil(t, po) + assert.Regexp(t, "FF10371", err) +} + +func TestPrepareOperationBlobSendBadInput(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBlob, + Input: fftypes.JSONObject{"node": "bad"}, + } + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10142", err) +} + +func TestPrepareOperationBlobSendNodeFail(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + nodeID := fftypes.NewUUID() + blobHash := fftypes.NewRandB32() + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBlob, + Input: fftypes.JSONObject{ + "node": nodeID.String(), + "hash": blobHash.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), nodeID).Return(nil, fmt.Errorf("pop")) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBlobSendNodeNotFound(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + nodeID := fftypes.NewUUID() + blobHash := fftypes.NewRandB32() + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBlob, + Input: fftypes.JSONObject{ + "node": nodeID.String(), + "hash": blobHash.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), nodeID).Return(nil, nil) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBlobSendBlobFail(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + blobHash := fftypes.NewRandB32() + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + IdentityProfile: fftypes.IdentityProfile{ + Profile: fftypes.JSONObject{ + "id": "peer1", + }, + }, + } + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBlob, + Input: fftypes.JSONObject{ + "node": node.ID.String(), + "hash": blobHash.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + 
mdi.On("GetBlobMatchingHash", context.Background(), blobHash).Return(nil, fmt.Errorf("pop")) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBlobSendBlobNotFound(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + blobHash := fftypes.NewRandB32() + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + IdentityProfile: fftypes.IdentityProfile{ + Profile: fftypes.JSONObject{ + "id": "peer1", + }, + }, + } + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBlob, + Input: fftypes.JSONObject{ + "node": node.ID.String(), + "hash": blobHash.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetBlobMatchingHash", context.Background(), blobHash).Return(nil, nil) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBatchSendBadInput(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + Input: fftypes.JSONObject{"node": "bad"}, + } + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10142", err) +} + +func TestPrepareOperationBatchSendNodeFail(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + nodeID := fftypes.NewUUID() + groupHash := fftypes.NewRandB32() + batchID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + Input: fftypes.JSONObject{ + "node": nodeID.String(), + "group": groupHash.String(), + "batch": batchID.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), nodeID).Return(nil, fmt.Errorf("pop")) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBatchSendNodeNotFound(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + nodeID := fftypes.NewUUID() + groupHash := fftypes.NewRandB32() + batchID := fftypes.NewUUID() + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + Input: fftypes.JSONObject{ + "node": nodeID.String(), + "group": groupHash.String(), + "batch": batchID.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), nodeID).Return(nil, nil) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBatchSendGroupFail(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + groupHash := fftypes.NewRandB32() + batchID := fftypes.NewUUID() + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + } + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + Input: fftypes.JSONObject{ + "node": node.ID.String(), + "group": groupHash.String(), + "batch": batchID.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetGroupByHash", context.Background(), groupHash).Return(nil, fmt.Errorf("pop")) + + _, err := pm.PrepareOperation(context.Background(), op) + 
assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBatchSendGroupNotFound(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + groupHash := fftypes.NewRandB32() + batchID := fftypes.NewUUID() + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + } + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + Input: fftypes.JSONObject{ + "node": node.ID.String(), + "group": groupHash.String(), + "batch": batchID.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetGroupByHash", context.Background(), groupHash).Return(nil, nil) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBatchSendBatchFail(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + batchID := fftypes.NewUUID() + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + } + group := &fftypes.Group{ + Hash: fftypes.NewRandB32(), + } + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + Input: fftypes.JSONObject{ + "node": node.ID.String(), + "group": group.Hash.String(), + "batch": batchID.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetGroupByHash", context.Background(), group.Hash).Return(group, nil) + mdi.On("GetBatchByID", context.Background(), batchID).Return(nil, fmt.Errorf("pop")) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.EqualError(t, err, "pop") + + mdi.AssertExpectations(t) +} + +func TestPrepareOperationBatchSendBatchNotFound(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + batchID := fftypes.NewUUID() + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + } + group := &fftypes.Group{ + Hash: fftypes.NewRandB32(), + } + op := &fftypes.Operation{ + Type: fftypes.OpTypeDataExchangeSendBatch, + Input: fftypes.JSONObject{ + "node": node.ID.String(), + "group": group.Hash.String(), + "batch": batchID.String(), + }, + } + + mdi := pm.database.(*databasemocks.Plugin) + mdi.On("GetIdentityByID", context.Background(), node.ID).Return(node, nil) + mdi.On("GetGroupByHash", context.Background(), group.Hash).Return(group, nil) + mdi.On("GetBatchByID", context.Background(), batchID).Return(nil, nil) + + _, err := pm.PrepareOperation(context.Background(), op) + assert.Regexp(t, "FF10109", err) + + mdi.AssertExpectations(t) +} + +func TestRunOperationNotSupported(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + _, complete, err := pm.RunOperation(context.Background(), &fftypes.PreparedOperation{}) + + assert.False(t, complete) + assert.Regexp(t, "FF10378", err) +} + +func TestRunOperationBatchSendInvalidData(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + + op := &fftypes.Operation{} + node := &fftypes.Identity{ + IdentityBase: fftypes.IdentityBase{ + ID: fftypes.NewUUID(), + }, + } + transport := &fftypes.TransportWrapper{ + Group: &fftypes.Group{}, + Batch: &fftypes.Batch{ + Payload: fftypes.BatchPayload{ + Data: fftypes.DataArray{ + {Value: fftypes.JSONAnyPtr(`!json`)}, + }, + }, + }, + } + + _, complete, err := 
pm.RunOperation(context.Background(), opSendBatch(op, node, transport)) + + assert.False(t, complete) + assert.Regexp(t, "FF10137", err) +} diff --git a/internal/privatemessaging/privatemessaging.go b/internal/privatemessaging/privatemessaging.go index 2d30393070..14475def33 100644 --- a/internal/privatemessaging/privatemessaging.go +++ b/internal/privatemessaging/privatemessaging.go @@ -18,7 +18,6 @@ package privatemessaging import ( "context" - "encoding/json" "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/batchpin" @@ -28,6 +27,7 @@ import ( "github.com/hyperledger/firefly/internal/identity" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/internal/metrics" + "github.com/hyperledger/firefly/internal/operations" "github.com/hyperledger/firefly/internal/retry" "github.com/hyperledger/firefly/internal/syncasync" "github.com/hyperledger/firefly/internal/sysmessaging" @@ -42,12 +42,17 @@ const pinnedPrivateDispatcherName = "pinned_private" const unpinnedPrivateDispatcherName = "unpinned_private" type Manager interface { + fftypes.Named GroupManager Start() error NewMessage(ns string, msg *fftypes.MessageInOut) sysmessaging.MessageSender SendMessage(ctx context.Context, ns string, in *fftypes.MessageInOut, waitConfirm bool) (out *fftypes.Message, err error) RequestReply(ctx context.Context, ns string, request *fftypes.MessageInOut) (reply *fftypes.MessageInOut, err error) + + // From operations.OperationHandler + PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) + RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) } type privateMessaging struct { @@ -68,10 +73,12 @@ type privateMessaging struct { opCorrelationRetries int maxBatchPayloadLength int64 metrics metrics.Manager + operations operations.Manager + orgFirstNodes map[fftypes.UUID]*fftypes.Identity } -func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Manager, dx dataexchange.Plugin, bi blockchain.Plugin, ba batch.Manager, dm data.Manager, sa syncasync.Bridge, bp batchpin.Submitter, mm metrics.Manager) (Manager, error) { - if di == nil || im == nil || dx == nil || bi == nil || ba == nil || dm == nil { +func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Manager, dx dataexchange.Plugin, bi blockchain.Plugin, ba batch.Manager, dm data.Manager, sa syncasync.Bridge, bp batchpin.Submitter, mm metrics.Manager, om operations.Manager) (Manager, error) { + if di == nil || im == nil || dx == nil || bi == nil || ba == nil || dm == nil || mm == nil || om == nil { return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) } @@ -99,6 +106,8 @@ func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Ma opCorrelationRetries: config.GetInt(config.PrivateMessagingOpCorrelationRetries), maxBatchPayloadLength: config.GetByteSize(config.PrivateMessagingBatchPayloadLimit), metrics: mm, + operations: om, + orgFirstNodes: make(map[fftypes.UUID]*fftypes.Identity), } pm.groupManager.groupCache = ccache.New( // We use a LRU cache with a size-aware max @@ -107,6 +116,7 @@ func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Ma ) bo := batch.DispatcherOptions{ + BatchType: fftypes.BatchTypePrivate, BatchMaxSize: config.GetUint(config.PrivateMessagingBatchSize), BatchMaxBytes: pm.maxBatchPayloadLength, BatchTimeout: config.GetDuration(config.PrivateMessagingBatchTimeout), @@ 
-129,34 +139,44 @@ func NewPrivateMessaging(ctx context.Context, di database.Plugin, im identity.Ma }, pm.dispatchUnpinnedBatch, bo) + om.RegisterHandler(ctx, pm, []fftypes.OpType{ + fftypes.OpTypeDataExchangeSendBlob, + fftypes.OpTypeDataExchangeSendBatch, + }) + return pm, nil } +func (pm *privateMessaging) Name() string { + return "PrivateMessaging" +} + func (pm *privateMessaging) Start() error { return pm.exchange.Start() } -func (pm *privateMessaging) dispatchPinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { - err := pm.dispatchBatchCommon(ctx, batch) +func (pm *privateMessaging) dispatchPinnedBatch(ctx context.Context, state *batch.DispatchState) error { + err := pm.dispatchBatchCommon(ctx, state) if err != nil { return err } - log.L(ctx).Infof("Pinning private batch %s with author=%s key=%s group=%s", batch.ID, batch.Author, batch.Key, batch.Group) - return pm.batchpin.SubmitPinnedBatch(ctx, batch, contexts) + log.L(ctx).Infof("Pinning private batch %s with author=%s key=%s group=%s", state.Persisted.ID, state.Persisted.Author, state.Persisted.Key, state.Persisted.Group) + return pm.batchpin.SubmitPinnedBatch(ctx, &state.Persisted, state.Pins) } -func (pm *privateMessaging) dispatchUnpinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { - return pm.dispatchBatchCommon(ctx, batch) +func (pm *privateMessaging) dispatchUnpinnedBatch(ctx context.Context, state *batch.DispatchState) error { + return pm.dispatchBatchCommon(ctx, state) } -func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, batch *fftypes.Batch) error { +func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, state *batch.DispatchState) error { + batch := state.Persisted.GenInflight(state.Messages, state.Data) tw := &fftypes.TransportWrapper{ Batch: batch, } // Retrieve the group - group, nodes, err := pm.groupManager.getGroupNodes(ctx, batch.Group) + group, nodes, err := pm.groupManager.getGroupNodes(ctx, batch.Group, false /* fail if not found */) if err != nil { return err } @@ -170,7 +190,7 @@ func (pm *privateMessaging) dispatchBatchCommon(ctx context.Context, batch *ffty return pm.sendData(ctx, tw, nodes) } -func (pm *privateMessaging) transferBlobs(ctx context.Context, data []*fftypes.Data, txid *fftypes.UUID, node *fftypes.Identity) error { +func (pm *privateMessaging) transferBlobs(ctx context.Context, data fftypes.DataArray, txid *fftypes.UUID, node *fftypes.Identity) error { // Send all the blobs associated with this batch for _, d := range data { // We only need to send a blob if there is one, and it's not been uploaded to the shared storage @@ -187,17 +207,13 @@ func (pm *privateMessaging) transferBlobs(ctx context.Context, data []*fftypes.D pm.exchange, d.Namespace, txid, - fftypes.OpTypeDataExchangeBlobSend) - op.Input = fftypes.JSONObject{ - "hash": d.Blob.Hash, - } - if err = pm.database.InsertOperation(ctx, op); err != nil { + fftypes.OpTypeDataExchangeSendBlob) + addTransferBlobInputs(op, node.ID, blob.Hash) + if err = pm.operations.AddOrReuseOperation(ctx, op); err != nil { return err } - if err := pm.exchange.TransferBLOB(ctx, op.ID, node.Profile.GetString("id"), blob.PayloadRef); err != nil { - return err - } + return pm.operations.RunOperation(ctx, opSendBlob(op, node, blob)) } } return nil @@ -207,11 +223,6 @@ func (pm *privateMessaging) sendData(ctx context.Context, tw *fftypes.TransportW l := log.L(ctx) batch := tw.Batch - payload, err := json.Marshal(tw) - if err != nil { - return 
i18n.WrapError(ctx, err, i18n.MsgSerializationFailed) - } - // Lookup the local org localOrg, err := pm.identity.GetNodeOwnerOrg(ctx) if err != nil { @@ -237,17 +248,16 @@ func (pm *privateMessaging) sendData(ctx context.Context, tw *fftypes.TransportW pm.exchange, batch.Namespace, batch.Payload.TX.ID, - fftypes.OpTypeDataExchangeBatchSend) - op.Input = fftypes.JSONObject{ - "manifest": tw.Batch.Manifest().String(), + fftypes.OpTypeDataExchangeSendBatch) + var groupHash *fftypes.Bytes32 + if tw.Group != nil { + groupHash = tw.Group.Hash } - if err = pm.database.InsertOperation(ctx, op); err != nil { + addBatchSendInputs(op, node.ID, groupHash, batch.ID) + if err = pm.operations.AddOrReuseOperation(ctx, op); err != nil { return err } - - // Send the payload itself - err := pm.exchange.SendMessage(ctx, op.ID, node.Profile.GetString("id"), payload) - if err != nil { + if err = pm.operations.RunOperation(ctx, opSendBatch(op, node, tw)); err != nil { return err } } diff --git a/internal/privatemessaging/privatemessaging_test.go b/internal/privatemessaging/privatemessaging_test.go index bc88e93adf..6516ba0783 100644 --- a/internal/privatemessaging/privatemessaging_test.go +++ b/internal/privatemessaging/privatemessaging_test.go @@ -21,6 +21,7 @@ import ( "fmt" "testing" + "github.com/hyperledger/firefly/internal/batch" "github.com/hyperledger/firefly/internal/config" "github.com/hyperledger/firefly/mocks/batchmocks" "github.com/hyperledger/firefly/mocks/batchpinmocks" @@ -30,6 +31,7 @@ import ( "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/mocks/identitymanagermocks" "github.com/hyperledger/firefly/mocks/metricsmocks" + "github.com/hyperledger/firefly/mocks/operationmocks" "github.com/hyperledger/firefly/mocks/syncasyncmocks" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" @@ -51,6 +53,7 @@ func newTestPrivateMessagingCommon(t *testing.T, metricsEnabled bool) (*privateM msa := &syncasyncmocks.Bridge{} mbp := &batchpinmocks.Submitter{} mmi := &metricsmocks.Manager{} + mom := &operationmocks.Manager{} mba.On("RegisterDispatcher", pinnedPrivateDispatcherName, @@ -68,16 +71,10 @@ func newTestPrivateMessagingCommon(t *testing.T, metricsEnabled bool) (*privateM fftypes.MessageTypePrivate, }, mock.Anything, mock.Anything).Return() mmi.On("IsMetricsEnabled").Return(metricsEnabled) - - rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything).Maybe() - rag.RunFn = func(a mock.Arguments) { - rag.ReturnArguments = mock.Arguments{ - a[1].(func(context.Context) error)(a[0].(context.Context)), - } - } + mom.On("RegisterHandler", mock.Anything, mock.Anything, mock.Anything) ctx, cancel := context.WithCancel(context.Background()) - pm, err := NewPrivateMessaging(ctx, mdi, mim, mdx, mbi, mba, mdm, msa, mbp, mmi) + pm, err := NewPrivateMessaging(ctx, mdi, mim, mdx, mbi, mba, mdm, msa, mbp, mmi, mom) assert.NoError(t, err) // Default mocks to save boilerplate in the tests @@ -98,6 +95,12 @@ func newTestPrivateMessagingWithMetrics(t *testing.T) (*privateMessaging, func() return pm, cancel } +func TestName(t *testing.T) { + pm, cancel := newTestPrivateMessaging(t) + defer cancel() + assert.Equal(t, "PrivateMessaging", pm.Name()) +} + func TestDispatchBatchWithBlobs(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) @@ -119,12 +122,8 @@ func TestDispatchBatchWithBlobs(t *testing.T) { mbp := pm.batchpin.(*batchpinmocks.Submitter) mdx := pm.exchange.(*dataexchangemocks.Plugin) mim := pm.identity.(*identitymanagermocks.Manager) + mom := 
pm.operations.(*operationmocks.Manager) - mim.On("ResolveInputIdentity", pm.ctx, mock.Anything).Run(func(args mock.Arguments) { - identity := args[1].(*fftypes.SignerRef) - assert.Equal(t, "org1", identity.Author) - identity.Key = "0x12345" - }).Return(nil) mim.On("GetNodeOwnerOrg", pm.ctx).Return(localOrg, nil) mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{ Hash: fftypes.NewRandB32(), @@ -142,65 +141,70 @@ func TestDispatchBatchWithBlobs(t *testing.T) { Hash: blob1, PayloadRef: "/blob/1", }, nil) - mdx.On("TransferBLOB", pm.ctx, mock.Anything, "node2-peer", "/blob/1").Return(nil).Once() - mdi.On("InsertOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { - return op.Type == fftypes.OpTypeDataExchangeBlobSend + mom.On("AddOrReuseOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { + return op.Type == fftypes.OpTypeDataExchangeSendBlob })).Return(nil, nil) - - mdx.On("SendMessage", pm.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - mdi.On("InsertOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { - return op.Type == fftypes.OpTypeDataExchangeBatchSend + mom.On("AddOrReuseOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { + return op.Type == fftypes.OpTypeDataExchangeSendBlob + })).Return(nil, nil) + mom.On("AddOrReuseOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { + return op.Type == fftypes.OpTypeDataExchangeSendBatch })).Return(nil, nil) + mom.On("AddOrReuseOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.Operation) bool { + return op.Type == fftypes.OpTypeDataExchangeSendBatch + })).Return(nil, nil) + mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + if op.Type != fftypes.OpTypeDataExchangeSendBlob { + return false + } + data := op.Data.(transferBlobData) + return *data.Node.ID == *node2.ID + })).Return(nil) + mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + if op.Type != fftypes.OpTypeDataExchangeSendBatch { + return false + } + data := op.Data.(batchSendData) + return *data.Node.ID == *node2.ID + })).Return(nil) mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(nil) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - ID: batchID, - SignerRef: fftypes.SignerRef{ - Author: "org1", - }, - Group: groupID, - Namespace: "ns1", - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: txID, + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + ID: batchID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, + Group: groupID, + Namespace: "ns1", }, - Data: []*fftypes.Data{ - {ID: dataID1, Blob: &fftypes.BlobRef{Hash: blob1}}, + TX: fftypes.TransactionRef{ + Type: fftypes.TransactionTypeUnpinned, + ID: txID, }, + Hash: batchHash, }, - Hash: batchHash, - }, []*fftypes.Bytes32{pin1, pin2}) + Data: fftypes.DataArray{ + {ID: dataID1, Blob: &fftypes.BlobRef{Hash: blob1}}, + }, + Pins: []*fftypes.Bytes32{pin1, pin2}, + }) assert.NoError(t, err) mdi.AssertExpectations(t) + mbp.AssertExpectations(t) mdx.AssertExpectations(t) + mim.AssertExpectations(t) + mom.AssertExpectations(t) } func TestNewPrivateMessagingMissingDeps(t *testing.T) { - _, err := NewPrivateMessaging(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil) + _, err := NewPrivateMessaging(context.Background(), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) assert.Regexp(t, "FF10128", 
err) } -func TestDispatchBatchBadData(t *testing.T) { - pm, cancel := newTestPrivateMessaging(t) - defer cancel() - - groupID := fftypes.NewRandB32() - mdi := pm.database.(*databasemocks.Plugin) - mdi.On("GetGroupByHash", pm.ctx, groupID).Return(&fftypes.Group{}, nil) - - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ - {Value: fftypes.JSONAnyPtr(`{!json}`)}, - }, - }, - }, []*fftypes.Bytes32{}) - assert.Regexp(t, "FF10137", err) -} - func TestDispatchErrorFindingGroup(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) defer cancel() @@ -208,7 +212,7 @@ func TestDispatchErrorFindingGroup(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{}, []*fftypes.Bytes32{}) + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{}) assert.Regexp(t, "pop", err) } @@ -222,11 +226,15 @@ func TestSendAndSubmitBatchBadID(t *testing.T) { mbp := pm.batchpin.(*batchpinmocks.Submitter) mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - SignerRef: fftypes.SignerRef{ - Author: "badauthor", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + SignerRef: fftypes.SignerRef{ + Author: "badauthor", + }, + }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -257,12 +265,16 @@ func TestSendAndSubmitBatchUnregisteredNode(t *testing.T) { mim := pm.identity.(*identitymanagermocks.Manager) mim.On("GetNodeOwnerOrg", pm.ctx).Return(nil, fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "badauthor", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "badauthor", + }, + }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -276,11 +288,15 @@ func TestSendImmediateFail(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetGroupByHash", pm.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - SignerRef: fftypes.SignerRef{ - Author: "org1", + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, + }, }, - }, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -311,19 +327,19 @@ func TestSendSubmitInsertOperationFail(t *testing.T) { }, }, }, nil) - mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) - - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", - }, - Payload: fftypes.BatchPayload{ - TX: fftypes.TransactionRef{ - ID: fftypes.NewUUID(), + mom := pm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) + + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, }, - 
}, []*fftypes.Bytes32{}) + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) @@ -356,31 +372,37 @@ func TestSendSubmitBlobTransferFail(t *testing.T) { }, }, }, nil) - mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(nil) + + mom := pm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(nil) mdi.On("GetBlobMatchingHash", pm.ctx, blob1).Return(&fftypes.Blob{ Hash: blob1, PayloadRef: "/blob/1", }, nil) - mdx := pm.exchange.(*dataexchangemocks.Plugin) - mdx.On("TransferBLOB", pm.ctx, mock.Anything, "node2-peer", "/blob/1").Return(fmt.Errorf("pop")).Once() - - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", - }, - Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ - {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{Hash: blob1}}, + mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + data := op.Data.(transferBlobData) + return op.Type == fftypes.OpTypeDataExchangeSendBlob && *data.Node.ID == *node2.ID + })).Return(fmt.Errorf("pop")) + + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, }, - }, []*fftypes.Bytes32{}) + Data: fftypes.DataArray{ + {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{Hash: blob1}}, + }, + }) assert.Regexp(t, "pop", err) mdi.AssertExpectations(t) mim.AssertExpectations(t) - mdx.AssertExpectations(t) + mom.AssertExpectations(t) } func TestWriteTransactionSubmitBatchPinFail(t *testing.T) { @@ -409,36 +431,51 @@ func TestWriteTransactionSubmitBatchPinFail(t *testing.T) { }, }, }, nil) - mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(nil) + + mom := pm.operations.(*operationmocks.Manager) + mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(nil) + mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + if op.Type != fftypes.OpTypeDataExchangeSendBlob { + return false + } + data := op.Data.(transferBlobData) + return *data.Node.ID == *node2.ID + })).Return(nil) + mom.On("RunOperation", pm.ctx, mock.MatchedBy(func(op *fftypes.PreparedOperation) bool { + if op.Type != fftypes.OpTypeDataExchangeSendBatch { + return false + } + data := op.Data.(batchSendData) + return *data.Node.ID == *node2.ID + })).Return(nil) + mdi.On("GetBlobMatchingHash", pm.ctx, blob1).Return(&fftypes.Blob{ Hash: blob1, PayloadRef: "/blob/1", }, nil) - mdx := pm.exchange.(*dataexchangemocks.Plugin) - mdx.On("TransferBLOB", pm.ctx, mock.Anything, "node2-peer", "/blob/1").Return(nil).Once() - mdx.On("SendMessage", pm.ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - mbp := pm.batchpin.(*batchpinmocks.Submitter) mbp.On("SubmitPinnedBatch", pm.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) - err := pm.dispatchPinnedBatch(pm.ctx, &fftypes.Batch{ - Group: groupID, - SignerRef: fftypes.SignerRef{ - Author: "org1", - }, - Payload: fftypes.BatchPayload{ - Data: []*fftypes.Data{ - {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{Hash: blob1}}, + err := pm.dispatchPinnedBatch(pm.ctx, &batch.DispatchState{ + Persisted: fftypes.BatchPersisted{ + BatchHeader: fftypes.BatchHeader{ + Group: groupID, + SignerRef: fftypes.SignerRef{ + Author: "org1", + }, }, }, - }, []*fftypes.Bytes32{}) + Data: fftypes.DataArray{ + {ID: fftypes.NewUUID(), Blob: &fftypes.BlobRef{Hash: blob1}}, + }, + }) assert.Regexp(t, "pop", 
err) mdi.AssertExpectations(t) mim.AssertExpectations(t) - mdx.AssertExpectations(t) mbp.AssertExpectations(t) + mom.AssertExpectations(t) } func TestTransferBlobsNotFound(t *testing.T) { @@ -448,7 +485,7 @@ func TestTransferBlobsNotFound(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetBlobMatchingHash", pm.ctx, mock.Anything).Return(nil, nil) - err := pm.transferBlobs(pm.ctx, []*fftypes.Data{ + err := pm.transferBlobs(pm.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{Hash: fftypes.NewRandB32()}}, }, fftypes.NewUUID(), newTestNode("node1", newTestOrg("org1"))) assert.Regexp(t, "FF10239", err) @@ -456,35 +493,19 @@ func TestTransferBlobsNotFound(t *testing.T) { mdi.AssertExpectations(t) } -func TestTransferBlobsFail(t *testing.T) { - pm, cancel := newTestPrivateMessaging(t) - defer cancel() - - mdi := pm.database.(*databasemocks.Plugin) - mdi.On("GetBlobMatchingHash", pm.ctx, mock.Anything).Return(&fftypes.Blob{PayloadRef: "blob/1"}, nil) - mdx := pm.exchange.(*dataexchangemocks.Plugin) - mdx.On("TransferBLOB", pm.ctx, mock.Anything, "node1-peer", "blob/1").Return(fmt.Errorf("pop")) - mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(nil) - - err := pm.transferBlobs(pm.ctx, []*fftypes.Data{ - {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{Hash: fftypes.NewRandB32()}}, - }, fftypes.NewUUID(), newTestNode("node1", newTestOrg("org1"))) - assert.Regexp(t, "pop", err) - - mdi.AssertExpectations(t) - mdx.AssertExpectations(t) -} - func TestTransferBlobsOpInsertFail(t *testing.T) { pm, cancel := newTestPrivateMessaging(t) defer cancel() mdi := pm.database.(*databasemocks.Plugin) + mdx := pm.exchange.(*dataexchangemocks.Plugin) + mom := pm.operations.(*operationmocks.Manager) mdi.On("GetBlobMatchingHash", pm.ctx, mock.Anything).Return(&fftypes.Blob{PayloadRef: "blob/1"}, nil) - mdi.On("InsertOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) + mdx.On("TransferBLOB", pm.ctx, mock.Anything, "peer1", "blob/1").Return(nil) + mom.On("AddOrReuseOperation", pm.ctx, mock.Anything).Return(fmt.Errorf("pop")) - err := pm.transferBlobs(pm.ctx, []*fftypes.Data{ + err := pm.transferBlobs(pm.ctx, fftypes.DataArray{ {ID: fftypes.NewUUID(), Hash: fftypes.NewRandB32(), Blob: &fftypes.BlobRef{Hash: fftypes.NewRandB32()}}, }, fftypes.NewUUID(), newTestNode("node1", newTestOrg("org1"))) assert.Regexp(t, "pop", err) diff --git a/internal/privatemessaging/recipients.go b/internal/privatemessaging/recipients.go index 4bffb9703b..90606863cc 100644 --- a/internal/privatemessaging/recipients.go +++ b/internal/privatemessaging/recipients.go @@ -56,6 +56,24 @@ func (pm *privateMessaging) resolveRecipientList(ctx context.Context, in *fftype return err } +func (pm *privateMessaging) getFirstNodeForOrg(ctx context.Context, identity *fftypes.Identity) (*fftypes.Identity, error) { + node := pm.orgFirstNodes[*identity.ID] + if node == nil && identity.Type == fftypes.IdentityTypeOrg { + fb := database.IdentityQueryFactory.NewFilterLimit(ctx, 1) + filter := fb.And( + fb.Eq("parent", identity.ID), + fb.Eq("type", fftypes.IdentityTypeNode), + ) + nodes, _, err := pm.database.GetIdentities(ctx, filter) + if err != nil || len(nodes) == 0 { + return nil, err + } + node = nodes[0] + pm.orgFirstNodes[*identity.ID] = node + } + return node, nil +} + func (pm *privateMessaging) resolveNode(ctx context.Context, identity *fftypes.Identity, nodeInput string) (node *fftypes.Identity, err error) { retryable := true if nodeInput != 
"" { @@ -64,19 +82,10 @@ func (pm *privateMessaging) resolveNode(ctx context.Context, identity *fftypes.I // Find any node owned by this organization inputIdentityDebugInfo := fmt.Sprintf("%s (%s)", identity.DID, identity.ID) for identity != nil && node == nil { - var nodes []*fftypes.Identity - if identity.Type == fftypes.IdentityTypeOrg { - fb := database.IdentityQueryFactory.NewFilterLimit(ctx, 1) - filter := fb.And( - fb.Eq("parent", identity.ID), - fb.Eq("type", fftypes.IdentityTypeNode), - ) - nodes, _, err = pm.database.GetIdentities(ctx, filter) - } + node, err = pm.getFirstNodeForOrg(ctx, identity) switch { - case err == nil && len(nodes) > 0: + case err == nil && node != nil: // This is an org, and it owns a node - node = nodes[0] case err == nil && identity.Parent != nil: // This identity has a parent, maybe that org owns a node identity, err = pm.identity.CachedIdentityLookupByID(ctx, identity.Parent) @@ -178,13 +187,12 @@ func (pm *privateMessaging) findOrGenerateGroup(ctx context.Context, in *fftypes } newCandidate.Seal() - filter := database.GroupQueryFactory.NewFilterLimit(ctx, 1).Eq("hash", newCandidate.Hash) - groups, _, err := pm.database.GetGroups(ctx, filter) + group, _, err = pm.getGroupNodes(ctx, newCandidate.Hash, true) if err != nil { return nil, false, err } - if len(groups) > 0 { - return groups[0], false, nil + if group != nil { + return group, false, nil } return newCandidate, true, nil } diff --git a/internal/privatemessaging/recipients_test.go b/internal/privatemessaging/recipients_test.go index 47ba503e1d..33ff8daaa5 100644 --- a/internal/privatemessaging/recipients_test.go +++ b/internal/privatemessaging/recipients_test.go @@ -43,7 +43,7 @@ func TestResolveMemberListNewGroupE2E(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{remoteNode}, nil, nil).Once() mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil).Once() - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{}, nil, nil) + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(nil, nil).Once() mdi.On("UpsertGroup", pm.ctx, mock.Anything, database.UpsertOptimizationNew).Return(nil) mim := pm.identity.(*identitymanagermocks.Manager) @@ -114,9 +114,7 @@ func TestResolveMemberListExistingGroup(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil) - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{ - {Hash: fftypes.NewRandB32()}, - }, nil, nil) + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mim := pm.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookup", pm.ctx, "org1").Return(localOrg, false, nil) mim.On("GetNodeOwnerOrg", pm.ctx).Return(localNode, nil) @@ -182,7 +180,7 @@ func TestResolveMemberListGetGroupsFail(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil) - mdi.On("GetGroups", pm.ctx, mock.Anything).Return(nil, nil, fmt.Errorf("pop")) + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("pop")) mim := pm.identity.(*identitymanagermocks.Manager) mim.On("CachedIdentityLookup", pm.ctx, "org1").Return(localOrg, false, nil) mim.On("GetNodeOwnerOrg", pm.ctx).Return(localNode, nil) @@ -321,7 
+319,7 @@ func TestResolveMemberNodeOwnedParentOrg(t *testing.T) { mdi := pm.database.(*databasemocks.Plugin) mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{}, nil, nil).Once() mdi.On("GetIdentities", pm.ctx, mock.Anything).Return([]*fftypes.Identity{localNode}, nil, nil) - mdi.On("GetGroups", pm.ctx, mock.Anything).Return([]*fftypes.Group{{Hash: fftypes.NewRandB32()}}, nil, nil) + mdi.On("GetGroupByHash", pm.ctx, mock.Anything, mock.Anything).Return(&fftypes.Group{Hash: fftypes.NewRandB32()}, nil, nil).Once() mim := pm.identity.(*identitymanagermocks.Manager) mim.On("GetNodeOwnerOrg", pm.ctx).Return(parentOrg, nil) mim.On("CachedIdentityLookup", pm.ctx, "org1").Return(childOrg, false, nil) diff --git a/internal/restclient/ffresty.go b/internal/restclient/ffresty.go index 36f93c80b2..1c03a9c3d9 100644 --- a/internal/restclient/ffresty.go +++ b/internal/restclient/ffresty.go @@ -31,6 +31,7 @@ import ( "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/sirupsen/logrus" ) type retryCtxKey struct{} @@ -52,7 +53,12 @@ func OnAfterResponse(c *resty.Client, resp *resty.Response) { rctx := resp.Request.Context() rc := rctx.Value(retryCtxKey{}).(*retryCtx) elapsed := float64(time.Since(rc.start)) / float64(time.Millisecond) - log.L(rctx).Infof("<== %s %s [%d] (%.2fms)", resp.Request.Method, resp.Request.URL, resp.StatusCode(), elapsed) + level := logrus.DebugLevel + status := resp.StatusCode() + if status >= 300 { + level = logrus.ErrorLevel + } + log.L(rctx).Logf(level, "<== %s %s [%d] (%.2fms)", resp.Request.Method, resp.Request.URL, status, elapsed) } // New creates a new Resty client, using static configuration (from the config file) @@ -117,7 +123,7 @@ func New(ctx context.Context, staticConfig config.Prefix) *resty.Client { rctx = log.WithLogger(rctx, l) req.SetContext(rctx) } - log.L(rctx).Infof("==> %s %s%s", req.Method, url, req.URL) + log.L(rctx).Debugf("==> %s %s%s", req.Method, url, req.URL) return nil }) diff --git a/internal/shareddownload/download_manager.go b/internal/shareddownload/download_manager.go new file mode 100644 index 0000000000..56caec6682 --- /dev/null +++ b/internal/shareddownload/download_manager.go @@ -0,0 +1,244 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
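+
+// Package shareddownload manages downloads of batch and blob payloads from shared
+// storage, using a pool of workers with retry/backoff, and crash recovery of pending
+// download operations on startup.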
+package shareddownload
+
+import (
+	"context"
+	"database/sql/driver"
+	"math"
+	"time"
+
+	"github.com/hyperledger/firefly/internal/config"
+	"github.com/hyperledger/firefly/internal/i18n"
+	"github.com/hyperledger/firefly/internal/log"
+	"github.com/hyperledger/firefly/internal/operations"
+	"github.com/hyperledger/firefly/pkg/database"
+	"github.com/hyperledger/firefly/pkg/dataexchange"
+	"github.com/hyperledger/firefly/pkg/fftypes"
+	"github.com/hyperledger/firefly/pkg/sharedstorage"
+)
+
+type Manager interface {
+	Start() error
+	WaitStop()
+
+	InitiateDownloadBatch(ctx context.Context, ns string, tx *fftypes.UUID, payloadRef string) error
+	InitiateDownloadBlob(ctx context.Context, ns string, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string) error
+}
+
+// downloadManager operates a number of workers that can perform downloads/retries. Each download
+// stays in pending state until its retry attempts are exhausted, but each retry is dispatched
+// individually to the workers, so retrying downloads do not block new downloads from getting
+// a chance to use the workers.
+// Pending download operations are recovered on startup, and start a new retry loop.
+type downloadManager struct {
+	ctx                        context.Context
+	cancelFunc                 func()
+	database                   database.Plugin
+	sharedstorage              sharedstorage.Plugin
+	dataexchange               dataexchange.Plugin
+	operations                 operations.Manager
+	callbacks                  Callbacks
+	workerCount                int
+	workers                    []*downloadWorker
+	work                       chan *downloadWork
+	recoveryComplete           chan struct{}
+	broadcastBatchPayloadLimit int64
+	retryMaxAttempts           int
+	retryInitDelay             time.Duration
+	retryMaxDelay              time.Duration
+	retryFactor                float64
+}
+
+type downloadWork struct {
+	dispatchedAt time.Time
+	preparedOp   *fftypes.PreparedOperation
+	attempts     int
+}
+
+type Callbacks interface {
+	SharedStorageBatchDownloaded(ns string, payloadRef string, data []byte) (batchID *fftypes.UUID, err error)
+	SharedStorageBLOBDownloaded(hash fftypes.Bytes32, size int64, payloadRef string) error
+}
+
+func NewDownloadManager(ctx context.Context, di database.Plugin, ss sharedstorage.Plugin, dx dataexchange.Plugin, om operations.Manager, cb Callbacks) (Manager, error) {
+	if di == nil || dx == nil || ss == nil || om == nil || cb == nil {
+		return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError)
+	}
+
+	dmCtx, cancelFunc := context.WithCancel(ctx)
+	dm := &downloadManager{
+		ctx:                        dmCtx,
+		cancelFunc:                 cancelFunc,
+		database:                   di,
+		sharedstorage:              ss,
+		dataexchange:               dx,
+		operations:                 om,
+		callbacks:                  cb,
+		broadcastBatchPayloadLimit: config.GetByteSize(config.BroadcastBatchPayloadLimit),
+		workerCount:                config.GetInt(config.DownloadWorkerCount),
+		retryMaxAttempts:           config.GetInt(config.DownloadRetryMaxAttempts),
+		retryInitDelay:             config.GetDuration(config.DownloadRetryInitDelay),
+		retryMaxDelay:              config.GetDuration(config.DownloadRetryMaxDelay),
+		retryFactor:                config.GetFloat64(config.DownloadRetryFactor),
+	}
+	// Work queue defaults to twice the size of the worker count, if not configured explicitly
+	workQueueLength := config.GetInt(config.DownloadWorkerQueueLength)
+	if workQueueLength <= 0 {
+		workQueueLength = 2 * dm.workerCount
+	}
+	if dm.retryMaxAttempts <= 0 {
+		dm.retryMaxAttempts = 1
+	}
+	dm.work = make(chan *downloadWork, workQueueLength)
+
+	dm.operations.RegisterHandler(ctx, dm, []fftypes.OpType{
+		fftypes.OpTypeSharedStorageDownloadBatch,
+		fftypes.OpTypeSharedStorageDownloadBlob,
+	})
+
+	return dm, nil
+}
+
+func (dm *downloadManager) Start() error {
+	dm.workers = make([]*downloadWorker, dm.workerCount)
+	for i := 0; i < dm.workerCount; i++ {
+		dm.workers[i] = newDownloadWorker(dm, i)
+	}
+	dm.recoveryComplete = make(chan struct{})
+	go dm.recoverDownloads(fftypes.Now())
+	return nil
+}
+
+func (dm *downloadManager) Name() string {
+	return "SharedStorageDownloadManager"
+}
+
+func (dm *downloadManager) WaitStop() {
+	dm.cancelFunc()
+	for _, w := range dm.workers {
+		<-w.done
+	}
+}
+
+func (dm *downloadManager) calcDelay(attempts int) time.Duration {
+	delay := dm.retryInitDelay
+	for i := 0; i < attempts; i++ {
+		delay = time.Duration(math.Ceil(float64(delay) * dm.retryFactor))
+	}
+	if delay > dm.retryMaxDelay {
+		delay = dm.retryMaxDelay
+	}
+	return delay
+}
+
+// recoverDownloads grabs all pending download operations on startup, to restart them
+func (dm *downloadManager) recoverDownloads(startupTime *fftypes.FFTime) {
+
+	defer close(dm.recoveryComplete)
+	recovered := 0
+	pageSize := uint64(25)
+	page := uint64(0)
+	errorAttempts := 0
+	for {
+		fb := database.OperationQueryFactory.NewFilter(dm.ctx)
+		filter := fb.And(
+			fb.In("type", []driver.Value{
+				fftypes.OpTypeSharedStorageDownloadBatch,
+				fftypes.OpTypeSharedStorageDownloadBlob,
+			}),
+			fb.Eq("status", fftypes.OpStatusPending),
+			fb.Lt("created", startupTime), // retry is handled completely separately
+		).
+			Sort("created").
+			Skip(page * pageSize).
+			Limit(pageSize)
+		pendingOps, _, err := dm.database.GetOperations(dm.ctx, filter)
+		if err != nil {
+			log.L(dm.ctx).Errorf("Error while recovering pending downloads (retries=%d): %s", errorAttempts, err)
+			errorAttempts++
+			time.Sleep(dm.calcDelay(errorAttempts))
+			continue
+		}
+		errorAttempts = 0 // reset on success
+		page++
+		if len(pendingOps) == 0 {
+			log.L(dm.ctx).Infof("Download manager completed startup after recovering %d pending downloads", recovered)
+			return
+		}
+		for _, op := range pendingOps {
+			preparedOp, err := dm.PrepareOperation(dm.ctx, op)
+			if err != nil {
+				log.L(dm.ctx).Errorf("Failed to recover pending download %s/%s: %s", op.Type, op.ID, err)
+				continue
+			}
+			recovered++
+			log.L(dm.ctx).Infof("Recovering pending download %s/%s", op.Type, op.ID)
+			dm.dispatchWork(&downloadWork{
+				dispatchedAt: time.Now(),
+				preparedOp:   preparedOp,
+			})
+		}
+	}
+
+}
+
+func (dm *downloadManager) dispatchWork(work *downloadWork) {
+	dm.work <- work
+	// Log after dispatching so we can see the dispatch delay if the queue got full
+	log.L(dm.ctx).Debugf("Dispatched download operation %s/%s (attempts=%d) to worker pool", work.preparedOp.Type, work.preparedOp.ID, work.attempts)
+}
+
+// waitAndRetryDownload is a goroutine to wait and re-dispatch a retrying download.
+// Note this goroutine is short lived and completely separate from the workers.
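+// As an illustration of the backoff (all values come from config; these numbers are
+// examples only): with retryInitDelay=100ms, retryFactor=2.0 and retryMaxDelay=1s,
+// calcDelay yields 200ms after the first attempt, then 400ms, 800ms, and 1s (capped) thereafter.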
+func (dm *downloadManager) waitAndRetryDownload(work *downloadWork) { + startedWaiting := time.Now() + delay := dm.calcDelay(work.attempts) + <-time.After(delay) + delayTimeMS := time.Since(startedWaiting).Milliseconds() + totalTimeMS := time.Since(work.dispatchedAt).Milliseconds() + log.L(dm.ctx).Infof("Retrying download operation %s/%s after %dms (total=%dms,attempts=%d)", + work.preparedOp.Type, work.preparedOp.ID, delayTimeMS, totalTimeMS, work.attempts) + dm.dispatchWork(work) +} + +func (dm *downloadManager) InitiateDownloadBatch(ctx context.Context, ns string, tx *fftypes.UUID, payloadRef string) error { + op := fftypes.NewOperation(dm.sharedstorage, ns, tx, fftypes.OpTypeSharedStorageDownloadBatch) + addDownloadBatchInputs(op, ns, payloadRef) + return dm.createAndDispatchOp(ctx, op, opDownloadBatch(op, ns, payloadRef)) +} + +func (dm *downloadManager) InitiateDownloadBlob(ctx context.Context, ns string, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string) error { + op := fftypes.NewOperation(dm.sharedstorage, ns, tx, fftypes.OpTypeSharedStorageDownloadBlob) + addDownloadBlobInputs(op, ns, dataID, payloadRef) + return dm.createAndDispatchOp(ctx, op, opDownloadBlob(op, ns, dataID, payloadRef)) +} + +func (dm *downloadManager) createAndDispatchOp(ctx context.Context, op *fftypes.Operation, preparedOp *fftypes.PreparedOperation) error { + err := dm.database.InsertOperation(ctx, op, func() { + // Use a closure hook to dispatch the work once the operation is successfully in the DB. + // Note we have crash recovery of pending operations on startup. + dm.dispatchWork(&downloadWork{ + dispatchedAt: time.Now(), + preparedOp: preparedOp, + }) + }) + if err != nil { + return err + } + return nil +} diff --git a/internal/shareddownload/download_manager_test.go b/internal/shareddownload/download_manager_test.go new file mode 100644 index 0000000000..f5c6661304 --- /dev/null +++ b/internal/shareddownload/download_manager_test.go @@ -0,0 +1,278 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package shareddownload + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/operations" + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/dataexchangemocks" + "github.com/hyperledger/firefly/mocks/shareddownloadmocks" + "github.com/hyperledger/firefly/mocks/sharedstoragemocks" + "github.com/hyperledger/firefly/pkg/database" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func newTestDownloadManager(t *testing.T) (*downloadManager, func()) { + config.Reset() + config.Set(config.DownloadWorkerCount, 1) + config.Set(config.DownloadRetryMaxAttempts, 0 /* bumps to 1 */) + + mdi := &databasemocks.Plugin{} + mss := &sharedstoragemocks.Plugin{} + mdx := &dataexchangemocks.Plugin{} + mci := &shareddownloadmocks.Callbacks{} + operations, err := operations.NewOperationsManager(context.Background(), mdi) + assert.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + pm, err := NewDownloadManager(ctx, mdi, mss, mdx, operations, mci) + assert.NoError(t, err) + + return pm.(*downloadManager), cancel +} + +func TestNewDownloadManagerMissingDeps(t *testing.T) { + _, err := NewDownloadManager(context.Background(), nil, nil, nil, nil, nil) + assert.Regexp(t, "FF10128", err) +} + +func TestDownloadBatchE2EOk(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + dm.workerCount = 1 + dm.workers = []*downloadWorker{newDownloadWorker(dm, 0)} + + reader := ioutil.NopCloser(strings.NewReader("some batch data")) + txID := fftypes.NewUUID() + batchID := fftypes.NewUUID() + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("Name").Return("utss") + mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil) + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("InsertOperation", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + args[2].(database.PostCompletionHook)() + }).Return(nil) + mdi.On("ResolveOperation", mock.Anything, mock.Anything, fftypes.OpStatusSucceeded, "", fftypes.JSONObject{ + "batch": batchID, + }).Return(nil) + + called := make(chan struct{}) + + mci := dm.callbacks.(*shareddownloadmocks.Callbacks) + mci.On("SharedStorageBatchDownloaded", "ns1", "ref1", []byte("some batch data")).Run(func(args mock.Arguments) { + close(called) + }).Return(batchID, nil) + + err := dm.InitiateDownloadBatch(dm.ctx, "ns1", txID, "ref1") + assert.NoError(t, err) + + <-called + + mss.AssertExpectations(t) + mdi.AssertExpectations(t) + mci.AssertExpectations(t) + +} + +func TestDownloadBlobWithRetryOk(t *testing.T) { + + dm, _ := newTestDownloadManager(t) + defer dm.WaitStop() + dm.workerCount = 1 + dm.retryMaxAttempts = 3 + dm.retryInitDelay = 10 * time.Microsecond + dm.retryMaxDelay = 15 * time.Microsecond + dm.workers = []*downloadWorker{newDownloadWorker(dm, 0)} + + reader := ioutil.NopCloser(strings.NewReader("some blob data")) + txID := fftypes.NewUUID() + dataID := fftypes.NewUUID() + blobHash := fftypes.NewRandB32() + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("Name").Return("utss") + mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil) + + mdx := dm.dataexchange.(*dataexchangemocks.Plugin) + mdx.On("UploadBLOB", mock.Anything, "ns1", *dataID, mock.Anything).Return("privateRef1", blobHash, int64(12345), nil) + + mdi := 
dm.database.(*databasemocks.Plugin) + mdi.On("InsertOperation", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + args[2].(database.PostCompletionHook)() + }).Return(nil) + mdi.On("ResolveOperation", mock.Anything, mock.Anything, fftypes.OpStatusPending, "pop", mock.Anything).Return(nil) + mdi.On("ResolveOperation", mock.Anything, mock.Anything, fftypes.OpStatusSucceeded, "", fftypes.JSONObject{ + "hash": blobHash, + "size": int64(12345), + "dxPayloadRef": "privateRef1", + }).Return(nil) + + called := make(chan struct{}) + + mci := dm.callbacks.(*shareddownloadmocks.Callbacks) + mci.On("SharedStorageBLOBDownloaded", *blobHash, int64(12345), "privateRef1").Return(fmt.Errorf("pop")).Twice() + mci.On("SharedStorageBLOBDownloaded", *blobHash, int64(12345), "privateRef1").Run(func(args mock.Arguments) { + close(called) + }).Return(nil) + + err := dm.InitiateDownloadBlob(dm.ctx, "ns1", txID, dataID, "ref1") + assert.NoError(t, err) + + <-called + + mss.AssertExpectations(t) + mdx.AssertExpectations(t) + mdi.AssertExpectations(t) + mci.AssertExpectations(t) + +} + +func TestDownloadBlobInsertOpFail(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + + txID := fftypes.NewUUID() + dataID := fftypes.NewUUID() + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("Name").Return("utss") + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("InsertOperation", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop")) + + err := dm.InitiateDownloadBlob(dm.ctx, "ns1", txID, dataID, "ref1") + assert.Regexp(t, "pop", err) + + mdi.AssertExpectations(t) + +} + +func TestDownloadManagerStartupRecoveryCombinations(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + dm.workerCount = 1 + dm.retryInitDelay = 1 * time.Microsecond + dm.workers = []*downloadWorker{newDownloadWorker(dm, 0)} + + called := make(chan bool) + + reader := ioutil.NopCloser(strings.NewReader("some batch data")) + batchID := fftypes.NewUUID() + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("DownloadData", mock.Anything, "ref1").Return(nil, fmt.Errorf("pop")) + mss.On("DownloadData", mock.Anything, "ref2").Return(reader, nil) + + mdi := dm.database.(*databasemocks.Plugin) + mdi.On("GetOperations", mock.Anything, mock.Anything).Return([]*fftypes.Operation{}, nil, fmt.Errorf("initial error")).Once() + mdi.On("GetOperations", mock.Anything, mock.MatchedBy(func(filter database.Filter) bool { + fi, err := filter.Finalize() + assert.NoError(t, err) + return fi.Skip == 0 && fi.Limit == 25 + })).Return([]*fftypes.Operation{ + { + // This one won't submit + Type: fftypes.OpTypeSharedStorageDownloadBlob, + ID: fftypes.NewUUID(), + Input: fftypes.JSONObject{ + "bad": "inputs", + }, + }, + { + // This one will be re-submitted and be marked failed + Type: fftypes.OpTypeSharedStorageDownloadBlob, + ID: fftypes.NewUUID(), + Input: fftypes.JSONObject{ + "namespace": "ns1", + "dataId": fftypes.NewUUID().String(), + "payloadRef": "ref1", + }, + }, + { + // This one will be re-submitted and succeed + Type: fftypes.OpTypeSharedStorageDownloadBatch, + ID: fftypes.NewUUID(), + Input: fftypes.JSONObject{ + "namespace": "ns1", + "payloadRef": "ref2", + }, + }, + }, nil, nil).Once() + mdi.On("GetOperations", mock.Anything, mock.MatchedBy(func(filter database.Filter) bool { + fi, err := filter.Finalize() + assert.NoError(t, err) + return fi.Skip == 25 && fi.Limit == 25 + })).Return([]*fftypes.Operation{}, nil, nil).Once() + 
mdi.On("ResolveOperation", mock.Anything, mock.Anything, fftypes.OpStatusFailed, "pop", mock.Anything).Run(func(args mock.Arguments) { + called <- true + }).Return(nil) + mdi.On("ResolveOperation", mock.Anything, mock.Anything, fftypes.OpStatusSucceeded, "", mock.Anything).Run(func(args mock.Arguments) { + called <- true + }).Return(nil) + + mci := dm.callbacks.(*shareddownloadmocks.Callbacks) + mci.On("SharedStorageBatchDownloaded", "ns1", "ref2", []byte("some batch data")).Return(batchID, nil) + + err := dm.Start() + assert.NoError(t, err) + + <-called + <-called + <-dm.recoveryComplete + + mss.AssertExpectations(t) + mdi.AssertExpectations(t) + mci.AssertExpectations(t) + +} + +func TestPrepareOperationUnknown(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + + _, err := dm.PrepareOperation(dm.ctx, &fftypes.Operation{ + Type: fftypes.CallTypeInvoke, + }) + assert.Regexp(t, "FF10371", err) +} + +func TestRunOperationUnknown(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + + _, _, err := dm.RunOperation(dm.ctx, &fftypes.PreparedOperation{ + Type: fftypes.CallTypeInvoke, + }) + assert.Regexp(t, "FF10378", err) +} diff --git a/internal/shareddownload/download_worker.go b/internal/shareddownload/download_worker.go new file mode 100644 index 0000000000..2d25cd21dc --- /dev/null +++ b/internal/shareddownload/download_worker.go @@ -0,0 +1,74 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package shareddownload + +import ( + "context" + "fmt" + + "github.com/hyperledger/firefly/internal/log" + "github.com/hyperledger/firefly/internal/operations" +) + +type downloadWorker struct { + ctx context.Context + done chan struct{} + dm *downloadManager +} + +func newDownloadWorker(dm *downloadManager, idx int) *downloadWorker { + dw := &downloadWorker{ + ctx: log.WithLogField(dm.ctx, "downloadworker", fmt.Sprintf("dw_%.3d", idx)), + done: make(chan struct{}), + dm: dm, + } + go dw.downloadWorkerLoop() + return dw +} + +func (dw *downloadWorker) downloadWorkerLoop() { + defer close(dw.done) + + l := log.L(dw.ctx) + for { + select { + case <-dw.ctx.Done(): + l.Debugf("Download worker shutting down") + return + case work := <-dw.dm.work: + dw.attemptWork(work) + } + } +} + +func (dw *downloadWorker) attemptWork(work *downloadWork) { + + work.attempts++ + isLastAttempt := work.attempts >= dw.dm.retryMaxAttempts + options := []operations.RunOperationOption{operations.RemainPendingOnFailure} + if isLastAttempt { + options = []operations.RunOperationOption{} + } + + err := dw.dm.operations.RunOperation(dw.ctx, work.preparedOp, options...) 
+	if err != nil {
+		log.L(dw.ctx).Errorf("Download operation %s/%s attempt=%d/%d failed: %s", work.preparedOp.Type, work.preparedOp.ID, work.attempts, dw.dm.retryMaxAttempts, err)
+		if !isLastAttempt {
+			go dw.dm.waitAndRetryDownload(work)
+		}
+	}
+}
diff --git a/internal/shareddownload/operations.go b/internal/shareddownload/operations.go
new file mode 100644
index 0000000000..51139a20ad
--- /dev/null
+++ b/internal/shareddownload/operations.go
@@ -0,0 +1,191 @@
+// Copyright © 2022 Kaleido, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package shareddownload
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+
+	"github.com/docker/go-units"
+	"github.com/hyperledger/firefly/internal/i18n"
+	"github.com/hyperledger/firefly/internal/log"
+	"github.com/hyperledger/firefly/pkg/fftypes"
+)
+
+type downloadBatchData struct {
+	Namespace  string `json:"namespace"`
+	PayloadRef string `json:"payloadRef"`
+}
+
+type downloadBlobData struct {
+	Namespace  string        `json:"namespace"`
+	DataID     *fftypes.UUID `json:"dataId"`
+	PayloadRef string        `json:"payloadRef"`
+}
+
+func addDownloadBatchInputs(op *fftypes.Operation, ns, payloadRef string) {
+	op.Input = fftypes.JSONObject{
+		"namespace":  ns,
+		"payloadRef": payloadRef,
+	}
+}
+
+func getDownloadBatchOutputs(batchID *fftypes.UUID) fftypes.JSONObject {
+	return fftypes.JSONObject{
+		"batch": batchID,
+	}
+}
+
+func addDownloadBlobInputs(op *fftypes.Operation, ns string, dataID *fftypes.UUID, payloadRef string) {
+	op.Input = fftypes.JSONObject{
+		"namespace":  ns,
+		"dataId":     dataID.String(),
+		"payloadRef": payloadRef,
+	}
+}
+
+func getDownloadBlobOutputs(hash *fftypes.Bytes32, size int64, dxPayloadRef string) fftypes.JSONObject {
+	return fftypes.JSONObject{
+		"hash":         hash,
+		"size":         size,
+		"dxPayloadRef": dxPayloadRef,
+	}
+}
+
+func retrieveDownloadBatchInputs(op *fftypes.Operation) (string, string) {
+	return op.Input.GetString("namespace"),
+		op.Input.GetString("payloadRef")
+}
+
+func retrieveDownloadBlobInputs(ctx context.Context, op *fftypes.Operation) (namespace string, dataID *fftypes.UUID, payloadRef string, err error) {
+	namespace = op.Input.GetString("namespace")
+	dataID, err = fftypes.ParseUUID(ctx, op.Input.GetString("dataId"))
+	if err != nil {
+		return "", nil, "", err
+	}
+	payloadRef = op.Input.GetString("payloadRef")
+	return
+}
+
+func (dm *downloadManager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) {
+	switch op.Type {
+
+	case fftypes.OpTypeSharedStorageDownloadBatch:
+		namespace, payloadRef := retrieveDownloadBatchInputs(op)
+		return opDownloadBatch(op, namespace, payloadRef), nil
+
+	case fftypes.OpTypeSharedStorageDownloadBlob:
+		namespace, dataID, payloadRef, err := retrieveDownloadBlobInputs(ctx, op)
+		if err != nil {
+			return nil, err
+		}
+		return opDownloadBlob(op, namespace, dataID, payloadRef), nil

+	default:
+		return nil, i18n.NewError(ctx, i18n.MsgOperationNotSupported, op.Type)
+	}
+}
+
+func (dm *downloadManager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (outputs fftypes.JSONObject, complete bool, err error) {
+	switch data := op.Data.(type) {
+	case downloadBatchData:
+		return dm.downloadBatch(ctx, data)
+	case downloadBlobData:
+		return dm.downloadBlob(ctx, data)
+	default:
+		return nil, false, i18n.NewError(ctx, i18n.MsgOperationDataIncorrect, op.Data)
+	}
+}
+
+// downloadBatch retrieves a serialized batch from shared storage, then persists it and drives a rewind
+// on the messages included (just as the event-driven path does when we receive data over DX).
+func (dm *downloadManager) downloadBatch(ctx context.Context, data downloadBatchData) (outputs fftypes.JSONObject, complete bool, err error) {
+
+	// Download into memory for batches
+	reader, err := dm.sharedstorage.DownloadData(ctx, data.PayloadRef)
+	if err != nil {
+		return nil, false, i18n.WrapError(ctx, err, i18n.MsgDownloadSharedFailed, data.PayloadRef)
+	}
+	defer reader.Close()
+
+	// Read from the stream up to the limit
+	maxReadLimit := dm.broadcastBatchPayloadLimit + 1024
+	limitedReader := io.LimitReader(reader, maxReadLimit)
+	batchBytes, err := ioutil.ReadAll(limitedReader)
+	if err != nil {
+		return nil, false, i18n.WrapError(ctx, err, i18n.MsgDownloadSharedFailed, data.PayloadRef)
+	}
+	if len(batchBytes) == int(maxReadLimit) {
+		return nil, false, i18n.NewError(ctx, i18n.MsgDownloadBatchMaxBytes, data.PayloadRef)
+	}
+
+	// Parse and store the batch
+	batchID, err := dm.callbacks.SharedStorageBatchDownloaded(data.Namespace, data.PayloadRef, batchBytes)
+	if err != nil {
+		return nil, false, err
+	}
+	return getDownloadBatchOutputs(batchID), true, nil
+}
+
+func (dm *downloadManager) downloadBlob(ctx context.Context, data downloadBlobData) (outputs fftypes.JSONObject, complete bool, err error) {
+
+	// Stream from shared storage ...
+	reader, err := dm.sharedstorage.DownloadData(ctx, data.PayloadRef)
+	if err != nil {
+		return nil, false, err
+	}
+	defer reader.Close()
+
+	// ... to data exchange
+	dxPayloadRef, hash, blobSize, err := dm.dataexchange.UploadBLOB(ctx, data.Namespace, *data.DataID, reader)
+	if err != nil {
+		return nil, false, i18n.WrapError(ctx, err, i18n.MsgDownloadSharedFailed, data.PayloadRef)
+	}
+	log.L(ctx).Infof("Transferred blob '%s' (%s) from shared storage '%s' to local data exchange '%s'", hash, units.HumanSizeWithPrecision(float64(blobSize), 2), data.PayloadRef, dxPayloadRef)
+
+	// ... then call back to store the metadata
+	err = dm.callbacks.SharedStorageBLOBDownloaded(*hash, blobSize, dxPayloadRef)
+	if err != nil {
+		return nil, false, err
+	}
+
+	return getDownloadBlobOutputs(hash, blobSize, dxPayloadRef), true, nil
+}
+
+func opDownloadBatch(op *fftypes.Operation, ns string, payloadRef string) *fftypes.PreparedOperation {
+	return &fftypes.PreparedOperation{
+		ID:   op.ID,
+		Type: op.Type,
+		Data: downloadBatchData{
+			Namespace:  ns,
+			PayloadRef: payloadRef,
+		},
+	}
+}
+
+func opDownloadBlob(op *fftypes.Operation, ns string, dataID *fftypes.UUID, payloadRef string) *fftypes.PreparedOperation {
+	return &fftypes.PreparedOperation{
+		ID:   op.ID,
+		Type: op.Type,
+		Data: downloadBlobData{
+			Namespace:  ns,
+			DataID:     dataID,
+			PayloadRef: payloadRef,
+		},
+	}
+}
diff --git a/internal/shareddownload/operations_test.go b/internal/shareddownload/operations_test.go
new file mode 100644
index 0000000000..1c7f7f027e
--- /dev/null
+++ b/internal/shareddownload/operations_test.go
@@ -0,0 +1,136 @@
+// Copyright © 2021 Kaleido, Inc.
+// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package shareddownload + +import ( + "bytes" + "fmt" + "io/ioutil" + "strings" + "testing" + "testing/iotest" + + "github.com/hyperledger/firefly/mocks/dataexchangemocks" + "github.com/hyperledger/firefly/mocks/shareddownloadmocks" + "github.com/hyperledger/firefly/mocks/sharedstoragemocks" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestDownloadBatchDownloadDataFail(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("DownloadData", mock.Anything, "ref1").Return(nil, fmt.Errorf("pop")) + + _, _, err := dm.downloadBatch(dm.ctx, downloadBatchData{ + Namespace: "ns1", + PayloadRef: "ref1", + }) + assert.Regexp(t, "FF10376", err) + + mss.AssertExpectations(t) +} + +func TestDownloadBatchDownloadDataReadFail(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + + reader := ioutil.NopCloser(iotest.ErrReader(fmt.Errorf("read failed"))) + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil) + + _, _, err := dm.downloadBatch(dm.ctx, downloadBatchData{ + Namespace: "ns1", + PayloadRef: "ref1", + }) + assert.Regexp(t, "FF10376", err) + + mss.AssertExpectations(t) +} + +func TestDownloadBatchDownloadDataReadMaxedOut(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + + dm.broadcastBatchPayloadLimit = 1 + reader := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 2048))) + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil) + + _, _, err := dm.downloadBatch(dm.ctx, downloadBatchData{ + Namespace: "ns1", + PayloadRef: "ref1", + }) + assert.Regexp(t, "FF10377", err) + + mss.AssertExpectations(t) +} + +func TestDownloadBatchDownloadCallbackFailed(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + + reader := ioutil.NopCloser(strings.NewReader("some batch data")) + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil) + + mci := dm.callbacks.(*shareddownloadmocks.Callbacks) + mci.On("SharedStorageBatchDownloaded", "ns1", "ref1", []byte("some batch data")).Return(nil, fmt.Errorf("pop")) + + _, _, err := dm.downloadBatch(dm.ctx, downloadBatchData{ + Namespace: "ns1", + PayloadRef: "ref1", + }) + assert.Regexp(t, "pop", err) + + mss.AssertExpectations(t) + mci.AssertExpectations(t) +} + +func TestDownloadBlobDownloadDataReadFail(t *testing.T) { + + dm, cancel := newTestDownloadManager(t) + defer cancel() + + reader := ioutil.NopCloser(iotest.ErrReader(fmt.Errorf("read failed"))) + + mss := dm.sharedstorage.(*sharedstoragemocks.Plugin) + mss.On("DownloadData", mock.Anything, "ref1").Return(reader, nil) + + mdx := 
dm.dataexchange.(*dataexchangemocks.Plugin) + mdx.On("UploadBLOB", mock.Anything, "ns1", mock.Anything, reader).Return("", nil, int64(-1), fmt.Errorf("pop")) + + _, _, err := dm.downloadBlob(dm.ctx, downloadBlobData{ + Namespace: "ns1", + PayloadRef: "ref1", + DataID: fftypes.NewUUID(), + }) + assert.Regexp(t, "FF10376", err) + + mss.AssertExpectations(t) + mdx.AssertExpectations(t) +} diff --git a/internal/sharedstorage/ipfs/ipfs.go b/internal/sharedstorage/ipfs/ipfs.go index 3677feee32..722f7f4365 100644 --- a/internal/sharedstorage/ipfs/ipfs.go +++ b/internal/sharedstorage/ipfs/ipfs.go @@ -72,7 +72,7 @@ func (i *IPFS) Capabilities() *sharedstorage.Capabilities { return i.capabilities } -func (i *IPFS) PublishData(ctx context.Context, data io.Reader) (string, error) { +func (i *IPFS) UploadData(ctx context.Context, data io.Reader) (string, error) { var ipfsResponse ipfsUploadResponse res, err := i.apiClient.R(). SetContext(ctx). @@ -86,7 +86,7 @@ func (i *IPFS) PublishData(ctx context.Context, data io.Reader) (string, error) return ipfsResponse.Hash, err } -func (i *IPFS) RetrieveData(ctx context.Context, payloadRef string) (data io.ReadCloser, err error) { +func (i *IPFS) DownloadData(ctx context.Context, payloadRef string) (data io.ReadCloser, err error) { res, err := i.gwClient.R(). SetContext(ctx). SetDoNotParseResponse(true). diff --git a/internal/sharedstorage/ipfs/ipfs_test.go b/internal/sharedstorage/ipfs/ipfs_test.go index b0cee1fcb0..fba4903e7e 100644 --- a/internal/sharedstorage/ipfs/ipfs_test.go +++ b/internal/sharedstorage/ipfs/ipfs_test.go @@ -91,7 +91,7 @@ func TestIPFSUploadSuccess(t *testing.T) { })) data := []byte(`hello world`) - payloadRef, err := i.PublishData(context.Background(), bytes.NewReader(data)) + payloadRef, err := i.UploadData(context.Background(), bytes.NewReader(data)) assert.NoError(t, err) assert.Equal(t, `Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD`, payloadRef) @@ -116,7 +116,7 @@ func TestIPFSUploadFail(t *testing.T) { httpmock.NewJsonResponderOrPanic(500, map[string]interface{}{"error": "pop"})) data := []byte(`hello world`) - _, err = i.PublishData(context.Background(), bytes.NewReader(data)) + _, err = i.UploadData(context.Background(), bytes.NewReader(data)) assert.Regexp(t, "FF10136", err) } @@ -140,7 +140,7 @@ func TestIPFSDownloadSuccess(t *testing.T) { httpmock.RegisterResponder("GET", "http://localhost:12345/ipfs/QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL", httpmock.NewBytesResponder(200, data)) - r, err := i.RetrieveData(context.Background(), "QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL") + r, err := i.DownloadData(context.Background(), "QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL") assert.NoError(t, err) defer r.Close() @@ -168,7 +168,7 @@ func TestIPFSDownloadFail(t *testing.T) { httpmock.RegisterResponder("GET", "http://localhost:12345/ipfs/QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL", httpmock.NewJsonResponderOrPanic(500, map[string]interface{}{"error": "pop"})) - _, err = i.RetrieveData(context.Background(), "QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL") + _, err = i.DownloadData(context.Background(), "QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL") assert.Regexp(t, "FF10136", err) } @@ -191,7 +191,7 @@ func TestIPFSDownloadError(t *testing.T) { httpmock.RegisterResponder("GET", "http://localhost:12345/ipfs/QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL", httpmock.NewErrorResponder(fmt.Errorf("pop"))) - _, err = i.RetrieveData(context.Background(), "QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL") + _, err = 
i.DownloadData(context.Background(), "QmRAQfHNnknnz8S936M2yJGhhVNA6wXJ4jTRP3VXtptmmL") assert.Regexp(t, "FF10136", err) } diff --git a/internal/syncasync/sync_async_bridge.go b/internal/syncasync/sync_async_bridge.go index 5181535701..9bc6e79047 100644 --- a/internal/syncasync/sync_async_bridge.go +++ b/internal/syncasync/sync_async_bridge.go @@ -377,8 +377,8 @@ func (sa *syncAsyncBridge) handleTransferOpFailedEvent(event *fftypes.EventDeliv return err } // Extract the LocalID of the transfer - var transfer fftypes.TokenTransfer - if err := txcommon.RetrieveTokenTransferInputs(sa.ctx, op, &transfer); err != nil { + transfer, err := txcommon.RetrieveTokenTransferInputs(sa.ctx, op) + if err != nil || transfer.LocalID == nil { log.L(sa.ctx).Warnf("Failed to extract token transfer inputs for operation '%s': %s", op.ID, err) } @@ -399,8 +399,8 @@ func (sa *syncAsyncBridge) handleApprovalOpFailedEvent(event *fftypes.EventDeliv return err } // Extract the LocalID of the transfer - var approval fftypes.TokenApproval - if err := txcommon.RetrieveTokenApprovalInputs(sa.ctx, op, &approval); err != nil { + approval, err := txcommon.RetrieveTokenApprovalInputs(sa.ctx, op) + if err != nil || approval.LocalID == nil { log.L(sa.ctx).Warnf("Failed to extract token approval inputs for operation '%s': %s", op.ID, err) } @@ -452,7 +452,7 @@ func (sa *syncAsyncBridge) resolveReply(inflight *inflightRequest, msg *fftypes. log.L(sa.ctx).Debugf("Resolving reply request '%s' with message '%s'", inflight.id, msg.Header.ID) response := &fftypes.MessageInOut{Message: *msg} - data, _, err := sa.data.GetMessageData(sa.ctx, msg, true) + data, _, err := sa.data.GetMessageDataCached(sa.ctx, msg) if err != nil { log.L(sa.ctx).Errorf("Failed to read response data for message '%s' on request '%s': %s", msg.Header.ID, inflight.id, err) return diff --git a/internal/syncasync/sync_async_bridge_test.go b/internal/syncasync/sync_async_bridge_test.go index 4aacfa1674..95b16dca9f 100644 --- a/internal/syncasync/sync_async_bridge_test.go +++ b/internal/syncasync/sync_async_bridge_test.go @@ -69,19 +69,21 @@ func TestRequestReplyOk(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) reply, err := sa.WaitForReply(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeMessageConfirmed, - Reference: replyID, - Correlator: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeMessageConfirmed, + Reference: replyID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) }() @@ -117,18 +119,20 @@ func TestAwaitConfirmationOk(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) reply, err := sa.WaitForMessage(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeMessageConfirmed, - Reference: requestID, - Namespace: "ns1", + 
EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeMessageConfirmed, + Reference: requestID, + Namespace: "ns1", + }, }, }) }() @@ -163,18 +167,20 @@ func TestAwaitConfirmationRejected(t *testing.T) { } mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return([]*fftypes.Data{ + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return(fftypes.DataArray{ {ID: dataID, Value: fftypes.JSONAnyPtr(`"response data"`)}, }, true, nil) _, err := sa.WaitForMessage(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeMessageRejected, - Reference: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeMessageRejected, + Reference: requestID, + Namespace: "ns1", + }, }, }) }() @@ -221,11 +227,13 @@ func TestEventCallbackNotInflight(t *testing.T) { mse.On("AddSystemEventListener", "ns1", mock.Anything).Return(nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: fftypes.NewUUID(), - Type: fftypes.EventTypeMessageConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: fftypes.NewUUID(), + Type: fftypes.EventTypeMessageConfirmed, + }, }, }) assert.NoError(t, err) @@ -243,11 +251,13 @@ func TestEventCallbackNotInflight(t *testing.T) { fftypes.EventTypeIdentityConfirmed, } { err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: fftypes.NewUUID(), - Type: eventType, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: fftypes.NewUUID(), + Type: eventType, + }, }, }) assert.NoError(t, err) @@ -268,11 +278,13 @@ func TestEventCallbackWrongType(t *testing.T) { } err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: fftypes.NewUUID(), - Type: fftypes.EventTypeIdentityUpdated, // We use the message for this one, so no sync/async handler + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: fftypes.NewUUID(), + Type: fftypes.EventTypeIdentityUpdated, // We use the message for this one, so no sync/async handler + }, }, }) assert.NoError(t, err) @@ -297,11 +309,13 @@ func TestEventCallbackMsgLookupFail(t *testing.T) { mdi.On("GetMessageByID", sa.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypeMessageConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypeMessageConfirmed, + }, }, }) assert.EqualError(t, err, "pop") @@ -326,11 +340,13 @@ func TestEventCallbackTokenPoolLookupFail(t *testing.T) { mdi.On("GetTokenPoolByID", sa.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypePoolConfirmed, + EnrichedEvent: 
fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypePoolConfirmed, + }, }, }) assert.EqualError(t, err, "pop") @@ -355,11 +371,13 @@ func TestEventCallbackIdentityLookupFail(t *testing.T) { mdi.On("GetIdentityByID", sa.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypeIdentityConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypeIdentityConfirmed, + }, }, }) assert.EqualError(t, err, "pop") @@ -384,11 +402,13 @@ func TestEventCallbackIdentityLookupNotFound(t *testing.T) { mdi.On("GetIdentityByID", sa.ctx, mock.Anything).Return(nil, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypeIdentityConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypeIdentityConfirmed, + }, }, }) assert.NoError(t, err) @@ -413,11 +433,13 @@ func TestEventCallbackTokenTransferLookupFail(t *testing.T) { mdi.On("GetTokenTransfer", sa.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypeTransferConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypeTransferConfirmed, + }, }, }) assert.EqualError(t, err, "pop") @@ -441,11 +463,13 @@ func TestEventCallbackTokenApprovalLookupFail(t *testing.T) { mdi.On("GetTokenApproval", sa.ctx, mock.Anything).Return(nil, fmt.Errorf("pop")) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypeApprovalConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypeApprovalConfirmed, + }, }, }) assert.EqualError(t, err, "pop") @@ -470,11 +494,13 @@ func TestEventCallbackMsgNotFound(t *testing.T) { mdi.On("GetMessageByID", sa.ctx, mock.Anything).Return(nil, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypeMessageConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypeMessageConfirmed, + }, }, }) assert.NoError(t, err) @@ -504,12 +530,14 @@ func TestEventCallbackRejectedMsgNotFound(t *testing.T) { mdi.On("GetMessageByID", sa.ctx, mock.Anything).Return(nil, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Correlator: correlationID, - Type: fftypes.EventTypeMessageRejected, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Correlator: correlationID, + Type: 
fftypes.EventTypeMessageRejected, + }, }, }) assert.NoError(t, err) @@ -535,11 +563,13 @@ func TestEventCallbackTokenPoolNotFound(t *testing.T) { mdi.On("GetTokenPoolByID", sa.ctx, mock.Anything).Return(nil, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypePoolConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypePoolConfirmed, + }, }, }) assert.NoError(t, err) @@ -565,11 +595,13 @@ func TestEventCallbackTokenTransferNotFound(t *testing.T) { mdi.On("GetTokenTransfer", sa.ctx, mock.Anything).Return(nil, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypeTransferConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypeTransferConfirmed, + }, }, }) assert.NoError(t, err) @@ -595,11 +627,13 @@ func TestEventCallbackTokenApprovalNotFound(t *testing.T) { mdi.On("GetTokenApproval", sa.ctx, mock.Anything).Return(nil, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Type: fftypes.EventTypeApprovalConfirmed, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Type: fftypes.EventTypeApprovalConfirmed, + }, }, }) assert.NoError(t, err) @@ -634,12 +668,14 @@ func TestEventCallbackTokenPoolRejectedNoData(t *testing.T) { mdi.On("GetMessageByID", sa.ctx, mock.Anything).Return(msg, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: fftypes.NewUUID(), - Correlator: responseID, - Type: fftypes.EventTypeMessageRejected, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: fftypes.NewUUID(), + Correlator: responseID, + Type: fftypes.EventTypeMessageRejected, + }, }, }) assert.NoError(t, err) @@ -682,12 +718,14 @@ func TestEventCallbackTokenPoolRejectedDataError(t *testing.T) { mdi.On("GetDataByID", sa.ctx, dataID, true).Return(nil, fmt.Errorf("pop")) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - Namespace: "ns1", - ID: fftypes.NewUUID(), - Reference: responseID, - Correlator: correlationID, - Type: fftypes.EventTypeMessageRejected, + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + Namespace: "ns1", + ID: fftypes.NewUUID(), + Reference: responseID, + Correlator: correlationID, + Type: fftypes.EventTypeMessageRejected, + }, }, }) assert.EqualError(t, err, "pop") @@ -701,7 +739,7 @@ func TestEventCallbackMsgDataLookupFail(t *testing.T) { defer cancel() mdm := sa.data.(*datamocks.Manager) - mdm.On("GetMessageData", sa.ctx, mock.Anything, true).Return(nil, false, fmt.Errorf("pop")) + mdm.On("GetMessageDataCached", sa.ctx, mock.Anything).Return(nil, false, fmt.Errorf("pop")) sa.resolveReply(&inflightRequest{}, &fftypes.Message{ Header: fftypes.MessageHeader{ @@ -738,11 +776,13 @@ func TestAwaitTokenPoolConfirmation(t *testing.T) { reply, err := sa.WaitForTokenPool(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - 
Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypePoolConfirmed, - Reference: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypePoolConfirmed, + Reference: requestID, + Namespace: "ns1", + }, }, }) }() @@ -803,12 +843,14 @@ func TestAwaitTokenPoolConfirmationRejected(t *testing.T) { _, err := sa.WaitForTokenPool(sa.ctx, "ns1", pool.Pool.ID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeMessageRejected, - Reference: msg.Header.ID, - Correlator: pool.Pool.ID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeMessageRejected, + Reference: msg.Header.ID, + Correlator: pool.Pool.ID, + Namespace: "ns1", + }, }, }) }() @@ -842,11 +884,13 @@ func TestAwaitTokenTransferConfirmation(t *testing.T) { reply, err := sa.WaitForTokenTransfer(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeTransferConfirmed, - Reference: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeTransferConfirmed, + Reference: requestID, + Namespace: "ns1", + }, }, }) }() @@ -882,11 +926,13 @@ func TestAwaitTokenApprovalConfirmation(t *testing.T) { reply, err := sa.WaitForTokenApproval(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeApprovalConfirmed, - Reference: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeApprovalConfirmed, + Reference: requestID, + Namespace: "ns1", + }, }, }) }() @@ -954,12 +1000,14 @@ func TestAwaitFailedTokenTransfer(t *testing.T) { _, err := sa.WaitForTokenTransfer(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeTransferOpFailed, - Reference: op.ID, - Correlator: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeTransferOpFailed, + Reference: op.ID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) }() @@ -990,12 +1038,14 @@ func TestAwaitFailedTokenApproval(t *testing.T) { _, err := sa.WaitForTokenApproval(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeApprovalOpFailed, - Reference: op.ID, - Correlator: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeApprovalOpFailed, + Reference: op.ID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) }() @@ -1029,12 +1079,14 @@ func TestFailedTokenTransferOpError(t *testing.T) { mdi.On("GetOperationByID", sa.ctx, op.ID).Return(nil, fmt.Errorf("pop")) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeTransferOpFailed, - Reference: op.ID, - Correlator: 
requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeTransferOpFailed, + Reference: op.ID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) assert.EqualError(t, err, "pop") @@ -1067,12 +1119,14 @@ func TestFailedTokenApprovalOpError(t *testing.T) { mdi.On("GetOperationByID", sa.ctx, op.ID).Return(nil, fmt.Errorf("pop")) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeApprovalOpFailed, - Reference: op.ID, - Correlator: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeApprovalOpFailed, + Reference: op.ID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) assert.EqualError(t, err, "pop") @@ -1105,12 +1159,14 @@ func TestFailedTokenApprovalOpNotFound(t *testing.T) { mdi.On("GetOperationByID", sa.ctx, op.ID).Return(nil, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeApprovalOpFailed, - Reference: op.ID, - Correlator: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeApprovalOpFailed, + Reference: op.ID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) assert.NoError(t, err) @@ -1141,12 +1197,14 @@ func TestFailedTokenApprovalIDLookupFail(t *testing.T) { mdi.On("GetOperationByID", sa.ctx, op.ID).Return(op, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeApprovalOpFailed, - Reference: op.ID, - Correlator: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeApprovalOpFailed, + Reference: op.ID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) assert.NoError(t, err) @@ -1179,12 +1237,14 @@ func TestFailedTokenTransferOpNotFound(t *testing.T) { mdi.On("GetOperationByID", sa.ctx, op.ID).Return(nil, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeTransferOpFailed, - Reference: op.ID, - Correlator: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeTransferOpFailed, + Reference: op.ID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) assert.NoError(t, err) @@ -1215,12 +1275,14 @@ func TestFailedTokenTransferIDLookupFail(t *testing.T) { mdi.On("GetOperationByID", sa.ctx, op.ID).Return(op, nil) err := sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeTransferOpFailed, - Reference: op.ID, - Correlator: requestID, - Namespace: "ns1", + EnrichedEvent: fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeTransferOpFailed, + Reference: op.ID, + Correlator: requestID, + Namespace: "ns1", + }, }, }) assert.NoError(t, err) @@ -1256,11 +1318,13 @@ func TestAwaitIdentityConfirmed(t *testing.T) { retIdentity, err := sa.WaitForIdentity(sa.ctx, "ns1", requestID, func(ctx context.Context) error { go func() { sa.eventCallback(&fftypes.EventDelivery{ - Event: fftypes.Event{ - ID: fftypes.NewUUID(), - Type: fftypes.EventTypeIdentityConfirmed, - Reference: requestID, - Namespace: "ns1", + EnrichedEvent: 
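Note on the pattern repeated through these sync_async_bridge_test.go hunks: fftypes.EventDelivery now embeds fftypes.EnrichedEvent, which in turn embeds fftypes.Event, so every delivery literal in the tests gains one level of nesting. A reduced sketch of the assumed shape, with fields abbreviated to those visible in the hunks rather than the full pkg/fftypes definitions:

    // Reduced sketch; the real definitions live in pkg/fftypes.
    type EnrichedEvent struct {
        Event // embedded core event (ID, Type, Reference, Correlator, Namespace, ...)

        // Optional enrichment, at most one set per event type:
        Message         *Message
        Transaction     *Transaction
        BlockchainEvent *BlockchainEvent
    }

    type EventDelivery struct {
        EnrichedEvent // embedded, hence EventDelivery{EnrichedEvent: EnrichedEvent{Event: Event{...}}}
    }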
fftypes.EnrichedEvent{ + Event: fftypes.Event{ + ID: fftypes.NewUUID(), + Type: fftypes.EventTypeIdentityConfirmed, + Reference: requestID, + Namespace: "ns1", + }, }, }) }() diff --git a/internal/tokens/fftokens/fftokens.go b/internal/tokens/fftokens/fftokens.go index 4d637bfb7a..dcf37036e7 100644 --- a/internal/tokens/fftokens/fftokens.go +++ b/internal/tokens/fftokens/fftokens.go @@ -59,9 +59,10 @@ const ( ) type tokenData struct { - TX *fftypes.UUID `json:"tx,omitempty"` - Message *fftypes.UUID `json:"message,omitempty"` - MessageHash *fftypes.Bytes32 `json:"messageHash,omitempty"` + TX *fftypes.UUID `json:"tx,omitempty"` + TXType fftypes.TransactionType `json:"txtype,omitempty"` + Message *fftypes.UUID `json:"message,omitempty"` + MessageHash *fftypes.Bytes32 `json:"messageHash,omitempty"` } type createPool struct { @@ -190,11 +191,13 @@ func (ft *FFTokens) handleTokenPoolCreate(ctx context.Context, data fftypes.JSON tokenType := data.GetString("type") protocolID := data.GetString("poolId") standard := data.GetString("standard") // optional + symbol := data.GetString("symbol") // optional rawOutput := data.GetObject("rawOutput") // optional tx := data.GetObject("transaction") txHash := tx.GetString("transactionHash") // optional location := data.GetString("location") signature := data.GetString("signature") + info := data.GetObject("info") // optional timestampStr := data.GetString("timestamp") timestamp, err := fftypes.ParseTimeString(timestampStr) @@ -217,12 +220,22 @@ func (ft *FFTokens) handleTokenPoolCreate(ctx context.Context, data fftypes.JSON poolData = tokenData{} } + txType := poolData.TXType + if txType == "" { + txType = fftypes.TransactionTypeTokenPool + } + pool := &tokens.TokenPool{ - Type: fftypes.FFEnum(tokenType), - ProtocolID: protocolID, - TransactionID: poolData.TX, - Connector: ft.configuredName, - Standard: standard, + Type: fftypes.FFEnum(tokenType), + ProtocolID: protocolID, + TX: fftypes.TransactionRef{ + ID: poolData.TX, + Type: txType, + }, + Connector: ft.configuredName, + Standard: standard, + Symbol: symbol, + Info: info, Event: blockchain.Event{ BlockchainTXID: txHash, Source: ft.Name() + ":" + ft.configuredName, @@ -297,6 +310,11 @@ func (ft *FFTokens) handleTokenTransfer(ctx context.Context, t fftypes.TokenTran return nil // move on } + txType := transferData.TXType + if txType == "" { + txType = fftypes.TransactionTypeTokenTransfer + } + transfer := &tokens.TokenTransfer{ PoolProtocolID: poolProtocolID, TokenTransfer: fftypes.TokenTransfer{ @@ -313,7 +331,7 @@ func (ft *FFTokens) handleTokenTransfer(ctx context.Context, t fftypes.TokenTran MessageHash: transferData.MessageHash, TX: fftypes.TransactionRef{ ID: transferData.TX, - Type: fftypes.TransactionTypeTokenTransfer, + Type: txType, }, }, Event: blockchain.Event{ @@ -369,6 +387,11 @@ func (ft *FFTokens) handleTokenApproval(ctx context.Context, data fftypes.JSONOb transferData = tokenData{} } + txType := transferData.TXType + if txType == "" { + txType = fftypes.TransactionTypeTokenApproval + } + approval := &tokens.TokenApproval{ PoolProtocolID: poolProtocolID, TokenApproval: fftypes.TokenApproval{ @@ -379,7 +402,7 @@ func (ft *FFTokens) handleTokenApproval(ctx context.Context, data fftypes.JSONOb ProtocolID: eventProtocolID, TX: fftypes.TransactionRef{ ID: transferData.TX, - Type: fftypes.TransactionTypeTokenApproval, + Type: txType, }, }, Event: blockchain.Event{ @@ -458,7 +481,8 @@ func (ft *FFTokens) eventLoop() { func (ft *FFTokens) CreateTokenPool(ctx context.Context, opID *fftypes.UUID, 
pool *fftypes.TokenPool) (complete bool, err error) { data, _ := json.Marshal(tokenData{ - TX: pool.TX.ID, + TX: pool.TX.ID, + TXType: pool.TX.Type, }) res, err := ft.client.R().SetContext(ctx). SetBody(&createPool{ @@ -485,12 +509,12 @@ func (ft *FFTokens) CreateTokenPool(ctx context.Context, opID *fftypes.UUID, poo return false, nil } -func (ft *FFTokens) ActivateTokenPool(ctx context.Context, opID *fftypes.UUID, pool *fftypes.TokenPool, event *fftypes.BlockchainEvent) (complete bool, err error) { +func (ft *FFTokens) ActivateTokenPool(ctx context.Context, opID *fftypes.UUID, pool *fftypes.TokenPool, blockchainInfo fftypes.JSONObject) (complete bool, err error) { res, err := ft.client.R().SetContext(ctx). SetBody(&activatePool{ RequestID: opID.String(), PoolID: pool.ProtocolID, - Transaction: event.Info, + Transaction: blockchainInfo, }). Post("/api/v1/activatepool") if err != nil || !res.IsSuccess() { @@ -510,6 +534,7 @@ func (ft *FFTokens) ActivateTokenPool(ctx context.Context, opID *fftypes.UUID, p func (ft *FFTokens) MintTokens(ctx context.Context, opID *fftypes.UUID, poolProtocolID string, mint *fftypes.TokenTransfer) error { data, _ := json.Marshal(tokenData{ TX: mint.TX.ID, + TXType: mint.TX.Type, Message: mint.Message, MessageHash: mint.MessageHash, }) @@ -533,6 +558,7 @@ func (ft *FFTokens) MintTokens(ctx context.Context, opID *fftypes.UUID, poolProt func (ft *FFTokens) BurnTokens(ctx context.Context, opID *fftypes.UUID, poolProtocolID string, burn *fftypes.TokenTransfer) error { data, _ := json.Marshal(tokenData{ TX: burn.TX.ID, + TXType: burn.TX.Type, Message: burn.Message, MessageHash: burn.MessageHash, }) @@ -556,6 +582,7 @@ func (ft *FFTokens) BurnTokens(ctx context.Context, opID *fftypes.UUID, poolProt func (ft *FFTokens) TransferTokens(ctx context.Context, opID *fftypes.UUID, poolProtocolID string, transfer *fftypes.TokenTransfer) error { data, _ := json.Marshal(tokenData{ TX: transfer.TX.ID, + TXType: transfer.TX.Type, Message: transfer.Message, MessageHash: transfer.MessageHash, }) @@ -579,7 +606,8 @@ func (ft *FFTokens) TransferTokens(ctx context.Context, opID *fftypes.UUID, pool func (ft *FFTokens) TokensApproval(ctx context.Context, opID *fftypes.UUID, poolProtocolID string, approval *fftypes.TokenApproval) error { data, _ := json.Marshal(tokenData{ - TX: approval.TX.ID, + TX: approval.TX.ID, + TXType: approval.TX.Type, }) res, err := ft.client.R().SetContext(ctx). 
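The fftokens changes here are symmetric: every outbound connector request now serializes the transaction type into the opaque data blob beside the transaction ID, and each inbound handler falls back to its operation's canonical type when txtype is absent, which keeps payloads written before this change working. A sketch of that fallback as a standalone helper; txTypeOrDefault is hypothetical, the patch inlines the check in each handler:

    // Hypothetical helper mirroring the inline fallback in the handlers above.
    func txTypeOrDefault(data tokenData, def fftypes.TransactionType) fftypes.TransactionType {
        if data.TXType == "" {
            return def // pre-upgrade payload: assume the handler's canonical type
        }
        return data.TXType
    }

    // e.g. handleTokenTransfer would use:
    //   txType := txTypeOrDefault(transferData, fftypes.TransactionTypeTokenTransfer)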
SetBody(&tokenApproval{ diff --git a/internal/tokens/fftokens/fftokens_test.go b/internal/tokens/fftokens/fftokens_test.go index 3de499bcb4..fc9dd00f3b 100644 --- a/internal/tokens/fftokens/fftokens_test.go +++ b/internal/tokens/fftokens/fftokens_test.go @@ -128,7 +128,10 @@ func TestCreateTokenPool(t *testing.T) { "config": map[string]interface{}{ "foo": "bar", }, - "data": `{"tx":"` + pool.TX.ID.String() + `"}`, + "data": fftypes.JSONObject{ + "tx": pool.TX.ID.String(), + "txtype": fftypes.TransactionTypeTokenPool.String(), + }.String(), "name": "new-pool", "symbol": "symbol", }, body) @@ -213,7 +216,7 @@ func TestCreateTokenPoolSynchronous(t *testing.T) { mcb := h.callbacks.(*tokenmocks.Callbacks) mcb.On("TokenPoolCreated", h, mock.MatchedBy(func(p *tokens.TokenPool) bool { - return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && *p.TransactionID == *pool.TX.ID && p.Event.ProtocolID == "000000000010/000020/000030/000040" + return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && *p.TX.ID == *pool.TX.ID && p.Event.ProtocolID == "000000000010/000020/000030/000040" })).Return(nil) complete, err := h.CreateTokenPool(context.Background(), opID, pool) @@ -274,9 +277,6 @@ func TestActivateTokenPool(t *testing.T) { txInfo := map[string]interface{}{ "foo": "bar", } - ev := &fftypes.BlockchainEvent{ - Info: txInfo, - } httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/activatepool", httpURL), func(req *http.Request) (*http.Response, error) { @@ -299,7 +299,7 @@ func TestActivateTokenPool(t *testing.T) { return res, nil }) - complete, err := h.ActivateTokenPool(context.Background(), opID, pool, ev) + complete, err := h.ActivateTokenPool(context.Background(), opID, pool, txInfo) assert.False(t, complete) assert.NoError(t, err) } @@ -315,12 +315,11 @@ func TestActivateTokenPoolError(t *testing.T) { Type: fftypes.TransactionTypeTokenPool, }, } - ev := &fftypes.BlockchainEvent{} httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/activatepool", httpURL), httpmock.NewJsonResponderOrPanic(500, fftypes.JSONObject{})) - complete, err := h.ActivateTokenPool(context.Background(), fftypes.NewUUID(), pool, ev) + complete, err := h.ActivateTokenPool(context.Background(), fftypes.NewUUID(), pool, nil) assert.False(t, complete) assert.Regexp(t, "FF10274", err) } @@ -336,9 +335,6 @@ func TestActivateTokenPoolSynchronous(t *testing.T) { txInfo := map[string]interface{}{ "foo": "bar", } - ev := &fftypes.BlockchainEvent{ - Info: txInfo, - } httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/activatepool", httpURL), func(req *http.Request) (*http.Response, error) { @@ -367,10 +363,10 @@ func TestActivateTokenPoolSynchronous(t *testing.T) { mcb := h.callbacks.(*tokenmocks.Callbacks) mcb.On("TokenPoolCreated", h, mock.MatchedBy(func(p *tokens.TokenPool) bool { - return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && p.TransactionID == nil && p.Event.ProtocolID == "" + return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && p.TX.ID == nil && p.Event.ProtocolID == "" })).Return(nil) - complete, err := h.ActivateTokenPool(context.Background(), opID, pool, ev) + complete, err := h.ActivateTokenPool(context.Background(), opID, pool, txInfo) assert.True(t, complete) assert.NoError(t, err) } @@ -386,9 +382,6 @@ func TestActivateTokenPoolSynchronousBadResponse(t *testing.T) { txInfo := map[string]interface{}{ "foo": "bar", } - ev := &fftypes.BlockchainEvent{ - Info: txInfo, - } httpmock.RegisterResponder("POST", fmt.Sprintf("%s/api/v1/activatepool", 
httpURL), func(req *http.Request) (*http.Response, error) { @@ -413,10 +406,10 @@ func TestActivateTokenPoolSynchronousBadResponse(t *testing.T) { mcb := h.callbacks.(*tokenmocks.Callbacks) mcb.On("TokenPoolCreated", h, mock.MatchedBy(func(p *tokens.TokenPool) bool { - return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && p.TransactionID == nil && p.Event.ProtocolID == "" + return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && p.TX.ID == nil && p.Event.ProtocolID == "" })).Return(nil) - complete, err := h.ActivateTokenPool(context.Background(), opID, pool, ev) + complete, err := h.ActivateTokenPool(context.Background(), opID, pool, txInfo) assert.False(t, complete) assert.Regexp(t, "FF10151", err) } @@ -448,7 +441,10 @@ func TestMintTokens(t *testing.T) { "amount": "10", "signer": "0x123", "requestId": opID.String(), - "data": `{"tx":"` + mint.TX.ID.String() + `"}`, + "data": fftypes.JSONObject{ + "tx": mint.TX.ID.String(), + "txtype": fftypes.TransactionTypeTokenTransfer.String(), + }.String(), }, body) res := &http.Response{ @@ -498,7 +494,10 @@ func TestTokenApproval(t *testing.T) { "foo": "bar", }, "requestId": opID.String(), - "data": `{"tx":"` + approval.TX.ID.String() + `"}`, + "data": fftypes.JSONObject{ + "tx": approval.TX.ID.String(), + "txtype": fftypes.TransactionTypeTokenApproval.String(), + }.String(), }, body) res := &http.Response{ @@ -570,7 +569,10 @@ func TestBurnTokens(t *testing.T) { "amount": "10", "signer": "0x123", "requestId": opID.String(), - "data": `{"tx":"` + burn.TX.ID.String() + `"}`, + "data": fftypes.JSONObject{ + "tx": burn.TX.ID.String(), + "txtype": fftypes.TransactionTypeTokenTransfer.String(), + }.String(), }, body) res := &http.Response{ @@ -631,7 +633,10 @@ func TestTransferTokens(t *testing.T) { "amount": "10", "signer": "0x123", "requestId": opID.String(), - "data": `{"tx":"` + transfer.TX.ID.String() + `"}`, + "data": fftypes.JSONObject{ + "tx": transfer.TX.ID.String(), + "txtype": fftypes.TransactionTypeTokenTransfer.String(), + }.String(), }, body) res := &http.Response{ @@ -724,7 +729,7 @@ func TestEvents(t *testing.T) { // token-pool: invalid uuid (success) mcb.On("TokenPoolCreated", h, mock.MatchedBy(func(p *tokens.TokenPool) bool { - return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && p.TransactionID == nil && p.Event.ProtocolID == "000000000010/000020/000030/000040" + return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && p.TX.ID == nil && p.Event.ProtocolID == "000000000010/000020/000030/000040" })).Return(nil).Once() fromServer <- fftypes.JSONObject{ "id": "7", @@ -745,7 +750,7 @@ func TestEvents(t *testing.T) { // token-pool: success mcb.On("TokenPoolCreated", h, mock.MatchedBy(func(p *tokens.TokenPool) bool { - return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && txID.Equals(p.TransactionID) && p.Event.ProtocolID == "000000000010/000020/000030/000040" + return p.ProtocolID == "F1" && p.Type == fftypes.TokenTypeFungible && txID.Equals(p.TX.ID) && p.Event.ProtocolID == "000000000010/000020/000030/000040" })).Return(nil).Once() fromServer <- fftypes.JSONObject{ "id": "8", diff --git a/internal/txcommon/event_enrich.go b/internal/txcommon/event_enrich.go new file mode 100644 index 0000000000..145acbcdd2 --- /dev/null +++ b/internal/txcommon/event_enrich.go @@ -0,0 +1,51 @@ +// Copyright © 2022 Kaleido, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package txcommon + +import ( + "context" + + "github.com/hyperledger/firefly/pkg/fftypes" +) + +func (t *transactionHelper) EnrichEvent(ctx context.Context, event *fftypes.Event) (*fftypes.EnrichedEvent, error) { + e := &fftypes.EnrichedEvent{ + Event: *event, + } + + switch event.Type { + case fftypes.EventTypeTransactionSubmitted: + tx, err := t.GetTransactionByIDCached(ctx, event.Reference) + if err != nil { + return nil, err + } + e.Transaction = tx + case fftypes.EventTypeMessageConfirmed, fftypes.EventTypeMessageRejected: + msg, _, _, err := t.data.GetMessageWithDataCached(ctx, event.Reference) + if err != nil { + return nil, err + } + e.Message = msg + case fftypes.EventTypeBlockchainEventReceived: + be, err := t.database.GetBlockchainEventByID(ctx, event.Reference) + if err != nil { + return nil, err + } + e.BlockchainEvent = be + } + return e, nil +} diff --git a/internal/txcommon/event_enrich_test.go b/internal/txcommon/event_enrich_test.go new file mode 100644 index 0000000000..d0ee00f968 --- /dev/null +++ b/internal/txcommon/event_enrich_test.go @@ -0,0 +1,202 @@ +// Copyright © 2022 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
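EnrichEvent above is a read-side decoration: it never mutates the stored event, and it populates at most one enrichment field, selected by event type. A hypothetical call-site fragment, where txHelper and dispatch are assumed names rather than code from this patch:

    enriched, err := txHelper.EnrichEvent(ctx, event)
    if err != nil {
        return err // a failed lookup aborts enrichment rather than emitting a partial event
    }
    // transaction_submitted         -> enriched.Transaction is set
    // message_confirmed / rejected  -> enriched.Message is set
    // blockchain_event_received     -> enriched.BlockchainEvent is set
    // anything else                 -> plain wrapped event, no extra lookups
    dispatch(enriched)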
+ +package txcommon + +import ( + "context" + "fmt" + "testing" + + "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" + "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestEnrichMessageConfirmed(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) + ctx := context.Background() + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + + // Setup enrichment + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref1}, + }, nil, true, nil) + + event := &fftypes.Event{ + ID: ev1, + Type: fftypes.EventTypeMessageConfirmed, + Reference: ref1, + } + + enriched, err := txHelper.EnrichEvent(ctx, event) + assert.NoError(t, err) + assert.Equal(t, ref1, enriched.Message.Header.ID) +} + +func TestEnrichMessageFail(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) + ctx := context.Background() + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + + // Setup enrichment + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(nil, nil, false, fmt.Errorf("pop")) + + event := &fftypes.Event{ + ID: ev1, + Type: fftypes.EventTypeMessageConfirmed, + Reference: ref1, + } + + _, err := txHelper.EnrichEvent(ctx, event) + assert.EqualError(t, err, "pop") +} + +func TestEnrichMessageRejected(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) + ctx := context.Background() + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + + // Setup enrichment + mdm.On("GetMessageWithDataCached", mock.Anything, ref1).Return(&fftypes.Message{ + Header: fftypes.MessageHeader{ID: ref1}, + }, nil, true, nil) + + event := &fftypes.Event{ + ID: ev1, + Type: fftypes.EventTypeMessageRejected, + Reference: ref1, + } + + enriched, err := txHelper.EnrichEvent(ctx, event) + assert.NoError(t, err) + assert.Equal(t, ref1, enriched.Message.Header.ID) +} + +func TestEnrichTxSubmitted(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) + ctx := context.Background() + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + + // Setup enrichment + mdi.On("GetTransactionByID", mock.Anything, ref1).Return(&fftypes.Transaction{ + ID: ref1, + }, nil) + + event := &fftypes.Event{ + ID: ev1, + Type: fftypes.EventTypeTransactionSubmitted, + Reference: ref1, + } + + enriched, err := txHelper.EnrichEvent(ctx, event) + assert.NoError(t, err) + assert.Equal(t, ref1, enriched.Transaction.ID) +} + +func TestEnrichTxFail(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) + ctx := context.Background() + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + + // Setup enrichment + mdi.On("GetTransactionByID", mock.Anything, ref1).Return(nil, fmt.Errorf("pop")) + + event := &fftypes.Event{ + ID: ev1, + Type: fftypes.EventTypeTransactionSubmitted, + Reference: ref1, + } + + _, err := txHelper.EnrichEvent(ctx, event) + assert.EqualError(t, err, "pop") +} + +func TestEnrichBlockchainEventSubmitted(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + txHelper := 
NewTransactionHelper(mdi, mdm) + ctx := context.Background() + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + + // Setup enrichment + mdi.On("GetBlockchainEventByID", mock.Anything, ref1).Return(&fftypes.BlockchainEvent{ + ID: ref1, + }, nil) + + event := &fftypes.Event{ + ID: ev1, + Type: fftypes.EventTypeBlockchainEventReceived, + Reference: ref1, + } + + enriched, err := txHelper.EnrichEvent(ctx, event) + assert.NoError(t, err) + assert.Equal(t, ref1, enriched.BlockchainEvent.ID) +} + +func TestEnrichBlockchainEventFail(t *testing.T) { + mdi := &databasemocks.Plugin{} + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) + ctx := context.Background() + + // Setup the IDs + ref1 := fftypes.NewUUID() + ev1 := fftypes.NewUUID() + + // Setup enrichment + mdi.On("GetBlockchainEventByID", mock.Anything, ref1).Return(nil, fmt.Errorf("pop")) + + event := &fftypes.Event{ + ID: ev1, + Type: fftypes.EventTypeBlockchainEventReceived, + Reference: ref1, + } + + _, err := txHelper.EnrichEvent(ctx, event) + assert.EqualError(t, err, "pop") +} diff --git a/internal/txcommon/token_inputs.go b/internal/txcommon/token_inputs.go index d58528ccba..a63ed202d2 100644 --- a/internal/txcommon/token_inputs.go +++ b/internal/txcommon/token_inputs.go @@ -19,59 +19,56 @@ package txcommon import ( "context" "encoding/json" - "fmt" "github.com/hyperledger/firefly/internal/i18n" "github.com/hyperledger/firefly/pkg/fftypes" ) -func AddTokenPoolCreateInputs(op *fftypes.Operation, pool *fftypes.TokenPool) { - op.Input = fftypes.JSONObject{ - "id": pool.ID.String(), - "namespace": pool.Namespace, - "name": pool.Name, - "symbol": pool.Symbol, - "config": pool.Config, +func AddTokenPoolCreateInputs(op *fftypes.Operation, pool *fftypes.TokenPool) (err error) { + var poolJSON []byte + if poolJSON, err = json.Marshal(pool); err == nil { + err = json.Unmarshal(poolJSON, &op.Input) } + return err } -func RetrieveTokenPoolCreateInputs(ctx context.Context, op *fftypes.Operation, pool *fftypes.TokenPool) (err error) { - input := &op.Input - pool.ID, err = fftypes.ParseUUID(ctx, input.GetString("id")) - if err != nil { - return err +func RetrieveTokenPoolCreateInputs(ctx context.Context, op *fftypes.Operation) (*fftypes.TokenPool, error) { + var pool fftypes.TokenPool + s := op.Input.String() + if err := json.Unmarshal([]byte(s), &pool); err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, s) } - pool.Namespace = input.GetString("namespace") - pool.Name = input.GetString("name") - if pool.Namespace == "" || pool.Name == "" { - return fmt.Errorf("namespace or name missing from inputs") + return &pool, nil +} + +func AddTokenPoolActivateInputs(op *fftypes.Operation, poolID *fftypes.UUID, blockchainInfo fftypes.JSONObject) { + op.Input = fftypes.JSONObject{ + "id": poolID.String(), + "info": blockchainInfo, } - pool.Symbol = input.GetString("symbol") - pool.Config = input.GetObject("config") - return nil +} + +func RetrieveTokenPoolActivateInputs(ctx context.Context, op *fftypes.Operation) (*fftypes.UUID, fftypes.JSONObject, error) { + id, err := fftypes.ParseUUID(ctx, op.Input.GetString("id")) + info := op.Input.GetObject("info") + return id, info, err } func AddTokenTransferInputs(op *fftypes.Operation, transfer *fftypes.TokenTransfer) (err error) { - var j []byte - if j, err = json.Marshal(transfer); err == nil { - err = json.Unmarshal(j, &op.Input) + var transferJSON []byte + if transferJSON, err = json.Marshal(transfer); err == nil { + err = 
json.Unmarshal(transferJSON, &op.Input) } return err } -func RetrieveTokenTransferInputs(ctx context.Context, op *fftypes.Operation, transfer *fftypes.TokenTransfer) (err error) { - var t fftypes.TokenTransfer +func RetrieveTokenTransferInputs(ctx context.Context, op *fftypes.Operation) (*fftypes.TokenTransfer, error) { + var transfer fftypes.TokenTransfer s := op.Input.String() - if err = json.Unmarshal([]byte(s), &t); err != nil { - return i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, s) - } - if t.LocalID == nil { - return i18n.NewError(ctx, i18n.MsgInvalidUUID) + if err := json.Unmarshal([]byte(s), &transfer); err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, s) } - // The LocalID is the only thing that needs to be read back out when processing an event - // (everything else should be unpacked from the event) - transfer.LocalID = t.LocalID - return nil + return &transfer, nil } func AddTokenApprovalInputs(op *fftypes.Operation, approval *fftypes.TokenApproval) (err error) { @@ -82,17 +79,11 @@ func AddTokenApprovalInputs(op *fftypes.Operation, approval *fftypes.TokenApprov return err } -func RetrieveTokenApprovalInputs(ctx context.Context, op *fftypes.Operation, approval *fftypes.TokenApproval) (err error) { - var a fftypes.TokenApproval +func RetrieveTokenApprovalInputs(ctx context.Context, op *fftypes.Operation) (approval *fftypes.TokenApproval, err error) { + var approve fftypes.TokenApproval s := op.Input.String() - if err = json.Unmarshal([]byte(s), &a); err != nil { - return i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, s) - } - if a.LocalID == nil { - return i18n.NewError(ctx, i18n.MsgInvalidUUID) + if err = json.Unmarshal([]byte(s), &approve); err != nil { + return nil, i18n.WrapError(ctx, err, i18n.MsgJSONObjectParseFailed, s) } - // The LocalID is the only thing that needs to be read back out when processing an event - // (everything else should be unpacked from the event) - approval.LocalID = a.LocalID - return nil + return &approve, nil } diff --git a/internal/txcommon/token_inputs_test.go b/internal/txcommon/token_inputs_test.go index 65633b965a..f8bd8b4214 100644 --- a/internal/txcommon/token_inputs_test.go +++ b/internal/txcommon/token_inputs_test.go @@ -59,9 +59,8 @@ func TestRetrieveTokenPoolCreateInputs(t *testing.T) { "config": config, }, } - pool := &fftypes.TokenPool{} - err := RetrieveTokenPoolCreateInputs(context.Background(), op, pool) + pool, err := RetrieveTokenPoolCreateInputs(context.Background(), op) assert.NoError(t, err) assert.Equal(t, *id, *pool.ID) assert.Equal(t, "ns1", pool.Namespace) @@ -76,23 +75,39 @@ func TestRetrieveTokenPoolCreateInputsBadID(t *testing.T) { "id": "bad", }, } - pool := &fftypes.TokenPool{} - err := RetrieveTokenPoolCreateInputs(context.Background(), op, pool) - assert.Regexp(t, "FF10142", err) + _, err := RetrieveTokenPoolCreateInputs(context.Background(), op) + assert.Regexp(t, "FF10151", err) +} + +func TestAddTokenPoolActivateInputs(t *testing.T) { + op := &fftypes.Operation{} + poolID := fftypes.NewUUID() + info := fftypes.JSONObject{ + "some": "info", + } + + AddTokenPoolActivateInputs(op, poolID, info) + assert.Equal(t, poolID.String(), op.Input.GetString("id")) + assert.Equal(t, info, op.Input.GetObject("info")) } -func TestRetrieveTokenPoolCreateInputsNoName(t *testing.T) { +func TestRetrieveTokenPoolActivateInputs(t *testing.T) { + id := fftypes.NewUUID() + info := fftypes.JSONObject{ + "foo": "bar", + } op := &fftypes.Operation{ Input: fftypes.JSONObject{ - "id": 
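The token_inputs.go rewrite above changes the contract of the Retrieve* helpers: each returns the decoded struct instead of filling in a caller-supplied one, and a missing LocalID is no longer an error here, since that check moved to the call sites (see the transfer.LocalID == nil and approval.LocalID == nil guards added in sync_async_bridge.go earlier in this patch). A round-trip fragment under those assumptions:

    // Add* is a straight JSON marshal into op.Input; Retrieve* is the inverse.
    op := &fftypes.Operation{}
    if err := AddTokenTransferInputs(op, &fftypes.TokenTransfer{LocalID: fftypes.NewUUID()}); err != nil {
        return err
    }
    transfer, err := RetrieveTokenTransferInputs(ctx, op)
    if err != nil || transfer.LocalID == nil {
        // the caller now owns the "is this transfer usable?" decision
    }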
fftypes.NewUUID().String(), - "namespace": "ns1", + "id": id.String(), + "info": info, }, } - pool := &fftypes.TokenPool{} - err := RetrieveTokenPoolCreateInputs(context.Background(), op, pool) - assert.Error(t, err) + poolID, newInfo, err := RetrieveTokenPoolActivateInputs(context.Background(), op) + assert.NoError(t, err) + assert.Equal(t, *id, *poolID) + assert.Equal(t, info, newInfo) } func TestAddTokenTransferInputs(t *testing.T) { @@ -127,12 +142,11 @@ func TestRetrieveTokenTransferInputs(t *testing.T) { "localId": id.String(), }, } - transfer := &fftypes.TokenTransfer{Amount: *fftypes.NewFFBigInt(2)} - err := RetrieveTokenTransferInputs(context.Background(), op, transfer) + transfer, err := RetrieveTokenTransferInputs(context.Background(), op) assert.NoError(t, err) assert.Equal(t, *id, *transfer.LocalID) - assert.Equal(t, int64(2), transfer.Amount.Int().Int64()) + assert.Equal(t, int64(1), transfer.Amount.Int().Int64()) } func TestRetrieveTokenTransferInputsBadID(t *testing.T) { @@ -141,22 +155,11 @@ func TestRetrieveTokenTransferInputsBadID(t *testing.T) { "localId": "bad", }, } - transfer := &fftypes.TokenTransfer{} - err := RetrieveTokenTransferInputs(context.Background(), op, transfer) + _, err := RetrieveTokenTransferInputs(context.Background(), op) assert.Regexp(t, "FF10151", err) } -func TestRetrieveTokenTransferInputsMissingID(t *testing.T) { - op := &fftypes.Operation{ - Input: fftypes.JSONObject{}, - } - transfer := &fftypes.TokenTransfer{} - - err := RetrieveTokenTransferInputs(context.Background(), op, transfer) - assert.Regexp(t, "FF10142", err) -} - func TestAddTokenApprovalInputs(t *testing.T) { op := &fftypes.Operation{} approval := &fftypes.TokenApproval{ @@ -187,13 +190,13 @@ func TestRetrieveTokenApprovalInputs(t *testing.T) { id := fftypes.NewUUID() op := &fftypes.Operation{ Input: fftypes.JSONObject{ - "amount": "1", - "localId": id.String(), + "amount": "1", + "localId": id.String(), + "approved": true, }, } - approval := &fftypes.TokenApproval{Approved: true} - err := RetrieveTokenApprovalInputs(context.Background(), op, approval) + approval, err := RetrieveTokenApprovalInputs(context.Background(), op) assert.NoError(t, err) assert.Equal(t, *id, *approval.LocalID) assert.Equal(t, true, approval.Approved) @@ -205,18 +208,7 @@ func TestRetrieveTokenApprovalInputsBadID(t *testing.T) { "localId": "bad", }, } - approval := &fftypes.TokenApproval{} - err := RetrieveTokenApprovalInputs(context.Background(), op, approval) + _, err := RetrieveTokenApprovalInputs(context.Background(), op) assert.Regexp(t, "FF10151", err) } - -func TestRetrieveTokenApprovalInputsMissingID(t *testing.T) { - op := &fftypes.Operation{ - Input: fftypes.JSONObject{}, - } - approval := &fftypes.TokenApproval{} - - err := RetrieveTokenApprovalInputs(context.Background(), op, approval) - assert.Regexp(t, "FF10142", err) -} diff --git a/internal/txcommon/txcommon.go b/internal/txcommon/txcommon.go index 05f4fdb3f8..bf593364fb 100644 --- a/internal/txcommon/txcommon.go +++ b/internal/txcommon/txcommon.go @@ -19,28 +19,60 @@ package txcommon import ( "context" "strings" + "time" + "github.com/hyperledger/firefly/internal/config" + "github.com/hyperledger/firefly/internal/data" "github.com/hyperledger/firefly/internal/log" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" + "github.com/karlseguin/ccache" ) type Helper interface { SubmitNewTransaction(ctx context.Context, ns string, txType fftypes.TransactionType) (*fftypes.UUID, error) PersistTransaction(ctx 
context.Context, ns string, id *fftypes.UUID, txType fftypes.TransactionType, blockchainTXID string) (valid bool, err error) AddBlockchainTX(ctx context.Context, id *fftypes.UUID, blockchainTXID string) error - WriteOperationSuccess(ctx context.Context, opID *fftypes.UUID, output fftypes.JSONObject) - WriteOperationFailure(ctx context.Context, opID *fftypes.UUID, err error) + EnrichEvent(ctx context.Context, event *fftypes.Event) (*fftypes.EnrichedEvent, error) + GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID) (*fftypes.Transaction, error) } type transactionHelper struct { - database database.Plugin + database database.Plugin + data data.Manager + transactionCache *ccache.Cache + transactionCacheTTL time.Duration } -func NewTransactionHelper(di database.Plugin) Helper { - return &transactionHelper{ +func NewTransactionHelper(di database.Plugin, dm data.Manager) Helper { + t := &transactionHelper{ database: di, + data: dm, } + t.transactionCache = ccache.New( + // We use a LRU cache with a size-aware max + ccache.Configure(). + MaxSize(config.GetByteSize(config.TransactionCacheSize)), + ) + return t +} + +func (t *transactionHelper) updateTransactionsCache(tx *fftypes.Transaction) { + t.transactionCache.Set(tx.ID.String(), tx, t.transactionCacheTTL) +} + +func (t *transactionHelper) GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID) (*fftypes.Transaction, error) { + cached := t.transactionCache.Get(id.String()) + if cached != nil { + cached.Extend(t.transactionCacheTTL) + return cached.Value().(*fftypes.Transaction), nil + } + tx, err := t.database.GetTransactionByID(ctx, id) + if err != nil || tx == nil { + return tx, err + } + t.updateTransactionsCache(tx) + return tx, nil } // SubmitNewTransaction is called when there is a new transaction being submitted by the local node @@ -56,7 +88,7 @@ func (t *transactionHelper) SubmitNewTransaction(ctx context.Context, ns string, return nil, err } - if err := t.database.InsertEvent(ctx, fftypes.NewEvent(fftypes.EventTypeTransactionSubmitted, tx.Namespace, tx.ID, tx.ID)); err != nil { + if err := t.database.InsertEvent(ctx, fftypes.NewEvent(fftypes.EventTypeTransactionSubmitted, tx.Namespace, tx.ID, tx.ID, tx.Type.String())); err != nil { return nil, err } @@ -66,6 +98,7 @@ func (t *transactionHelper) SubmitNewTransaction(ctx context.Context, ns string, // PersistTransaction is called when we need to ensure a transaction exists in the DB, and optionally associate a new BlockchainTXID to it func (t *transactionHelper) PersistTransaction(ctx context.Context, ns string, id *fftypes.UUID, txType fftypes.TransactionType, blockchainTXID string) (valid bool, err error) { + // TODO: Consider if this can exploit caching tx, err := t.database.GetTransactionByID(ctx, id) if err != nil { return false, err @@ -92,15 +125,20 @@ func (t *transactionHelper) PersistTransaction(ctx context.Context, ns string, i return false, err } - } else if err = t.database.InsertTransaction(ctx, &fftypes.Transaction{ - ID: id, - Namespace: ns, - Type: txType, - BlockchainIDs: fftypes.NewFFStringArray(strings.ToLower(blockchainTXID)), - }); err != nil { - return false, err + } else { + tx = &fftypes.Transaction{ + ID: id, + Namespace: ns, + Type: txType, + BlockchainIDs: fftypes.NewFFStringArray(strings.ToLower(blockchainTXID)), + } + if err = t.database.InsertTransaction(ctx, tx); err != nil { + return false, err + } } + t.updateTransactionsCache(tx) + return true, nil } @@ -128,15 +166,3 @@ func (t *transactionHelper) AddBlockchainTX(ctx 
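The cache wired into transactionHelper above is filled on both paths: GetTransactionByIDCached stores the result of a cache miss after the database lookup, and PersistTransaction stores whatever it just inserted or updated. A usage fragment matching TestGetTransactionByIDCached later in this patch:

    // First call goes to the database plugin and populates the ccache entry.
    tx, err := txHelper.GetTransactionByIDCached(ctx, txid)
    if err != nil {
        return err
    }
    // Second call is served from the cache, which is why the test can pin
    // its GetTransactionByID mock with .Once().
    tx, err = txHelper.GetTransactionByIDCached(ctx, txid)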
context.Context, id *fftypes.UUI return nil } - -func (t *transactionHelper) WriteOperationSuccess(ctx context.Context, opID *fftypes.UUID, output fftypes.JSONObject) { - if err2 := t.database.ResolveOperation(ctx, opID, fftypes.OpStatusSucceeded, "", output); err2 != nil { - log.L(ctx).Errorf("Failed to update operation %s: %s", opID, err2) - } -} - -func (t *transactionHelper) WriteOperationFailure(ctx context.Context, opID *fftypes.UUID, err error) { - if err2 := t.database.ResolveOperation(ctx, opID, fftypes.OpStatusFailed, err.Error(), nil); err2 != nil { - log.L(ctx).Errorf("Failed to update operation %s: %s", opID, err2) - } -} diff --git a/internal/txcommon/txcommon_test.go b/internal/txcommon/txcommon_test.go index e9982eb204..6fe1358a96 100644 --- a/internal/txcommon/txcommon_test.go +++ b/internal/txcommon/txcommon_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hyperledger/firefly/mocks/databasemocks" + "github.com/hyperledger/firefly/mocks/datamocks" "github.com/hyperledger/firefly/pkg/database" "github.com/hyperledger/firefly/pkg/fftypes" "github.com/stretchr/testify/assert" @@ -31,7 +32,8 @@ import ( func TestSubmitNewTransactionOK(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() var txidInserted *fftypes.UUID @@ -58,7 +60,8 @@ func TestSubmitNewTransactionOK(t *testing.T) { func TestSubmitNewTransactionFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() mdi.On("InsertTransaction", ctx, mock.Anything).Return(fmt.Errorf("pop")) @@ -73,7 +76,8 @@ func TestSubmitNewTransactionFail(t *testing.T) { func TestSubmitNewTransactionEventFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() mdi.On("InsertTransaction", ctx, mock.Anything).Return(nil) @@ -89,7 +93,8 @@ func TestSubmitNewTransactionEventFail(t *testing.T) { func TestPersistTransactionNew(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -113,7 +118,8 @@ func TestPersistTransactionNew(t *testing.T) { func TestPersistTransactionNewInserTFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -131,7 +137,8 @@ func TestPersistTransactionNewInserTFail(t *testing.T) { func TestPersistTransactionExistingAddBlockchainID(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -155,7 +162,8 @@ func TestPersistTransactionExistingAddBlockchainID(t *testing.T) { func TestPersistTransactionExistingUpdateFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -179,7 +187,8 @@ func TestPersistTransactionExistingUpdateFail(t *testing.T) { func 
TestPersistTransactionExistingNoChange(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -202,7 +211,8 @@ func TestPersistTransactionExistingNoChange(t *testing.T) { func TestPersistTransactionExistingNoBlockchainID(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -225,7 +235,8 @@ func TestPersistTransactionExistingNoBlockchainID(t *testing.T) { func TestPersistTransactionExistingLookupFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -242,7 +253,8 @@ func TestPersistTransactionExistingLookupFail(t *testing.T) { func TestPersistTransactionExistingMismatchNS(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -265,7 +277,8 @@ func TestPersistTransactionExistingMismatchNS(t *testing.T) { func TestPersistTransactionExistingMismatchType(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -288,7 +301,8 @@ func TestPersistTransactionExistingMismatchType(t *testing.T) { func TestAddBlockchainTX(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -318,7 +332,8 @@ func TestAddBlockchainTX(t *testing.T) { func TestAddBlockchainTXGetFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -334,7 +349,8 @@ func TestAddBlockchainTXGetFail(t *testing.T) { func TestAddBlockchainTXUpdateFail(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -357,7 +373,8 @@ func TestAddBlockchainTXUpdateFail(t *testing.T) { func TestAddBlockchainTXUnchanged(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() txid := fftypes.NewUUID() @@ -376,32 +393,29 @@ func TestAddBlockchainTXUnchanged(t *testing.T) { } -func TestWriteOperationSuccess(t *testing.T) { +func TestGetTransactionByIDCached(t *testing.T) { mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) + mdm := &datamocks.Manager{} + txHelper := NewTransactionHelper(mdi, mdm) ctx := context.Background() - opID := fftypes.NewUUID() - output := fftypes.JSONObject{"some": "info"} - mdi.On("ResolveOperation", ctx, opID, fftypes.OpStatusSucceeded, "", output).Return(fmt.Errorf("pop")) - - txHelper.WriteOperationSuccess(ctx, opID, output) - - mdi.AssertExpectations(t) - -} - -func 
TestWriteOperationFailure(t *testing.T) { - - mdi := &databasemocks.Plugin{} - txHelper := NewTransactionHelper(mdi) - ctx := context.Background() + txid := fftypes.NewUUID() + mdi.On("GetTransactionByID", ctx, txid).Return(&fftypes.Transaction{ + ID: txid, + Namespace: "ns1", + Type: fftypes.TransactionTypeContractInvoke, + Created: fftypes.Now(), + BlockchainIDs: fftypes.FFStringArray{"0x111111"}, + }, nil).Once() - opID := fftypes.NewUUID() - mdi.On("ResolveOperation", ctx, opID, fftypes.OpStatusFailed, "pop", mock.Anything).Return(fmt.Errorf("pop")) + tx, err := txHelper.GetTransactionByIDCached(ctx, txid) + assert.NoError(t, err) + assert.Equal(t, txid, tx.ID) - txHelper.WriteOperationFailure(ctx, opID, fmt.Errorf("pop")) + tx, err = txHelper.GetTransactionByIDCached(ctx, txid) + assert.NoError(t, err) + assert.Equal(t, txid, tx.ID) mdi.AssertExpectations(t) diff --git a/manifest.json b/manifest.json index 294e7dd26b..6c4f550b65 100644 --- a/manifest.json +++ b/manifest.json @@ -1,28 +1,28 @@ { "ethconnect": { "image": "ghcr.io/hyperledger/firefly-ethconnect", - "tag": "v3.1.3-20220301-21", - "sha": "62d84aeb9119303f635a02e2e43e7cf5baa1af8f0eb586da8a33a5d8d8ddf55c" + "tag": "v3.1.5", + "sha": "dc7f4f9a1eb1ba608a89f54876bb84324571bdd765ac735d2fafbb5a0862cd7c" }, "fabconnect": { "image": "ghcr.io/hyperledger/firefly-fabconnect", - "tag": "v0.9.10-20220221-16", - "sha": "0cc6125718cb8e467d448311036c1b4384aa1843c9d0f16e0dc3ac4fac736a8e" + "tag": "v0.9.13", + "sha": "d6de00e7377ba9a510f8f8caedcce32cdef471070008d28e7d166c4acb7d617f" }, "dataexchange-https": { "image": "ghcr.io/hyperledger/firefly-dataexchange-https", - "tag": "v0.10.3-20220209-6", - "sha": "a94776c7f89c27548149e080627fe3c55ad528835ecea0131b1c1ae96981398e" + "tag": "v0.10.5", + "sha": "70344c0f856be14304e4cba37c8c1620de3720262ad3d08de7ba46d633b83cbd" }, "tokens-erc1155": { "image": "ghcr.io/hyperledger/firefly-tokens-erc1155", - "tag": "v0.10.5-20220304-19", - "sha": "2e58cdd26f89d864c7a73c35293ffd4a9b6e74e7aa5b6b5ee37d305f9d6ef389" + "tag": "v0.10.6", + "sha": "26796e1364f749608b4034f6bc2f01c89f4f90fddb7dc51b058df2db3a67cf74" }, "tokens-erc20-erc721": { "image": "ghcr.io/hyperledger/firefly-tokens-erc20-erc721", - "tag": "v0.1.6-20220304-17", - "sha": "0d830b676c13f1c578b068be8e9512f1b2dbd6d8fb76d5d64cf34febe5d5042b" + "tag": "v0.2.0", + "sha": "a30a40ff4912931d06e008a354b3f9409defea98a5344303d663628d5e4a8629" }, "build": { "firefly-builder": { @@ -44,6 +44,6 @@ "release": "v0.5.0_8cb358c" }, "cli": { - "tag": "v0.0.44" + "tag": "v0.0.45" } } diff --git a/mocks/assetmocks/manager.go b/mocks/assetmocks/manager.go index 83ed285533..4c9c07db8c 100644 --- a/mocks/assetmocks/manager.go +++ b/mocks/assetmocks/manager.go @@ -18,13 +18,13 @@ type Manager struct { mock.Mock } -// ActivateTokenPool provides a mock function with given fields: ctx, pool, event -func (_m *Manager) ActivateTokenPool(ctx context.Context, pool *fftypes.TokenPool, event *fftypes.BlockchainEvent) error { - ret := _m.Called(ctx, pool, event) +// ActivateTokenPool provides a mock function with given fields: ctx, pool, blockchainInfo +func (_m *Manager) ActivateTokenPool(ctx context.Context, pool *fftypes.TokenPool, blockchainInfo fftypes.JSONObject) error { + ret := _m.Called(ctx, pool, blockchainInfo) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.TokenPool, *fftypes.BlockchainEvent) error); ok { - r0 = rf(ctx, pool, event) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.TokenPool, fftypes.JSONObject) error); ok { + r0 = 
rf(ctx, pool, blockchainInfo) } else { r0 = ret.Error(0) } @@ -385,6 +385,20 @@ func (_m *Manager) MintTokens(ctx context.Context, ns string, transfer *fftypes. return r0, r1 } +// Name provides a mock function with given fields: +func (_m *Manager) Name() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + // NewApproval provides a mock function with given fields: ns, approve func (_m *Manager) NewApproval(ns string, approve *fftypes.TokenApprovalInput) sysmessaging.MessageSender { ret := _m.Called(ns, approve) @@ -417,18 +431,57 @@ func (_m *Manager) NewTransfer(ns string, transfer *fftypes.TokenTransferInput) return r0 } -// Start provides a mock function with given fields: -func (_m *Manager) Start() error { - ret := _m.Called() +// PrepareOperation provides a mock function with given fields: ctx, op +func (_m *Manager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + ret := _m.Called(ctx, op) - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + var r0 *fftypes.PreparedOperation + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Operation) *fftypes.PreparedOperation); ok { + r0 = rf(ctx, op) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.PreparedOperation) + } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Operation) error); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RunOperation provides a mock function with given fields: ctx, op +func (_m *Manager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (fftypes.JSONObject, bool, error) { + ret := _m.Called(ctx, op) + + var r0 fftypes.JSONObject + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.PreparedOperation) fftypes.JSONObject); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fftypes.JSONObject) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.PreparedOperation) bool); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Get(1).(bool) + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.PreparedOperation) error); ok { + r2 = rf(ctx, op) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // TokenApproval provides a mock function with given fields: ctx, ns, approval, waitConfirm @@ -476,8 +529,3 @@ func (_m *Manager) TransferTokens(ctx context.Context, ns string, transfer *ffty return r0, r1 } - -// WaitStop provides a mock function with given fields: -func (_m *Manager) WaitStop() { - _m.Called() -} diff --git a/mocks/batchpinmocks/submitter.go b/mocks/batchpinmocks/submitter.go index 2902e504e2..49818b55f1 100644 --- a/mocks/batchpinmocks/submitter.go +++ b/mocks/batchpinmocks/submitter.go @@ -14,12 +14,79 @@ type Submitter struct { mock.Mock } +// Name provides a mock function with given fields: +func (_m *Submitter) Name() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// PrepareOperation provides a mock function with given fields: ctx, op +func (_m *Submitter) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + ret := _m.Called(ctx, op) + + var r0 *fftypes.PreparedOperation + if rf, ok := ret.Get(0).(func(context.Context, 
*fftypes.Operation) *fftypes.PreparedOperation); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.PreparedOperation) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Operation) error); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RunOperation provides a mock function with given fields: ctx, op +func (_m *Submitter) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (fftypes.JSONObject, bool, error) { + ret := _m.Called(ctx, op) + + var r0 fftypes.JSONObject + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.PreparedOperation) fftypes.JSONObject); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fftypes.JSONObject) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.PreparedOperation) bool); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Get(1).(bool) + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.PreparedOperation) error); ok { + r2 = rf(ctx, op) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // SubmitPinnedBatch provides a mock function with given fields: ctx, batch, contexts -func (_m *Submitter) SubmitPinnedBatch(ctx context.Context, batch *fftypes.Batch, contexts []*fftypes.Bytes32) error { +func (_m *Submitter) SubmitPinnedBatch(ctx context.Context, batch *fftypes.BatchPersisted, contexts []*fftypes.Bytes32) error { ret := _m.Called(ctx, batch, contexts) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Batch, []*fftypes.Bytes32) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted, []*fftypes.Bytes32) error); ok { r0 = rf(ctx, batch, contexts) } else { r0 = ret.Error(0) diff --git a/mocks/blockchainmocks/plugin.go b/mocks/blockchainmocks/plugin.go index d5a07e0a7c..01444d10d4 100644 --- a/mocks/blockchainmocks/plugin.go +++ b/mocks/blockchainmocks/plugin.go @@ -20,8 +20,8 @@ type Plugin struct { mock.Mock } -// AddSubscription provides a mock function with given fields: ctx, subscription -func (_m *Plugin) AddSubscription(ctx context.Context, subscription *fftypes.ContractListenerInput) error { +// AddContractListener provides a mock function with given fields: ctx, subscription +func (_m *Plugin) AddContractListener(ctx context.Context, subscription *fftypes.ContractListenerInput) error { ret := _m.Called(ctx, subscription) var r0 error @@ -50,8 +50,8 @@ func (_m *Plugin) Capabilities() *blockchain.Capabilities { return r0 } -// DeleteSubscription provides a mock function with given fields: ctx, subscription -func (_m *Plugin) DeleteSubscription(ctx context.Context, subscription *fftypes.ContractListener) error { +// DeleteContractListener provides a mock function with given fields: ctx, subscription +func (_m *Plugin) DeleteContractListener(ctx context.Context, subscription *fftypes.ContractListener) error { ret := _m.Called(ctx, subscription) var r0 error diff --git a/mocks/broadcastmocks/manager.go b/mocks/broadcastmocks/manager.go index e47d84e439..e3745a46b4 100644 --- a/mocks/broadcastmocks/manager.go +++ b/mocks/broadcastmocks/manager.go @@ -177,6 +177,20 @@ func (_m *Manager) BroadcastTokenPool(ctx context.Context, ns string, pool *ffty return r0, r1 } +// Name provides a mock function with given fields: +func (_m *Manager) Name() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + 
return r0 +} + // NewBroadcast provides a mock function with given fields: ns, in func (_m *Manager) NewBroadcast(ns string, in *fftypes.MessageInOut) sysmessaging.MessageSender { ret := _m.Called(ns, in) @@ -193,6 +207,59 @@ func (_m *Manager) NewBroadcast(ns string, in *fftypes.MessageInOut) sysmessagin return r0 } +// PrepareOperation provides a mock function with given fields: ctx, op +func (_m *Manager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + ret := _m.Called(ctx, op) + + var r0 *fftypes.PreparedOperation + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Operation) *fftypes.PreparedOperation); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.PreparedOperation) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Operation) error); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RunOperation provides a mock function with given fields: ctx, op +func (_m *Manager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (fftypes.JSONObject, bool, error) { + ret := _m.Called(ctx, op) + + var r0 fftypes.JSONObject + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.PreparedOperation) fftypes.JSONObject); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fftypes.JSONObject) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.PreparedOperation) bool); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Get(1).(bool) + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.PreparedOperation) error); ok { + r2 = rf(ctx, op) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // Start provides a mock function with given fields: func (_m *Manager) Start() error { ret := _m.Called() diff --git a/mocks/contractmocks/manager.go b/mocks/contractmocks/manager.go index 9ff2279f39..57d263aaa3 100644 --- a/mocks/contractmocks/manager.go +++ b/mocks/contractmocks/manager.go @@ -380,6 +380,73 @@ func (_m *Manager) InvokeContractAPI(ctx context.Context, ns string, apiName str return r0, r1 } +// Name provides a mock function with given fields: +func (_m *Manager) Name() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// PrepareOperation provides a mock function with given fields: ctx, op +func (_m *Manager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + ret := _m.Called(ctx, op) + + var r0 *fftypes.PreparedOperation + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Operation) *fftypes.PreparedOperation); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.PreparedOperation) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Operation) error); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RunOperation provides a mock function with given fields: ctx, op +func (_m *Manager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (fftypes.JSONObject, bool, error) { + ret := _m.Called(ctx, op) + + var r0 fftypes.JSONObject + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.PreparedOperation) fftypes.JSONObject); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fftypes.JSONObject) + } + } + + var r1 bool + if 
rf, ok := ret.Get(1).(func(context.Context, *fftypes.PreparedOperation) bool); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Get(1).(bool) + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.PreparedOperation) error); ok { + r2 = rf(ctx, op) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // ValidateFFIAndSetPathnames provides a mock function with given fields: ctx, ffi func (_m *Manager) ValidateFFIAndSetPathnames(ctx context.Context, ffi *fftypes.FFI) error { ret := _m.Called(ctx, ffi) diff --git a/mocks/databasemocks/plugin.go b/mocks/databasemocks/plugin.go index c92af2989e..c6fd02acc2 100644 --- a/mocks/databasemocks/plugin.go +++ b/mocks/databasemocks/plugin.go @@ -162,15 +162,15 @@ func (_m *Plugin) DeleteSubscriptionByID(ctx context.Context, id *fftypes.UUID) } // GetBatchByID provides a mock function with given fields: ctx, id -func (_m *Plugin) GetBatchByID(ctx context.Context, id *fftypes.UUID) (*fftypes.Batch, error) { +func (_m *Plugin) GetBatchByID(ctx context.Context, id *fftypes.UUID) (*fftypes.BatchPersisted, error) { ret := _m.Called(ctx, id) - var r0 *fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) *fftypes.Batch); ok { + var r0 *fftypes.BatchPersisted + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) *fftypes.BatchPersisted); ok { r0 = rf(ctx, id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*fftypes.Batch) + r0 = ret.Get(0).(*fftypes.BatchPersisted) } } @@ -185,15 +185,15 @@ func (_m *Plugin) GetBatchByID(ctx context.Context, id *fftypes.UUID) (*fftypes. } // GetBatches provides a mock function with given fields: ctx, filter -func (_m *Plugin) GetBatches(ctx context.Context, filter database.Filter) ([]*fftypes.Batch, *database.FilterResult, error) { +func (_m *Plugin) GetBatches(ctx context.Context, filter database.Filter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) { ret := _m.Called(ctx, filter) - var r0 []*fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, database.Filter) []*fftypes.Batch); ok { + var r0 []*fftypes.BatchPersisted + if rf, ok := ret.Get(0).(func(context.Context, database.Filter) []*fftypes.BatchPersisted); ok { r0 = rf(ctx, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Batch) + r0 = ret.Get(0).([]*fftypes.BatchPersisted) } } @@ -584,15 +584,15 @@ func (_m *Plugin) GetContractListeners(ctx context.Context, filter database.Filt } // GetData provides a mock function with given fields: ctx, filter -func (_m *Plugin) GetData(ctx context.Context, filter database.Filter) ([]*fftypes.Data, *database.FilterResult, error) { +func (_m *Plugin) GetData(ctx context.Context, filter database.Filter) (fftypes.DataArray, *database.FilterResult, error) { ret := _m.Called(ctx, filter) - var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, database.Filter) []*fftypes.Data); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, database.Filter) fftypes.DataArray); ok { r0 = rf(ctx, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Data) + r0 = ret.Get(0).(fftypes.DataArray) } } @@ -1193,6 +1193,29 @@ func (_m *Plugin) GetMessageByID(ctx context.Context, id *fftypes.UUID) (*fftype return r0, r1 } +// GetMessageIDs provides a mock function with given fields: ctx, filter +func (_m *Plugin) GetMessageIDs(ctx context.Context, filter database.Filter) ([]*fftypes.IDAndSequence, error) { + ret := _m.Called(ctx, filter) + + var r0 []*fftypes.IDAndSequence + if rf, ok := 
ret.Get(0).(func(context.Context, database.Filter) []*fftypes.IDAndSequence); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*fftypes.IDAndSequence) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, database.Filter) error); ok { + r1 = rf(ctx, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetMessages provides a mock function with given fields: ctx, filter func (_m *Plugin) GetMessages(ctx context.Context, filter database.Filter) ([]*fftypes.Message, *database.FilterResult, error) { ret := _m.Called(ctx, filter) @@ -2221,6 +2244,20 @@ func (_m *Plugin) InsertBlockchainEvent(ctx context.Context, event *fftypes.Bloc return r0 } +// InsertDataArray provides a mock function with given fields: ctx, data +func (_m *Plugin) InsertDataArray(ctx context.Context, data fftypes.DataArray) error { + ret := _m.Called(ctx, data) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, fftypes.DataArray) error); ok { + r0 = rf(ctx, data) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // InsertEvent provides a mock function with given fields: ctx, data func (_m *Plugin) InsertEvent(ctx context.Context, data *fftypes.Event) error { ret := _m.Called(ctx, data) @@ -2235,6 +2272,20 @@ func (_m *Plugin) InsertEvent(ctx context.Context, data *fftypes.Event) error { return r0 } +// InsertMessages provides a mock function with given fields: ctx, messages +func (_m *Plugin) InsertMessages(ctx context.Context, messages []*fftypes.Message) error { + ret := _m.Called(ctx, messages) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*fftypes.Message) error); ok { + r0 = rf(ctx, messages) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // InsertNextPin provides a mock function with given fields: ctx, nextpin func (_m *Plugin) InsertNextPin(ctx context.Context, nextpin *fftypes.NextPin) error { ret := _m.Called(ctx, nextpin) @@ -2249,13 +2300,34 @@ func (_m *Plugin) InsertNextPin(ctx context.Context, nextpin *fftypes.NextPin) e return r0 } -// InsertOperation provides a mock function with given fields: ctx, operation -func (_m *Plugin) InsertOperation(ctx context.Context, operation *fftypes.Operation) error { - ret := _m.Called(ctx, operation) +// InsertOperation provides a mock function with given fields: ctx, operation, hooks +func (_m *Plugin) InsertOperation(ctx context.Context, operation *fftypes.Operation, hooks ...database.PostCompletionHook) error { + _va := make([]interface{}, len(hooks)) + for _i := range hooks { + _va[_i] = hooks[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, operation) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Operation) error); ok { - r0 = rf(ctx, operation) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Operation, ...database.PostCompletionHook) error); ok { + r0 = rf(ctx, operation, hooks...) 
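	// The _va/_ca expansion above is mockery's standard pattern for variadic
	// parameters: each optional database.PostCompletionHook is flattened into
	// the Called() argument list so it can be matched individually. A test
	// against this mock (hypothetical wiring, not part of this patch) might
	// look like:
	//
	//	mdi := &databasemocks.Plugin{}
	//	mdi.On("InsertOperation", mock.Anything, mock.Anything).Return(nil)
	//	err := mdi.InsertOperation(context.Background(), op) // zero hooks: two matchers
	//
	// A call site that passes hooks needs one additional matcher per hook,
	// because each hook expands to its own argument in Called().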
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertPins provides a mock function with given fields: ctx, pins +func (_m *Plugin) InsertPins(ctx context.Context, pins []*fftypes.Pin) error { + ret := _m.Called(ctx, pins) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*fftypes.Pin) error); ok { + r0 = rf(ctx, pins) } else { r0 = ret.Error(0) } @@ -2473,6 +2545,20 @@ func (_m *Plugin) UpdateOffset(ctx context.Context, rowID int64, update database return r0 } +// UpdateOperation provides a mock function with given fields: ctx, id, update +func (_m *Plugin) UpdateOperation(ctx context.Context, id *fftypes.UUID, update database.Update) error { + ret := _m.Called(ctx, id, update) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, database.Update) error); ok { + r0 = rf(ctx, id, update) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // UpdatePins provides a mock function with given fields: ctx, filter, update func (_m *Plugin) UpdatePins(ctx context.Context, filter database.Filter, update database.Update) error { ret := _m.Called(ctx, filter, update) @@ -2544,11 +2630,11 @@ func (_m *Plugin) UpdateVerifier(ctx context.Context, hash *fftypes.Bytes32, upd } // UpsertBatch provides a mock function with given fields: ctx, data -func (_m *Plugin) UpsertBatch(ctx context.Context, data *fftypes.Batch) error { +func (_m *Plugin) UpsertBatch(ctx context.Context, data *fftypes.BatchPersisted) error { ret := _m.Called(ctx, data) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Batch) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted) error); ok { r0 = rf(ctx, data) } else { r0 = ret.Error(0) diff --git a/mocks/dataexchangemocks/callbacks.go b/mocks/dataexchangemocks/callbacks.go index 0e598d1421..37aaf0ed7e 100644 --- a/mocks/dataexchangemocks/callbacks.go +++ b/mocks/dataexchangemocks/callbacks.go @@ -12,20 +12,6 @@ type Callbacks struct { mock.Mock } -// BLOBReceived provides a mock function with given fields: peerID, hash, size, payloadRef -func (_m *Callbacks) BLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error { - ret := _m.Called(peerID, hash, size, payloadRef) - - var r0 error - if rf, ok := ret.Get(0).(func(string, fftypes.Bytes32, int64, string) error); ok { - r0 = rf(peerID, hash, size, payloadRef) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // MessageReceived provides a mock function with given fields: peerID, data func (_m *Callbacks) MessageReceived(peerID string, data []byte) (string, error) { ret := _m.Called(peerID, data) @@ -47,6 +33,20 @@ func (_m *Callbacks) MessageReceived(peerID string, data []byte) (string, error) return r0, r1 } +// PrivateBLOBReceived provides a mock function with given fields: peerID, hash, size, payloadRef +func (_m *Callbacks) PrivateBLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error { + ret := _m.Called(peerID, hash, size, payloadRef) + + var r0 error + if rf, ok := ret.Get(0).(func(string, fftypes.Bytes32, int64, string) error); ok { + r0 = rf(peerID, hash, size, payloadRef) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // TransferResult provides a mock function with given fields: trackingID, status, info func (_m *Callbacks) TransferResult(trackingID string, status fftypes.OpStatus, info fftypes.TransportStatusUpdate) error { ret := _m.Called(trackingID, status, info) diff --git a/mocks/datamocks/manager.go b/mocks/datamocks/manager.go index 
b932143e37..dfa2b55890 100644 --- a/mocks/datamocks/manager.go +++ b/mocks/datamocks/manager.go @@ -5,6 +5,7 @@ package datamocks import ( context "context" + data "github.com/hyperledger/firefly/internal/data" fftypes "github.com/hyperledger/firefly/pkg/fftypes" io "io" @@ -31,29 +32,6 @@ func (_m *Manager) CheckDatatype(ctx context.Context, ns string, datatype *fftyp return r0 } -// CopyBlobPStoDX provides a mock function with given fields: ctx, _a1 -func (_m *Manager) CopyBlobPStoDX(ctx context.Context, _a1 *fftypes.Data) (*fftypes.Blob, error) { - ret := _m.Called(ctx, _a1) - - var r0 *fftypes.Blob - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Data) *fftypes.Blob); ok { - r0 = rf(ctx, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*fftypes.Blob) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Data) error); ok { - r1 = rf(ctx, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // DownloadBLOB provides a mock function with given fields: ctx, ns, dataID func (_m *Manager) DownloadBLOB(ctx context.Context, ns string, dataID string) (*fftypes.Blob, io.ReadCloser, error) { ret := _m.Called(ctx, ns, dataID) @@ -86,29 +64,36 @@ func (_m *Manager) DownloadBLOB(ctx context.Context, ns string, dataID string) ( return r0, r1, r2 } -// GetMessageData provides a mock function with given fields: ctx, msg, withValue -func (_m *Manager) GetMessageData(ctx context.Context, msg *fftypes.Message, withValue bool) ([]*fftypes.Data, bool, error) { - ret := _m.Called(ctx, msg, withValue) - - var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Message, bool) []*fftypes.Data); ok { - r0 = rf(ctx, msg, withValue) +// GetMessageDataCached provides a mock function with given fields: ctx, msg, options +func (_m *Manager) GetMessageDataCached(ctx context.Context, msg *fftypes.Message, options ...data.CacheReadOption) (fftypes.DataArray, bool, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, msg) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Message, ...data.CacheReadOption) fftypes.DataArray); ok { + r0 = rf(ctx, msg, options...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Data) + r0 = ret.Get(0).(fftypes.DataArray) } } var r1 bool - if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Message, bool) bool); ok { - r1 = rf(ctx, msg, withValue) + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Message, ...data.CacheReadOption) bool); ok { + r1 = rf(ctx, msg, options...) } else { r1 = ret.Get(1).(bool) } var r2 error - if rf, ok := ret.Get(2).(func(context.Context, *fftypes.Message, bool) error); ok { - r2 = rf(ctx, msg, withValue) + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.Message, ...data.CacheReadOption) error); ok { + r2 = rf(ctx, msg, options...) 
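	// GetMessageDataCached follows the same variadic expansion for the new
	// data.CacheReadOption arguments. A sketch of the test wiring (assumed
	// names, not part of this patch):
	//
	//	mdm := &datamocks.Manager{}
	//	mdm.On("GetMessageDataCached", mock.Anything, mock.Anything).
	//		Return(fftypes.DataArray{}, true, nil)
	//
	// matches calls that pass no options; callers supplying CacheReadOption
	// values need one extra matcher per option.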
} else { r2 = ret.Error(2) } @@ -116,54 +101,68 @@ func (_m *Manager) GetMessageData(ctx context.Context, msg *fftypes.Message, wit return r0, r1, r2 } -// ResolveInlineDataBroadcast provides a mock function with given fields: ctx, ns, inData -func (_m *Manager) ResolveInlineDataBroadcast(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, []*fftypes.DataAndBlob, error) { - ret := _m.Called(ctx, ns, inData) - - var r0 fftypes.DataRefs - if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.InlineData) fftypes.DataRefs); ok { - r0 = rf(ctx, ns, inData) +// GetMessageWithDataCached provides a mock function with given fields: ctx, msgID, options +func (_m *Manager) GetMessageWithDataCached(ctx context.Context, msgID *fftypes.UUID, options ...data.CacheReadOption) (*fftypes.Message, fftypes.DataArray, bool, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, msgID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *fftypes.Message + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) *fftypes.Message); ok { + r0 = rf(ctx, msgID, options...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(fftypes.DataRefs) + r0 = ret.Get(0).(*fftypes.Message) } } - var r1 []*fftypes.DataAndBlob - if rf, ok := ret.Get(1).(func(context.Context, string, fftypes.InlineData) []*fftypes.DataAndBlob); ok { - r1 = rf(ctx, ns, inData) + var r1 fftypes.DataArray + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) fftypes.DataArray); ok { + r1 = rf(ctx, msgID, options...) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).([]*fftypes.DataAndBlob) + r1 = ret.Get(1).(fftypes.DataArray) } } - var r2 error - if rf, ok := ret.Get(2).(func(context.Context, string, fftypes.InlineData) error); ok { - r2 = rf(ctx, ns, inData) + var r2 bool + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) bool); ok { + r2 = rf(ctx, msgID, options...) } else { - r2 = ret.Error(2) + r2 = ret.Get(2).(bool) } - return r0, r1, r2 + var r3 error + if rf, ok := ret.Get(3).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) error); ok { + r3 = rf(ctx, msgID, options...) 
+ } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 } -// ResolveInlineDataPrivate provides a mock function with given fields: ctx, ns, inData -func (_m *Manager) ResolveInlineDataPrivate(ctx context.Context, ns string, inData fftypes.InlineData) (fftypes.DataRefs, error) { - ret := _m.Called(ctx, ns, inData) +// HydrateBatch provides a mock function with given fields: ctx, persistedBatch +func (_m *Manager) HydrateBatch(ctx context.Context, persistedBatch *fftypes.BatchPersisted) (*fftypes.Batch, error) { + ret := _m.Called(ctx, persistedBatch) - var r0 fftypes.DataRefs - if rf, ok := ret.Get(0).(func(context.Context, string, fftypes.InlineData) fftypes.DataRefs); ok { - r0 = rf(ctx, ns, inData) + var r0 *fftypes.Batch + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.BatchPersisted) *fftypes.Batch); ok { + r0 = rf(ctx, persistedBatch) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(fftypes.DataRefs) + r0 = ret.Get(0).(*fftypes.Batch) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, fftypes.InlineData) error); ok { - r1 = rf(ctx, ns, inData) + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.BatchPersisted) error); ok { + r1 = rf(ctx, persistedBatch) } else { r1 = ret.Error(1) } @@ -171,6 +170,62 @@ func (_m *Manager) ResolveInlineDataPrivate(ctx context.Context, ns string, inDa return r0, r1 } +// PeekMessageCache provides a mock function with given fields: ctx, id, options +func (_m *Manager) PeekMessageCache(ctx context.Context, id *fftypes.UUID, options ...data.CacheReadOption) (*fftypes.Message, fftypes.DataArray) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, id) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *fftypes.Message + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) *fftypes.Message); ok { + r0 = rf(ctx, id, options...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.Message) + } + } + + var r1 fftypes.DataArray + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID, ...data.CacheReadOption) fftypes.DataArray); ok { + r1 = rf(ctx, id, options...) 
+ } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(fftypes.DataArray) + } + } + + return r0, r1 +} + +// ResolveInlineData provides a mock function with given fields: ctx, msg +func (_m *Manager) ResolveInlineData(ctx context.Context, msg *data.NewMessage) error { + ret := _m.Called(ctx, msg) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *data.NewMessage) error); ok { + r0 = rf(ctx, msg) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateMessageCache provides a mock function with given fields: msg, _a1 +func (_m *Manager) UpdateMessageCache(msg *fftypes.Message, _a1 fftypes.DataArray) { + _m.Called(msg, _a1) +} + +// UpdateMessageIfCached provides a mock function with given fields: ctx, msg +func (_m *Manager) UpdateMessageIfCached(ctx context.Context, msg *fftypes.Message) { + _m.Called(ctx, msg) +} + // UploadBLOB provides a mock function with given fields: ctx, ns, inData, blob, autoMeta func (_m *Manager) UploadBLOB(ctx context.Context, ns string, inData *fftypes.DataRefOrValue, blob *fftypes.Multipart, autoMeta bool) (*fftypes.Data, error) { ret := _m.Called(ctx, ns, inData, blob, autoMeta) @@ -218,18 +273,18 @@ func (_m *Manager) UploadJSON(ctx context.Context, ns string, inData *fftypes.Da } // ValidateAll provides a mock function with given fields: ctx, _a1 -func (_m *Manager) ValidateAll(ctx context.Context, _a1 []*fftypes.Data) (bool, error) { +func (_m *Manager) ValidateAll(ctx context.Context, _a1 fftypes.DataArray) (bool, error) { ret := _m.Called(ctx, _a1) var r0 bool - if rf, ok := ret.Get(0).(func(context.Context, []*fftypes.Data) bool); ok { + if rf, ok := ret.Get(0).(func(context.Context, fftypes.DataArray) bool); ok { r0 = rf(ctx, _a1) } else { r0 = ret.Get(0).(bool) } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, []*fftypes.Data) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, fftypes.DataArray) error); ok { r1 = rf(ctx, _a1) } else { r1 = ret.Error(1) @@ -251,3 +306,22 @@ func (_m *Manager) VerifyNamespaceExists(ctx context.Context, ns string) error { return r0 } + +// WaitStop provides a mock function with given fields: +func (_m *Manager) WaitStop() { + _m.Called() +} + +// WriteNewMessage provides a mock function with given fields: ctx, newMsg +func (_m *Manager) WriteNewMessage(ctx context.Context, newMsg *data.NewMessage) error { + ret := _m.Called(ctx, newMsg) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *data.NewMessage) error); ok { + r0 = rf(ctx, newMsg) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/mocks/definitionsmocks/definition_handlers.go b/mocks/definitionsmocks/definition_handlers.go index 91b38ec4c4..e4d8b2cdf7 100644 --- a/mocks/definitionsmocks/definition_handlers.go +++ b/mocks/definitionsmocks/definition_handlers.go @@ -95,18 +95,18 @@ func (_m *DefinitionHandlers) GetGroupsNS(ctx context.Context, ns string, filter } // HandleDefinitionBroadcast provides a mock function with given fields: ctx, state, msg, data, tx -func (_m *DefinitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state definitions.DefinitionBatchState, msg *fftypes.Message, data []*fftypes.Data, tx *fftypes.UUID) (definitions.HandlerResult, error) { +func (_m *DefinitionHandlers) HandleDefinitionBroadcast(ctx context.Context, state definitions.DefinitionBatchState, msg *fftypes.Message, data fftypes.DataArray, tx *fftypes.UUID) (definitions.HandlerResult, error) { ret := _m.Called(ctx, state, msg, data, tx) var r0 definitions.HandlerResult - if rf, ok := 
ret.Get(0).(func(context.Context, definitions.DefinitionBatchState, *fftypes.Message, []*fftypes.Data, *fftypes.UUID) definitions.HandlerResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, definitions.DefinitionBatchState, *fftypes.Message, fftypes.DataArray, *fftypes.UUID) definitions.HandlerResult); ok { r0 = rf(ctx, state, msg, data, tx) } else { r0 = ret.Get(0).(definitions.HandlerResult) } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, definitions.DefinitionBatchState, *fftypes.Message, []*fftypes.Data, *fftypes.UUID) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, definitions.DefinitionBatchState, *fftypes.Message, fftypes.DataArray, *fftypes.UUID) error); ok { r1 = rf(ctx, state, msg, data, tx) } else { r1 = ret.Error(1) diff --git a/mocks/eventmocks/event_manager.go b/mocks/eventmocks/event_manager.go index 67f8d936ac..b6a99ecfdc 100644 --- a/mocks/eventmocks/event_manager.go +++ b/mocks/eventmocks/event_manager.go @@ -13,6 +13,8 @@ import ( mock "github.com/stretchr/testify/mock" + sharedstorage "github.com/hyperledger/firefly/pkg/sharedstorage" + system "github.com/hyperledger/firefly/internal/events/system" tokens "github.com/hyperledger/firefly/pkg/tokens" @@ -37,20 +39,6 @@ func (_m *EventManager) AddSystemEventListener(ns string, el system.EventListene return r0 } -// BLOBReceived provides a mock function with given fields: dx, peerID, hash, size, payloadRef -func (_m *EventManager) BLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error { - ret := _m.Called(dx, peerID, hash, size, payloadRef) - - var r0 error - if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, fftypes.Bytes32, int64, string) error); ok { - r0 = rf(dx, peerID, hash, size, payloadRef) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // BatchPinComplete provides a mock function with given fields: bi, batch, signingKey func (_m *EventManager) BatchPinComplete(bi blockchain.Plugin, batch *blockchain.BatchPin, signingKey *fftypes.VerifierRef) error { ret := _m.Called(bi, batch, signingKey) @@ -222,6 +210,57 @@ func (_m *EventManager) OperationUpdate(plugin fftypes.Named, operationID *fftyp return r0 } +// PrivateBLOBReceived provides a mock function with given fields: dx, peerID, hash, size, payloadRef +func (_m *EventManager) PrivateBLOBReceived(dx dataexchange.Plugin, peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error { + ret := _m.Called(dx, peerID, hash, size, payloadRef) + + var r0 error + if rf, ok := ret.Get(0).(func(dataexchange.Plugin, string, fftypes.Bytes32, int64, string) error); ok { + r0 = rf(dx, peerID, hash, size, payloadRef) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SharedStorageBLOBDownloaded provides a mock function with given fields: ss, hash, size, payloadRef +func (_m *EventManager) SharedStorageBLOBDownloaded(ss sharedstorage.Plugin, hash fftypes.Bytes32, size int64, payloadRef string) error { + ret := _m.Called(ss, hash, size, payloadRef) + + var r0 error + if rf, ok := ret.Get(0).(func(sharedstorage.Plugin, fftypes.Bytes32, int64, string) error); ok { + r0 = rf(ss, hash, size, payloadRef) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SharedStorageBatchDownloaded provides a mock function with given fields: ss, ns, payloadRef, data +func (_m *EventManager) SharedStorageBatchDownloaded(ss sharedstorage.Plugin, ns string, payloadRef string, data []byte) (*fftypes.UUID, error) { + ret := _m.Called(ss, ns, payloadRef, data) + + var r0 
*fftypes.UUID + if rf, ok := ret.Get(0).(func(sharedstorage.Plugin, string, string, []byte) *fftypes.UUID); ok { + r0 = rf(ss, ns, payloadRef, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.UUID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(sharedstorage.Plugin, string, string, []byte) error); ok { + r1 = rf(ss, ns, payloadRef, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Start provides a mock function with given fields: func (_m *EventManager) Start() error { ret := _m.Called() diff --git a/mocks/eventsmocks/plugin.go b/mocks/eventsmocks/plugin.go index 1288aaa2e7..b8a8f9f1a3 100644 --- a/mocks/eventsmocks/plugin.go +++ b/mocks/eventsmocks/plugin.go @@ -36,11 +36,11 @@ func (_m *Plugin) Capabilities() *events.Capabilities { } // DeliveryRequest provides a mock function with given fields: connID, sub, event, data -func (_m *Plugin) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (_m *Plugin) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { ret := _m.Called(connID, sub, event, data) var r0 error - if rf, ok := ret.Get(0).(func(string, *fftypes.Subscription, *fftypes.EventDelivery, []*fftypes.Data) error); ok { + if rf, ok := ret.Get(0).(func(string, *fftypes.Subscription, *fftypes.EventDelivery, fftypes.DataArray) error); ok { r0 = rf(connID, sub, event, data) } else { r0 = ret.Error(0) diff --git a/mocks/eventsmocks/plugin_all.go b/mocks/eventsmocks/plugin_all.go index 550385038e..d85005ea66 100644 --- a/mocks/eventsmocks/plugin_all.go +++ b/mocks/eventsmocks/plugin_all.go @@ -41,11 +41,11 @@ func (_m *PluginAll) ChangeEvent(connID string, ce *fftypes.ChangeEvent) { } // DeliveryRequest provides a mock function with given fields: connID, sub, event, data -func (_m *PluginAll) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error { +func (_m *PluginAll) DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error { ret := _m.Called(connID, sub, event, data) var r0 error - if rf, ok := ret.Get(0).(func(string, *fftypes.Subscription, *fftypes.EventDelivery, []*fftypes.Data) error); ok { + if rf, ok := ret.Get(0).(func(string, *fftypes.Subscription, *fftypes.EventDelivery, fftypes.DataArray) error); ok { r0 = rf(connID, sub, event, data) } else { r0 = ret.Error(0) diff --git a/mocks/networkmapmocks/manager.go b/mocks/networkmapmocks/manager.go index 7ec1c297d3..24618e70cb 100644 --- a/mocks/networkmapmocks/manager.go +++ b/mocks/networkmapmocks/manager.go @@ -128,13 +128,13 @@ func (_m *Manager) GetIdentityVerifiers(ctx context.Context, ns string, id strin return r0, r1, r2 } -// GetNodeByID provides a mock function with given fields: ctx, id -func (_m *Manager) GetNodeByID(ctx context.Context, id string) (*fftypes.Identity, error) { - ret := _m.Called(ctx, id) +// GetNodeByNameOrID provides a mock function with given fields: ctx, nameOrID +func (_m *Manager) GetNodeByNameOrID(ctx context.Context, nameOrID string) (*fftypes.Identity, error) { + ret := _m.Called(ctx, nameOrID) var r0 *fftypes.Identity if rf, ok := ret.Get(0).(func(context.Context, string) *fftypes.Identity); ok { - r0 = rf(ctx, id) + r0 = rf(ctx, nameOrID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*fftypes.Identity) @@ -143,7 +143,7 @@ func (_m *Manager) GetNodeByID(ctx context.Context, id string) 
(*fftypes.Identit var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, id) + r1 = rf(ctx, nameOrID) } else { r1 = ret.Error(1) } @@ -183,13 +183,13 @@ func (_m *Manager) GetNodes(ctx context.Context, filter database.AndFilter) ([]* return r0, r1, r2 } -// GetOrganizationByID provides a mock function with given fields: ctx, id -func (_m *Manager) GetOrganizationByID(ctx context.Context, id string) (*fftypes.Identity, error) { - ret := _m.Called(ctx, id) +// GetOrganizationByNameOrID provides a mock function with given fields: ctx, nameOrID +func (_m *Manager) GetOrganizationByNameOrID(ctx context.Context, nameOrID string) (*fftypes.Identity, error) { + ret := _m.Called(ctx, nameOrID) var r0 *fftypes.Identity if rf, ok := ret.Get(0).(func(context.Context, string) *fftypes.Identity); ok { - r0 = rf(ctx, id) + r0 = rf(ctx, nameOrID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*fftypes.Identity) @@ -198,7 +198,7 @@ func (_m *Manager) GetOrganizationByID(ctx context.Context, id string) (*fftypes var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, id) + r1 = rf(ctx, nameOrID) } else { r1 = ret.Error(1) } diff --git a/mocks/operationmocks/manager.go b/mocks/operationmocks/manager.go new file mode 100644 index 0000000000..cf0d3b6872 --- /dev/null +++ b/mocks/operationmocks/manager.go @@ -0,0 +1,103 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package operationmocks + +import ( + context "context" + + fftypes "github.com/hyperledger/firefly/pkg/fftypes" + mock "github.com/stretchr/testify/mock" + + operations "github.com/hyperledger/firefly/internal/operations" +) + +// Manager is an autogenerated mock type for the Manager type +type Manager struct { + mock.Mock +} + +// AddOrReuseOperation provides a mock function with given fields: ctx, op +func (_m *Manager) AddOrReuseOperation(ctx context.Context, op *fftypes.Operation) error { + ret := _m.Called(ctx, op) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Operation) error); ok { + r0 = rf(ctx, op) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PrepareOperation provides a mock function with given fields: ctx, op +func (_m *Manager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + ret := _m.Called(ctx, op) + + var r0 *fftypes.PreparedOperation + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Operation) *fftypes.PreparedOperation); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.PreparedOperation) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Operation) error); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RegisterHandler provides a mock function with given fields: ctx, handler, ops +func (_m *Manager) RegisterHandler(ctx context.Context, handler operations.OperationHandler, ops []fftypes.FFEnum) { + _m.Called(ctx, handler, ops) +} + +// RetryOperation provides a mock function with given fields: ctx, ns, opID +func (_m *Manager) RetryOperation(ctx context.Context, ns string, opID *fftypes.UUID) (*fftypes.Operation, error) { + ret := _m.Called(ctx, ns, opID) + + var r0 *fftypes.Operation + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID) *fftypes.Operation); ok { + r0 = rf(ctx, ns, opID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.Operation) + } + } + + var r1 error + if rf, ok := 
ret.Get(1).(func(context.Context, string, *fftypes.UUID) error); ok { + r1 = rf(ctx, ns, opID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RunOperation provides a mock function with given fields: ctx, op, options +func (_m *Manager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation, options ...operations.RunOperationOption) error { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, op) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.PreparedOperation, ...operations.RunOperationOption) error); ok { + r0 = rf(ctx, op, options...) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/mocks/orchestratormocks/orchestrator.go b/mocks/orchestratormocks/orchestrator.go index a5a1ca6883..108e388df9 100644 --- a/mocks/orchestratormocks/orchestrator.go +++ b/mocks/orchestratormocks/orchestrator.go @@ -26,6 +26,8 @@ import ( networkmap "github.com/hyperledger/firefly/internal/networkmap" + operations "github.com/hyperledger/firefly/internal/operations" + privatemessaging "github.com/hyperledger/firefly/internal/privatemessaging" ) @@ -205,15 +207,15 @@ func (_m *Orchestrator) Events() events.EventManager { } // GetBatchByID provides a mock function with given fields: ctx, ns, id -func (_m *Orchestrator) GetBatchByID(ctx context.Context, ns string, id string) (*fftypes.Batch, error) { +func (_m *Orchestrator) GetBatchByID(ctx context.Context, ns string, id string) (*fftypes.BatchPersisted, error) { ret := _m.Called(ctx, ns, id) - var r0 *fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, string, string) *fftypes.Batch); ok { + var r0 *fftypes.BatchPersisted + if rf, ok := ret.Get(0).(func(context.Context, string, string) *fftypes.BatchPersisted); ok { r0 = rf(ctx, ns, id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*fftypes.Batch) + r0 = ret.Get(0).(*fftypes.BatchPersisted) } } @@ -228,15 +230,15 @@ func (_m *Orchestrator) GetBatchByID(ctx context.Context, ns string, id string) } // GetBatches provides a mock function with given fields: ctx, ns, filter -func (_m *Orchestrator) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Batch, *database.FilterResult, error) { +func (_m *Orchestrator) GetBatches(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.BatchPersisted, *database.FilterResult, error) { ret := _m.Called(ctx, ns, filter) - var r0 []*fftypes.Batch - if rf, ok := ret.Get(0).(func(context.Context, string, database.AndFilter) []*fftypes.Batch); ok { + var r0 []*fftypes.BatchPersisted + if rf, ok := ret.Get(0).(func(context.Context, string, database.AndFilter) []*fftypes.BatchPersisted); ok { r0 = rf(ctx, ns, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Batch) + r0 = ret.Get(0).([]*fftypes.BatchPersisted) } } @@ -409,15 +411,15 @@ func (_m *Orchestrator) GetConfigRecords(ctx context.Context, filter database.An } // GetData provides a mock function with given fields: ctx, ns, filter -func (_m *Orchestrator) GetData(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.Data, *database.FilterResult, error) { +func (_m *Orchestrator) GetData(ctx context.Context, ns string, filter database.AndFilter) (fftypes.DataArray, *database.FilterResult, error) { ret := _m.Called(ctx, ns, filter) - var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, string, 
database.AndFilter) []*fftypes.Data); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, string, database.AndFilter) fftypes.DataArray); ok { r0 = rf(ctx, ns, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Data) + r0 = ret.Get(0).(fftypes.DataArray) } } @@ -596,6 +598,38 @@ func (_m *Orchestrator) GetEvents(ctx context.Context, ns string, filter databas return r0, r1, r2 } +// GetEventsWithReferences provides a mock function with given fields: ctx, ns, filter +func (_m *Orchestrator) GetEventsWithReferences(ctx context.Context, ns string, filter database.AndFilter) ([]*fftypes.EnrichedEvent, *database.FilterResult, error) { + ret := _m.Called(ctx, ns, filter) + + var r0 []*fftypes.EnrichedEvent + if rf, ok := ret.Get(0).(func(context.Context, string, database.AndFilter) []*fftypes.EnrichedEvent); ok { + r0 = rf(ctx, ns, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*fftypes.EnrichedEvent) + } + } + + var r1 *database.FilterResult + if rf, ok := ret.Get(1).(func(context.Context, string, database.AndFilter) *database.FilterResult); ok { + r1 = rf(ctx, ns, filter) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*database.FilterResult) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, string, database.AndFilter) error); ok { + r2 = rf(ctx, ns, filter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // GetMessageByID provides a mock function with given fields: ctx, ns, id func (_m *Orchestrator) GetMessageByID(ctx context.Context, ns string, id string) (*fftypes.Message, error) { ret := _m.Called(ctx, ns, id) @@ -643,15 +677,15 @@ func (_m *Orchestrator) GetMessageByIDWithData(ctx context.Context, ns string, i } // GetMessageData provides a mock function with given fields: ctx, ns, id -func (_m *Orchestrator) GetMessageData(ctx context.Context, ns string, id string) ([]*fftypes.Data, error) { +func (_m *Orchestrator) GetMessageData(ctx context.Context, ns string, id string) (fftypes.DataArray, error) { ret := _m.Called(ctx, ns, id) - var r0 []*fftypes.Data - if rf, ok := ret.Get(0).(func(context.Context, string, string) []*fftypes.Data); ok { + var r0 fftypes.DataArray + if rf, ok := ret.Get(0).(func(context.Context, string, string) fftypes.DataArray); ok { r0 = rf(ctx, ns, id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*fftypes.Data) + r0 = ret.Get(0).(fftypes.DataArray) } } @@ -958,6 +992,38 @@ func (_m *Orchestrator) GetOperations(ctx context.Context, ns string, filter dat return r0, r1, r2 } +// GetPins provides a mock function with given fields: ctx, filter +func (_m *Orchestrator) GetPins(ctx context.Context, filter database.AndFilter) ([]*fftypes.Pin, *database.FilterResult, error) { + ret := _m.Called(ctx, filter) + + var r0 []*fftypes.Pin + if rf, ok := ret.Get(0).(func(context.Context, database.AndFilter) []*fftypes.Pin); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*fftypes.Pin) + } + } + + var r1 *database.FilterResult + if rf, ok := ret.Get(1).(func(context.Context, database.AndFilter) *database.FilterResult); ok { + r1 = rf(ctx, filter) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*database.FilterResult) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, database.AndFilter) error); ok { + r2 = rf(ctx, filter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // GetStatus provides a mock function with given fields: ctx func (_m *Orchestrator) GetStatus(ctx context.Context) 
(*fftypes.NodeStatus, error) { ret := _m.Called(ctx) @@ -1238,6 +1304,22 @@ func (_m *Orchestrator) NetworkMap() networkmap.Manager { return r0 } +// Operations provides a mock function with given fields: +func (_m *Orchestrator) Operations() operations.Manager { + ret := _m.Called() + + var r0 operations.Manager + if rf, ok := ret.Get(0).(func() operations.Manager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(operations.Manager) + } + } + + return r0 +} + // PrivateMessaging provides a mock function with given fields: func (_m *Orchestrator) PrivateMessaging() privatemessaging.Manager { ret := _m.Called() diff --git a/mocks/privatemessagingmocks/manager.go b/mocks/privatemessagingmocks/manager.go index be51e56a61..2769d32059 100644 --- a/mocks/privatemessagingmocks/manager.go +++ b/mocks/privatemessagingmocks/manager.go @@ -94,6 +94,20 @@ func (_m *Manager) GetGroupsNS(ctx context.Context, ns string, filter database.A return r0, r1, r2 } +// Name provides a mock function with given fields: +func (_m *Manager) Name() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + // NewMessage provides a mock function with given fields: ns, msg func (_m *Manager) NewMessage(ns string, msg *fftypes.MessageInOut) sysmessaging.MessageSender { ret := _m.Called(ns, msg) @@ -110,6 +124,29 @@ func (_m *Manager) NewMessage(ns string, msg *fftypes.MessageInOut) sysmessaging return r0 } +// PrepareOperation provides a mock function with given fields: ctx, op +func (_m *Manager) PrepareOperation(ctx context.Context, op *fftypes.Operation) (*fftypes.PreparedOperation, error) { + ret := _m.Called(ctx, op) + + var r0 *fftypes.PreparedOperation + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Operation) *fftypes.PreparedOperation); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.PreparedOperation) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Operation) error); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // RequestReply provides a mock function with given fields: ctx, ns, request func (_m *Manager) RequestReply(ctx context.Context, ns string, request *fftypes.MessageInOut) (*fftypes.MessageInOut, error) { ret := _m.Called(ctx, ns, request) @@ -156,6 +193,36 @@ func (_m *Manager) ResolveInitGroup(ctx context.Context, msg *fftypes.Message) ( return r0, r1 } +// RunOperation provides a mock function with given fields: ctx, op +func (_m *Manager) RunOperation(ctx context.Context, op *fftypes.PreparedOperation) (fftypes.JSONObject, bool, error) { + ret := _m.Called(ctx, op) + + var r0 fftypes.JSONObject + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.PreparedOperation) fftypes.JSONObject); ok { + r0 = rf(ctx, op) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fftypes.JSONObject) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.PreparedOperation) bool); ok { + r1 = rf(ctx, op) + } else { + r1 = ret.Get(1).(bool) + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, *fftypes.PreparedOperation) error); ok { + r2 = rf(ctx, op) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // SendMessage provides a mock function with given fields: ctx, ns, in, waitConfirm func (_m *Manager) SendMessage(ctx context.Context, ns string, in *fftypes.MessageInOut, waitConfirm bool) (*fftypes.Message, 
error) { ret := _m.Called(ctx, ns, in, waitConfirm) diff --git a/mocks/shareddownloadmocks/callbacks.go b/mocks/shareddownloadmocks/callbacks.go new file mode 100644 index 0000000000..973e1599f2 --- /dev/null +++ b/mocks/shareddownloadmocks/callbacks.go @@ -0,0 +1,50 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package shareddownloadmocks + +import ( + fftypes "github.com/hyperledger/firefly/pkg/fftypes" + mock "github.com/stretchr/testify/mock" +) + +// Callbacks is an autogenerated mock type for the Callbacks type +type Callbacks struct { + mock.Mock +} + +// SharedStorageBLOBDownloaded provides a mock function with given fields: hash, size, payloadRef +func (_m *Callbacks) SharedStorageBLOBDownloaded(hash fftypes.Bytes32, size int64, payloadRef string) error { + ret := _m.Called(hash, size, payloadRef) + + var r0 error + if rf, ok := ret.Get(0).(func(fftypes.Bytes32, int64, string) error); ok { + r0 = rf(hash, size, payloadRef) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SharedStorageBatchDownloaded provides a mock function with given fields: ns, payloadRef, data +func (_m *Callbacks) SharedStorageBatchDownloaded(ns string, payloadRef string, data []byte) (*fftypes.UUID, error) { + ret := _m.Called(ns, payloadRef, data) + + var r0 *fftypes.UUID + if rf, ok := ret.Get(0).(func(string, string, []byte) *fftypes.UUID); ok { + r0 = rf(ns, payloadRef, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.UUID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string, []byte) error); ok { + r1 = rf(ns, payloadRef, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/mocks/shareddownloadmocks/manager.go b/mocks/shareddownloadmocks/manager.go new file mode 100644 index 0000000000..71ae0af7c2 --- /dev/null +++ b/mocks/shareddownloadmocks/manager.go @@ -0,0 +1,62 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
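// As with the other generated packages, consumers drive this mock through
// testify expectations. A minimal sketch (hypothetical test code, not part
// of this patch):
//
//	msd := &shareddownloadmocks.Manager{}
//	msd.On("InitiateDownloadBatch", mock.Anything, "ns1", mock.Anything, "ref1").Return(nil)
//	err := msd.InitiateDownloadBatch(context.Background(), "ns1", fftypes.NewUUID(), "ref1")
//	assert.NoError(t, err)
//	msd.AssertExpectations(t)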
+ +package shareddownloadmocks + +import ( + context "context" + + fftypes "github.com/hyperledger/firefly/pkg/fftypes" + mock "github.com/stretchr/testify/mock" +) + +// Manager is an autogenerated mock type for the Manager type +type Manager struct { + mock.Mock +} + +// InitiateDownloadBatch provides a mock function with given fields: ctx, ns, tx, payloadRef +func (_m *Manager) InitiateDownloadBatch(ctx context.Context, ns string, tx *fftypes.UUID, payloadRef string) error { + ret := _m.Called(ctx, ns, tx, payloadRef) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, string) error); ok { + r0 = rf(ctx, ns, tx, payloadRef) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InitiateDownloadBlob provides a mock function with given fields: ctx, ns, tx, dataID, payloadRef +func (_m *Manager) InitiateDownloadBlob(ctx context.Context, ns string, tx *fftypes.UUID, dataID *fftypes.UUID, payloadRef string) error { + ret := _m.Called(ctx, ns, tx, dataID, payloadRef) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *fftypes.UUID, *fftypes.UUID, string) error); ok { + r0 = rf(ctx, ns, tx, dataID, payloadRef) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: +func (_m *Manager) Start() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WaitStop provides a mock function with given fields: +func (_m *Manager) WaitStop() { + _m.Called() +} diff --git a/mocks/sharedstoragemocks/plugin.go b/mocks/sharedstoragemocks/plugin.go index 12fc2198e7..ede3016ce8 100644 --- a/mocks/sharedstoragemocks/plugin.go +++ b/mocks/sharedstoragemocks/plugin.go @@ -35,6 +35,29 @@ func (_m *Plugin) Capabilities() *sharedstorage.Capabilities { return r0 } +// DownloadData provides a mock function with given fields: ctx, payloadRef +func (_m *Plugin) DownloadData(ctx context.Context, payloadRef string) (io.ReadCloser, error) { + ret := _m.Called(ctx, payloadRef) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, string) io.ReadCloser); ok { + r0 = rf(ctx, payloadRef) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, payloadRef) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Init provides a mock function with given fields: ctx, prefix, callbacks func (_m *Plugin) Init(ctx context.Context, prefix config.Prefix, callbacks sharedstorage.Callbacks) error { ret := _m.Called(ctx, prefix, callbacks) @@ -68,8 +91,8 @@ func (_m *Plugin) Name() string { return r0 } -// PublishData provides a mock function with given fields: ctx, data -func (_m *Plugin) PublishData(ctx context.Context, data io.Reader) (string, error) { +// UploadData provides a mock function with given fields: ctx, data +func (_m *Plugin) UploadData(ctx context.Context, data io.Reader) (string, error) { ret := _m.Called(ctx, data) var r0 string @@ -88,26 +111,3 @@ func (_m *Plugin) PublishData(ctx context.Context, data io.Reader) (string, erro return r0, r1 } - -// RetrieveData provides a mock function with given fields: ctx, payloadRef -func (_m *Plugin) RetrieveData(ctx context.Context, payloadRef string) (io.ReadCloser, error) { - ret := _m.Called(ctx, payloadRef) - - var r0 io.ReadCloser - if rf, ok := ret.Get(0).(func(context.Context, string) io.ReadCloser); ok { - r0 
= rf(ctx, payloadRef) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(io.ReadCloser) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, payloadRef) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/mocks/tokenmocks/plugin.go b/mocks/tokenmocks/plugin.go index 565ce03000..671f621062 100644 --- a/mocks/tokenmocks/plugin.go +++ b/mocks/tokenmocks/plugin.go @@ -19,20 +19,20 @@ type Plugin struct { mock.Mock } -// ActivateTokenPool provides a mock function with given fields: ctx, opID, pool, event -func (_m *Plugin) ActivateTokenPool(ctx context.Context, opID *fftypes.UUID, pool *fftypes.TokenPool, event *fftypes.BlockchainEvent) (bool, error) { - ret := _m.Called(ctx, opID, pool, event) +// ActivateTokenPool provides a mock function with given fields: ctx, opID, pool, blockchainInfo +func (_m *Plugin) ActivateTokenPool(ctx context.Context, opID *fftypes.UUID, pool *fftypes.TokenPool, blockchainInfo fftypes.JSONObject) (bool, error) { + ret := _m.Called(ctx, opID, pool, blockchainInfo) var r0 bool - if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, *fftypes.TokenPool, *fftypes.BlockchainEvent) bool); ok { - r0 = rf(ctx, opID, pool, event) + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID, *fftypes.TokenPool, fftypes.JSONObject) bool); ok { + r0 = rf(ctx, opID, pool, blockchainInfo) } else { r0 = ret.Get(0).(bool) } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID, *fftypes.TokenPool, *fftypes.BlockchainEvent) error); ok { - r1 = rf(ctx, opID, pool, event) + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID, *fftypes.TokenPool, fftypes.JSONObject) error); ok { + r1 = rf(ctx, opID, pool, blockchainInfo) } else { r1 = ret.Error(1) } diff --git a/mocks/txcommonmocks/helper.go b/mocks/txcommonmocks/helper.go index bfff72281f..18c4a61333 100644 --- a/mocks/txcommonmocks/helper.go +++ b/mocks/txcommonmocks/helper.go @@ -28,6 +28,52 @@ func (_m *Helper) AddBlockchainTX(ctx context.Context, id *fftypes.UUID, blockch return r0 } +// EnrichEvent provides a mock function with given fields: ctx, event +func (_m *Helper) EnrichEvent(ctx context.Context, event *fftypes.Event) (*fftypes.EnrichedEvent, error) { + ret := _m.Called(ctx, event) + + var r0 *fftypes.EnrichedEvent + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.Event) *fftypes.EnrichedEvent); ok { + r0 = rf(ctx, event) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.EnrichedEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.Event) error); ok { + r1 = rf(ctx, event) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionByIDCached provides a mock function with given fields: ctx, id +func (_m *Helper) GetTransactionByIDCached(ctx context.Context, id *fftypes.UUID) (*fftypes.Transaction, error) { + ret := _m.Called(ctx, id) + + var r0 *fftypes.Transaction + if rf, ok := ret.Get(0).(func(context.Context, *fftypes.UUID) *fftypes.Transaction); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*fftypes.Transaction) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *fftypes.UUID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // PersistTransaction provides a mock function with given fields: ctx, ns, id, txType, blockchainTXID func (_m *Helper) PersistTransaction(ctx context.Context, ns string, id *fftypes.UUID, txType fftypes.FFEnum, 
blockchainTXID string) (bool, error) { ret := _m.Called(ctx, ns, id, txType, blockchainTXID) @@ -71,13 +117,3 @@ func (_m *Helper) SubmitNewTransaction(ctx context.Context, ns string, txType ff return r0, r1 } - -// WriteOperationFailure provides a mock function with given fields: ctx, opID, err -func (_m *Helper) WriteOperationFailure(ctx context.Context, opID *fftypes.UUID, err error) { - _m.Called(ctx, opID, err) -} - -// WriteOperationSuccess provides a mock function with given fields: ctx, opID, output -func (_m *Helper) WriteOperationSuccess(ctx context.Context, opID *fftypes.UUID, output fftypes.JSONObject) { - _m.Called(ctx, opID, output) -} diff --git a/pkg/blockchain/plugin.go b/pkg/blockchain/plugin.go index 96fdddbd67..a945b1d4f3 100644 --- a/pkg/blockchain/plugin.go +++ b/pkg/blockchain/plugin.go @@ -58,11 +58,11 @@ type Plugin interface { // QueryContract executes a method via custom on-chain logic and returns the result QueryContract(ctx context.Context, location *fftypes.JSONAny, method *fftypes.FFIMethod, input map[string]interface{}) (interface{}, error) - // AddSubscription adds a new subscription to a user-specified contract and event - AddSubscription(ctx context.Context, subscription *fftypes.ContractListenerInput) error + // AddContractListener adds a new subscription to a user-specified contract and event + AddContractListener(ctx context.Context, subscription *fftypes.ContractListenerInput) error - // DeleteSubscription deletes a previously-created subscription - DeleteSubscription(ctx context.Context, subscription *fftypes.ContractListener) error + // DeleteContractListener deletes a previously-created subscription + DeleteContractListener(ctx context.Context, subscription *fftypes.ContractListener) error // GetFFIParamValidator returns a blockchain-plugin-specific validator for FFIParams and their JSON Schema GetFFIParamValidator(ctx context.Context) (fftypes.FFIParamValidator, error) diff --git a/pkg/database/plugin.go b/pkg/database/plugin.go index 45ba9706d9..3b2f196832 100644 --- a/pkg/database/plugin.go +++ b/pkg/database/plugin.go @@ -77,6 +77,9 @@ type iMessageCollection interface { // must match the hash of the record that is being inserted. UpsertMessage(ctx context.Context, message *fftypes.Message, optimization UpsertOptimization) (err error) + // InsertMessages performs a batch insert of messages assured to be new records - fails if they already exist, so caller can fall back to upsert individually + InsertMessages(ctx context.Context, messages []*fftypes.Message) (err error) + // UpdateMessage - Update message UpdateMessage(ctx context.Context, id *fftypes.UUID, update Update) (err error) @@ -93,6 +96,9 @@ type iMessageCollection interface { // GetMessages - List messages, reverse sorted (newest first) by Confirmed then Created, with pagination, and simple must filters GetMessages(ctx context.Context, filter Filter) (message []*fftypes.Message, res *FilterResult, err error) + // GetMessageIDs - Retrieves messages, but only querying the messages ID (no other fields) + GetMessageIDs(ctx context.Context, filter Filter) (ids []*fftypes.IDAndSequence, err error) + // GetMessagesForData - List messages where there is a data reference to the specified ID GetMessagesForData(ctx context.Context, dataID *fftypes.UUID, filter Filter) (message []*fftypes.Message, res *FilterResult, err error) } @@ -103,6 +109,9 @@ type iDataCollection interface { // must match the hash of the record that is being inserted. 
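The new batch-insert methods (InsertMessages here, and InsertDataArray below) share one contract: they fail if any record already exists, so the caller can keep the upsert path as a fallback. A hypothetical caller-side sketch of that contract (the helper name, and the use of an UpsertOptimizationExisting hint, are assumptions rather than code from this patch):

package example

import (
	"context"

	"github.com/hyperledger/firefly/pkg/database"
	"github.com/hyperledger/firefly/pkg/fftypes"
)

// insertOrUpsertMessages is a hypothetical helper, not part of this patch
func insertOrUpsertMessages(ctx context.Context, db database.Plugin, msgs []*fftypes.Message) error {
	if err := db.InsertMessages(ctx, msgs); err == nil {
		return nil // fast path: every message was a new record
	}
	// Fallback: at least one message may already exist, so reconcile them individually
	for _, msg := range msgs {
		if err := db.UpsertMessage(ctx, msg, database.UpsertOptimizationExisting); err != nil {
			return err
		}
	}
	return nil
}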
UpsertData(ctx context.Context, data *fftypes.Data, optimization UpsertOptimization) (err error) + // InsertDataArray performs a batch insert of data assured to be new records - fails if they already exist, so caller can fall back to upsert individually + InsertDataArray(ctx context.Context, data fftypes.DataArray) (err error) + // UpdateData - Update data UpdateData(ctx context.Context, id *fftypes.UUID, update Update) (err error) @@ -110,7 +119,7 @@ type iDataCollection interface { GetDataByID(ctx context.Context, id *fftypes.UUID, withValue bool) (message *fftypes.Data, err error) // GetData - Get data - GetData(ctx context.Context, filter Filter) (message []*fftypes.Data, res *FilterResult, err error) + GetData(ctx context.Context, filter Filter) (message fftypes.DataArray, res *FilterResult, err error) // GetDataRefs - Get data references only (no data) GetDataRefs(ctx context.Context, filter Filter) (message fftypes.DataRefs, res *FilterResult, err error) @@ -118,16 +127,16 @@ type iDataCollection interface { type iBatchCollection interface { // UpsertBatch - Upsert a batch - the hash cannot change - UpsertBatch(ctx context.Context, data *fftypes.Batch) (err error) + UpsertBatch(ctx context.Context, data *fftypes.BatchPersisted) (err error) // UpdateBatch - Update data UpdateBatch(ctx context.Context, id *fftypes.UUID, update Update) (err error) // GetBatchByID - Get a batch by ID - GetBatchByID(ctx context.Context, id *fftypes.UUID) (message *fftypes.Batch, err error) + GetBatchByID(ctx context.Context, id *fftypes.UUID) (message *fftypes.BatchPersisted, err error) // GetBatches - Get batches - GetBatches(ctx context.Context, filter Filter) (message []*fftypes.Batch, res *FilterResult, err error) + GetBatches(ctx context.Context, filter Filter) (message []*fftypes.BatchPersisted, res *FilterResult, err error) } type iTransactionCollection interface { @@ -179,6 +188,9 @@ type iOffsetCollection interface { } type iPinCollection interface { + // InsertPins - Inserts a list of pins - fails if they already exist, so caller can fall back to upsert individually + InsertPins(ctx context.Context, pins []*fftypes.Pin) (err error) + // UpsertPin - Will insert a pin at the end of the sequence, unless the batch+hash+index sequence already exists UpsertPin(ctx context.Context, parked *fftypes.Pin) (err error) @@ -194,11 +206,14 @@ type iPinCollection interface { type iOperationCollection interface { // InsertOperation - Insert an operation - InsertOperation(ctx context.Context, operation *fftypes.Operation) (err error) + InsertOperation(ctx context.Context, operation *fftypes.Operation, hooks ...PostCompletionHook) (err error) // ResolveOperation - Resolve operation upon completion ResolveOperation(ctx context.Context, id *fftypes.UUID, status fftypes.OpStatus, errorMsg string, output fftypes.JSONObject) (err error) + // UpdateOperation - Update an operation + UpdateOperation(ctx context.Context, id *fftypes.UUID, update Update) (err error) + // GetOperationByID - Get an operation by ID GetOperationByID(ctx context.Context, id *fftypes.UUID) (operation *fftypes.Operation, err error) @@ -562,7 +577,7 @@ type OrderedUUIDCollectionNS CollectionName const ( CollectionMessages OrderedUUIDCollectionNS = "messages" CollectionEvents OrderedUUIDCollectionNS = "events" - CollectionBlockchainEvents OrderedUUIDCollectionNS = "contractevents" + CollectionBlockchainEvents OrderedUUIDCollectionNS = "blockchainevents" ) // OrderedCollection is a collection that is ordered, and that sequence is the only key @@ 
-627,6 +642,11 @@ const ( CollectionTokenBalances OtherCollection = "tokenbalances" ) +// PostCompletionHook is a closure/function that will be called after a successful insertion. +// This includes where the insert is nested in a RunAsGroup, and the database is transactional. +// These hooks are useful when triggering code that relies on the inserted database object being available. +type PostCompletionHook func() + // Callbacks are the methods for passing data from plugin to core // // If Capabilities returns ClusterEvents=true then these should be broadcast to every instance within @@ -656,7 +676,7 @@ type Callbacks interface { // Capabilities defines the capabilities a plugin can report as implementing or not type Capabilities struct { - ClusterEvents bool + Concurrency bool } // NamespaceQueryFactory filter fields for namespaces @@ -764,20 +784,19 @@ var OperationQueryFactory = &queryFields{ "output": &JSONField{}, "created": &TimeField{}, "updated": &TimeField{}, + "retry": &UUIDField{}, } // SubscriptionQueryFactory filter fields for data subscriptions var SubscriptionQueryFactory = &queryFields{ - "id": &UUIDField{}, - "namespace": &StringField{}, - "name": &StringField{}, - "transport": &StringField{}, - "events": &StringField{}, - "filter.topics": &StringField{}, - "filter.tag": &StringField{}, - "filter.group": &StringField{}, - "options": &StringField{}, - "created": &TimeField{}, + "id": &UUIDField{}, + "namespace": &StringField{}, + "name": &StringField{}, + "transport": &StringField{}, + "events": &StringField{}, + "filters": &JSONField{}, + "options": &StringField{}, + "created": &TimeField{}, } // EventQueryFactory filter fields for data events @@ -788,6 +807,7 @@ var EventQueryFactory = &queryFields{ "reference": &UUIDField{}, "correlator": &UUIDField{}, "tx": &UUIDField{}, + "topic": &StringField{}, "sequence": &Int64Field{}, "created": &TimeField{}, } diff --git a/pkg/dataexchange/plugin.go b/pkg/dataexchange/plugin.go index de1166aff7..1cbebd04fd 100644 --- a/pkg/dataexchange/plugin.go +++ b/pkg/dataexchange/plugin.go @@ -97,8 +97,8 @@ type Callbacks interface { // MessageReceived notifies of a message received from another node in the network MessageReceived(peerID string, data []byte) (manifest string, err error) - // BLOBReceived notifies of the ID of a BLOB that has been stored by DX after being received from another node in the network - BLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error + // PrivateBLOBReceived notifies of the ID of a BLOB that has been stored by DX after being received from another node in the network + PrivateBLOBReceived(peerID string, hash fftypes.Bytes32, size int64, payloadRef string) error // TransferResult notifies of a status update of a transfer (can have multiple status updates). TransferResult(trackingID string, status fftypes.OpStatus, info fftypes.TransportStatusUpdate) error diff --git a/pkg/events/plugin.go b/pkg/events/plugin.go index 15177c24c6..1aceceda2f 100644 --- a/pkg/events/plugin.go +++ b/pkg/events/plugin.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
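The PostCompletionHook introduced above gives callers a safe trigger point: the hook only fires once the insert has actually succeeded, including when the insert is batched inside a transactional RunAsGroup. A sketch of the intended usage with the extended InsertOperation signature (the wrapper and notify callback are hypothetical):

package example

import (
	"context"

	"github.com/hyperledger/firefly/pkg/database"
	"github.com/hyperledger/firefly/pkg/fftypes"
)

// insertWithHook is a hypothetical wrapper, not part of this patch
func insertWithHook(ctx context.Context, db database.Plugin, op *fftypes.Operation, notify func(*fftypes.Operation)) error {
	return db.InsertOperation(ctx, op, func() {
		// Runs only after a successful insert, so downstream code can rely
		// on the operation row being available to read back from the DB
		notify(op)
	})
}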
// // SPDX-License-Identifier: Apache-2.0 // @@ -48,7 +48,7 @@ type Plugin interface { // DeliveryRequest requests delivery of work on a connection, which must later be responded to // Data will only be supplied as non-nil if the subscription is set to include data - DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data []*fftypes.Data) error + DeliveryRequest(connID string, sub *fftypes.Subscription, event *fftypes.EventDelivery, data fftypes.DataArray) error } // ChangeEventListener is an optional interface for delivering database change events, only supported for ephemeral connections diff --git a/pkg/fftypes/batch.go b/pkg/fftypes/batch.go index db5bbc62a0..e1d144177c 100644 --- a/pkg/fftypes/batch.go +++ b/pkg/fftypes/batch.go @@ -17,38 +17,89 @@ package fftypes import ( - "context" "crypto/sha256" - "database/sql/driver" "encoding/json" +) + +// BatchType is the type of a batch +type BatchType = FFEnum - "github.com/hyperledger/firefly/internal/i18n" +var ( + // BatchTypeBroadcast is a batch that is broadcast via the shared data interface + BatchTypeBroadcast = ffEnum("batchtype", "broadcast") + // BatchTypePrivate is a batch that is sent privately to a group + BatchTypePrivate = ffEnum("batchtype", "private") ) -type Batch struct { - ID *UUID `json:"id"` - Namespace string `json:"namespace"` - Type MessageType `json:"type"` - Node *UUID `json:"node,omitempty"` +const ( + ManifestVersionUnset uint = 0 + ManifestVersion1 uint = 1 +) + +// BatchHeader is the common fields between the serialized batch, and the batch manifest +type BatchHeader struct { + ID *UUID `json:"id"` + Type BatchType `json:"type" ffenum:"batchtype"` + Namespace string `json:"namespace"` + Node *UUID `json:"node,omitempty"` SignerRef - Group *Bytes32 `jdon:"group,omitempty"` - Hash *Bytes32 `json:"hash"` - Created *FFTime `json:"created"` - Confirmed *FFTime `json:"confirmed"` - Payload BatchPayload `json:"payload"` - PayloadRef string `json:"payloadRef,omitempty"` - Blobs []*Bytes32 `json:"blobs,omitempty"` // only used in-flight + Group *Bytes32 `json:"group,omitempty"` + Created *FFTime `json:"created"` +} + +type MessageManifestEntry struct { + MessageRef + Topics int `json:"topics"` // We only need the count, to be able to match up the pins +} + +// BatchManifest is all we need to persist to be able to reconstitute +// an identical batch, and also all of the fields that are protected by +// the hash of the batch. +// It can be generated from a received batch to +// confirm you have received an identical batch to that sent. +type BatchManifest struct { + Version uint `json:"version"` + ID *UUID `json:"id"` + TX TransactionRef `json:"tx"` + SignerRef + Messages []*MessageManifestEntry `json:"messages"` + Data DataRefs `json:"data"` +} + +// Batch is the full payload object used in-flight. +type Batch struct { + BatchHeader + Hash *Bytes32 `json:"hash"` + Payload BatchPayload `json:"payload"` +} + +// BatchPersisted is the structure written to the database +type BatchPersisted struct { + BatchHeader + Hash *Bytes32 `json:"hash"` + Manifest *JSONAny `json:"manifest"` + TX TransactionRef `json:"tx"` + PayloadRef string `json:"payloadRef,omitempty"` + Confirmed *FFTime `json:"confirmed"` } +// BatchPayload contains the full JSON of the messages and data, but +// importantly only the immutable parts of the messages/data. +// In v0.13 and earlier, we used the whole of this payload object to + form the hash of the in-flight batch.
Subsequent to that we only +// calculate the hash of the manifest, as that contains the hashes +// of all the messages and data (thus minimizing the overhead of +// calculating the hash). +// - See Message.BatchMessage() and Data.BatchData() type BatchPayload struct { TX TransactionRef `json:"tx"` Messages []*Message `json:"messages"` - Data []*Data `json:"data"` + Data DataArray `json:"data"` } -// Value implements sql.Valuer -func (ma BatchPayload) Value() (driver.Value, error) { - return json.Marshal(&ma) +func (bm *BatchManifest) String() string { + b, _ := json.Marshal(&bm) + return string(b) } func (ma *BatchPayload) Hash() *Bytes32 { @@ -57,42 +108,65 @@ func (ma *BatchPayload) Hash() *Bytes32 { return &b32 } -// Scan implements sql.Scanner -func (ma *BatchPayload) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case []byte: - return json.Unmarshal(src, &ma) - - case string: - if src == "" { - return nil +func (ma *BatchPayload) Manifest(id *UUID) *BatchManifest { + tm := &BatchManifest{ + Version: ManifestVersion1, + ID: id, + TX: ma.TX, + Messages: make([]*MessageManifestEntry, 0, len(ma.Messages)), + Data: make(DataRefs, 0, len(ma.Data)), + } + for _, m := range ma.Messages { + if m != nil && m.Header.ID != nil { + tm.Messages = append(tm.Messages, &MessageManifestEntry{ + MessageRef: MessageRef{ + ID: m.Header.ID, + Hash: m.Hash, + }, + Topics: len(m.Header.Topics), + }) } - return json.Unmarshal([]byte(src), &ma) - - default: - return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, ma) } + for _, d := range ma.Data { + if d != nil && d.ID != nil { + tm.Data = append(tm.Data, &DataRef{ + ID: d.ID, + Hash: d.Hash, + }) + } + } + return tm +} +func (b *BatchPersisted) GenManifest(messages []*Message, data DataArray) *BatchManifest { + return (&BatchPayload{ + TX: b.TX, + Messages: messages, + Data: data, + }).Manifest(b.ID) } -func (b *Batch) Manifest() *Manifest { - if b == nil { - return nil - } - tm := &Manifest{ - Messages: make([]MessageRef, len(b.Payload.Messages)), - Data: make([]DataRef, len(b.Payload.Data)), - } - for i, m := range b.Payload.Messages { - tm.Messages[i].ID = m.Header.ID - tm.Messages[i].Hash = m.Hash - } - for i, d := range b.Payload.Data { - tm.Data[i].ID = d.ID - tm.Data[i].Hash = d.Hash +func (b *BatchPersisted) GenInflight(messages []*Message, data DataArray) *Batch { + return &Batch{ + BatchHeader: b.BatchHeader, + Hash: b.Hash, + Payload: BatchPayload{ + TX: b.TX, + Messages: messages, + Data: data, + }, } - return tm +} + +// Confirmed generates a newly confirmed persisted batch, including (re-)generating the manifest +func (b *Batch) Confirmed() (*BatchPersisted, *BatchManifest) { + manifest := b.Payload.Manifest(b.ID) + manifestString := manifest.String() + return &BatchPersisted{ + BatchHeader: b.BatchHeader, + Hash: b.Hash, + TX: b.Payload.TX, + Manifest: JSONAnyPtr(manifestString), + Confirmed: Now(), + }, manifest } diff --git a/pkg/fftypes/batch_test.go b/pkg/fftypes/batch_test.go index f184c353f1..90c5921c7d 100644 --- a/pkg/fftypes/batch_test.go +++ b/pkg/fftypes/batch_test.go @@ -17,51 +17,50 @@ package fftypes import ( + "crypto/sha256" + "encoding/hex" "encoding/json" "testing" "github.com/stretchr/testify/assert" ) -func TestSQLSerializedMessageArray(t *testing.T) { +func TestSQLSerializedManifest(t *testing.T) { msgID1 := NewUUID() msgID2 := NewUUID() - batchPayload := BatchPayload{ - Messages: []*Message{ - {Header: MessageHeader{ID: msgID1}}, - {Header: MessageHeader{ID: msgID2}}, 
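The manifest machinery above is what lets parties prove they hold an identical batch without re-hashing full payloads: the manifest embeds the hash of every message and data item, so hashing the manifest string covers them all. A sketch of the receiver-side check this enables (the helper name is illustrative):

package example

import (
	"github.com/hyperledger/firefly/pkg/fftypes"
)

// verifyBatch is an illustrative helper, not part of this patch
func verifyBatch(persisted *fftypes.BatchPersisted, messages []*fftypes.Message, data fftypes.DataArray) bool {
	// Regenerate the manifest from the locally persisted messages/data...
	regenerated := persisted.GenManifest(messages, data)
	// ...and compare manifest hashes: equality proves every message and data
	// hash matches what the sender serialized into the batch
	return fftypes.HashString(regenerated.String()).Equals(fftypes.HashString(persisted.Manifest.String()))
}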
+ batch := &Batch{ + BatchHeader: BatchHeader{ + ID: NewUUID(), + }, + Payload: BatchPayload{ + TX: TransactionRef{ + ID: NewUUID(), + }, + Messages: []*Message{ + {Header: MessageHeader{ID: msgID1}}, + {Header: MessageHeader{ID: msgID2}}, + }, }, } - b, err := batchPayload.Value() - assert.NoError(t, err) - assert.IsType(t, []byte{}, b) - - var batchPayloadRead BatchPayload - err = batchPayloadRead.Scan(b) - assert.NoError(t, err) - - j1, err := json.Marshal(&batchPayload) - assert.NoError(t, err) - j2, err := json.Marshal(&batchPayloadRead) - assert.NoError(t, err) - assert.Equal(t, string(j1), string(j2)) - - err = batchPayloadRead.Scan("") - assert.NoError(t, err) - - err = batchPayloadRead.Scan("{}") - assert.NoError(t, err) + bp, manifest := batch.Confirmed() + mfString := manifest.String() + assert.Equal(t, batch.BatchHeader, bp.BatchHeader) + assert.Equal(t, batch.Payload.TX, bp.TX) + assert.Equal(t, mfString, bp.Manifest.String()) + assert.NotNil(t, bp.Confirmed) - err = batchPayloadRead.Scan(nil) + var mf *BatchManifest + err := json.Unmarshal([]byte(mfString), &mf) assert.NoError(t, err) + assert.Equal(t, msgID1, mf.Messages[0].ID) + assert.Equal(t, msgID2, mf.Messages[1].ID) + mfHash := sha256.Sum256([]byte(mfString)) + assert.Equal(t, HashString(bp.GenManifest(batch.Payload.Messages, batch.Payload.Data).String()).String(), hex.EncodeToString(mfHash[:])) - var wrongType int - err = batchPayloadRead.Scan(&wrongType) - assert.Error(t, err) + assert.Equal(t, batch, bp.GenInflight(batch.Payload.Messages, batch.Payload.Data)) - hash := batchPayload.Hash() - assert.NotNil(t, hash) + assert.NotEqual(t, batch.Payload.Hash().String(), hex.EncodeToString(mfHash[:])) } diff --git a/pkg/fftypes/bytetypes.go b/pkg/fftypes/bytetypes.go index 0e12e43243..9be2610c5f 100644 --- a/pkg/fftypes/bytetypes.go +++ b/pkg/fftypes/bytetypes.go @@ -19,6 +19,7 @@ package fftypes import ( "context" "crypto/rand" + "crypto/sha256" "database/sql/driver" "encoding/hex" "hash" @@ -43,6 +44,12 @@ func HashResult(hash hash.Hash) *Bytes32 { return &b32 } +func HashString(s string) *Bytes32 { + hash := sha256.New() + hash.Write([]byte(s)) + return HashResult(hash) +} + func (b32 Bytes32) MarshalText() ([]byte, error) { hexstr := make([]byte, 64) hex.Encode(hexstr, b32[0:32]) diff --git a/pkg/fftypes/charthistogram.go b/pkg/fftypes/charthistogram.go index 218501a567..19716f08cc 100644 --- a/pkg/fftypes/charthistogram.go +++ b/pkg/fftypes/charthistogram.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. 
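The HashString helper added to bytetypes.go above is what the batch code uses to hash manifest JSON; it is equivalent to a one-shot sha256 over the string's bytes, as this standalone sketch shows:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/hyperledger/firefly/pkg/fftypes"
)

func main() {
	manual := sha256.Sum256([]byte("hello"))
	helper := fftypes.HashString("hello")
	// Both print the same 64-character hex digest
	fmt.Println(hex.EncodeToString(manual[:]))
	fmt.Println(helper.String())
}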
// // SPDX-License-Identifier: Apache-2.0 // @@ -23,12 +23,22 @@ const ( ChartHistogramMinBuckets = 1 ) -// ChartHistogram is a timestamp and count +// ChartHistogram is a list of buckets with types type ChartHistogram struct { - // Timestamp of bucket in histogram + // Count for entire timestamp in histogram + Count string `json:"count"` + // Timestamp of bucket Timestamp *FFTime `json:"timestamp"` - // Count for timestamp in histogram + // Types list of histogram types and their count + Types []*ChartHistogramType `json:"types"` +} + +// ChartHistogramType is a type and count +type ChartHistogramType struct { + // Count for type in histogram bucket Count string `json:"count"` + // Type of bucket in histogram + Type string `json:"type"` } // ChartHistogramInterval specifies lower and upper timestamps for histogram bucket diff --git a/pkg/fftypes/constants.go b/pkg/fftypes/constants.go index b15d29f6c5..e1c75789bd 100644 --- a/pkg/fftypes/constants.go +++ b/pkg/fftypes/constants.go @@ -22,6 +22,13 @@ const ( SystemNamespace = "ff_system" ) +const ( + // SystemTopicDefinitions is the FireFly event topic for events that are confirmations of definition of pre-defined datatypes + SystemTopicDefinitions = "ff_definition" + // SystemBatchPinTopic is the FireFly event topic for events from the FireFly batch pin listener + SystemBatchPinTopic = "ff_batch_pin" +) + const ( // SystemTagDefineDatatype is the tag for messages that broadcast data definitions diff --git a/pkg/fftypes/contract_listener.go b/pkg/fftypes/contract_listener.go index 2c6ed27fdd..a2b23e0cbf 100644 --- a/pkg/fftypes/contract_listener.go +++ b/pkg/fftypes/contract_listener.go @@ -25,14 +25,20 @@ import ( ) type ContractListener struct { - ID *UUID `json:"id,omitempty"` - Interface *FFIReference `json:"interface,omitempty"` - Namespace string `json:"namespace,omitempty"` - Name string `json:"name,omitempty"` - ProtocolID string `json:"protocolId,omitempty"` - Location *JSONAny `json:"location,omitempty"` - Created *FFTime `json:"created,omitempty"` - Event *FFISerializedEvent `json:"event,omitempty"` + ID *UUID `json:"id,omitempty"` + Interface *FFIReference `json:"interface,omitempty"` + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` + ProtocolID string `json:"protocolId,omitempty"` + Location *JSONAny `json:"location,omitempty"` + Created *FFTime `json:"created,omitempty"` + Event *FFISerializedEvent `json:"event,omitempty"` + Topic string `json:"topic,omitempty"` + Options *ContractListenerOptions `json:"options,omitempty"` +} + +type ContractListenerOptions struct { + FirstEvent string `json:"firstEvent,omitempty"` } type ContractListenerInput struct { @@ -63,3 +69,23 @@ func (fse FFISerializedEvent) Value() (driver.Value, error) { bytes, _ := json.Marshal(fse) return bytes, nil } + +// Scan implements sql.Scanner +func (o *ContractListenerOptions) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + o = nil + return nil + case string: + return json.Unmarshal([]byte(src), &o) + case []byte: + return json.Unmarshal(src, &o) + default: + return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, o) + } +} + +func (o ContractListenerOptions) Value() (driver.Value, error) { + bytes, _ := json.Marshal(o) + return bytes, nil +} diff --git a/pkg/fftypes/contract_listener_test.go b/pkg/fftypes/contract_listener_test.go index 296fdd059b..66ba70b32f 100644 --- a/pkg/fftypes/contract_listener_test.go +++ b/pkg/fftypes/contract_listener_test.go @@ -61,3 +61,37 @@ func 
TestFFISerializedEventValue(t *testing.T) { assert.NoError(t, err) assert.Equal(t, `{"name":"event1","description":"a super event","params":[{"name":"details","schema":{"type":"integer","details":{"type":"uint256"}}}]}`, string(val.([]byte))) } + +func TestContractListenerOptionsScan(t *testing.T) { + options := &ContractListenerOptions{} + err := options.Scan([]byte(`{"firstBlock":"newest"}`)) + assert.NoError(t, err) +} + +func TestContractListenerOptionsScanNil(t *testing.T) { + options := &ContractListenerOptions{} + err := options.Scan(nil) + assert.Nil(t, err) +} + +func TestContractListenerOptionsScanString(t *testing.T) { + options := &ContractListenerOptions{} + err := options.Scan(`{"firstBlock":"newest"}`) + assert.NoError(t, err) +} + +func TestContractListenerOptionsScanError(t *testing.T) { + options := &ContractListenerOptions{} + err := options.Scan(false) + assert.Regexp(t, "FF10125", err) +} + +func TestContractListenerOptionsValue(t *testing.T) { + options := &ContractListenerOptions{ + FirstEvent: "newest", + } + + val, err := options.Value() + assert.NoError(t, err) + assert.Equal(t, `{"firstEvent":"newest"}`, string(val.([]byte))) +} diff --git a/pkg/fftypes/contracts.go b/pkg/fftypes/contracts.go index 43b46f964b..cad8b7d4b0 100644 --- a/pkg/fftypes/contracts.go +++ b/pkg/fftypes/contracts.go @@ -22,9 +22,9 @@ type ContractCallType = FFEnum var ( // CallTypeInvoke is an invocation that submits a transaction for inclusion in the chain - CallTypeInvoke ContractCallType = ffEnum("contractcalltype", "invoke") + CallTypeInvoke = ffEnum("contractcalltype", "invoke") // CallTypeQuery is a query that returns data from the chain - CallTypeQuery ContractCallType = ffEnum("contractcalltype", "query") + CallTypeQuery = ffEnum("contractcalltype", "query") ) type ContractCallRequest struct { @@ -74,7 +74,7 @@ func (c *ContractAPI) Validate(ctx context.Context, existing bool) (err error) { } func (c *ContractAPI) Topic() string { - return namespaceTopic(c.Namespace) + return typeNamespaceNameTopicHash("contractapi", c.Namespace, c.Name) } func (c *ContractAPI) SetBroadcastMessage(msgID *UUID) { diff --git a/pkg/fftypes/contracts_test.go b/pkg/fftypes/contracts_test.go index 156cd75984..6b7742b1d9 100644 --- a/pkg/fftypes/contracts_test.go +++ b/pkg/fftypes/contracts_test.go @@ -52,7 +52,7 @@ func TestContractAPITopic(t *testing.T) { api := &ContractAPI{ Namespace: "ns1", } - assert.Equal(t, "ff_ns_ns1", api.Topic()) + assert.Equal(t, "4cccc66c1f0eebcf578f1e63b73a2047d4eb4c84c0a00c69b0e00c7490403d20", api.Topic()) } func TestContractAPISetBroadCastMessage(t *testing.T) { diff --git a/pkg/fftypes/data.go b/pkg/fftypes/data.go index 7834a285d7..52362c82c6 100644 --- a/pkg/fftypes/data.go +++ b/pkg/fftypes/data.go @@ -52,6 +52,43 @@ type Data struct { ValueSize int64 `json:"-"` // Used internally for message size calcuation, without full payload retrieval } +func (br *BlobRef) BatchBlobRef(batchType BatchType) *BlobRef { + if br == nil { + return nil + } + switch batchType { + case BatchTypePrivate: + // For private we omit the "public" ref in all cases, to avoid any potential for the batch payload to change due + to the same data also being sent in a broadcast batch (thus assigning a public ref). + return &BlobRef{ + Hash: br.Hash, + Size: br.Size, + Name: br.Name, + } + default: + // For broadcast data the blob reference contains the "public" (shared storage) reference, which + must have been allocated to this data item before sealing the batch.
+ return br + } +} + +// BatchData is the fields in a data record that are assured to be consistent on all parties. +// This is what is transferred and hashed in a batch payload between nodes. +func (d *Data) BatchData(batchType BatchType) *Data { + return &Data{ + ID: d.ID, + Validator: d.Validator, + Namespace: d.Namespace, + Hash: d.Hash, + Created: d.Created, + Datatype: d.Datatype, + Value: d.Value, + Blob: d.Blob.BatchBlobRef(batchType), + + ValueSize: d.ValueSize, + } +} + type DataAndBlob struct { Data *Data Blob *Blob @@ -77,6 +114,20 @@ func (d DataRefs) Hash() *Bytes32 { return &b32 } +type DataArray []*Data + +func (da DataArray) Refs() DataRefs { + dr := make(DataRefs, len(da)) + for i, d := range da { + dr[i] = &DataRef{ + ID: d.ID, + Hash: d.Hash, + ValueSize: d.ValueSize, + } + } + return dr +} + func CheckValidatorType(ctx context.Context, validator ValidatorType) error { switch validator { case ValidatorTypeJSON, ValidatorTypeNone, ValidatorTypeSystemDefinition: @@ -90,6 +141,9 @@ const dataSizeEstimateBase = int64(256) func (d *Data) EstimateSize() int64 { // For now we have a static estimate for the size of the serialized outer structure. + if d.ValueSize <= 0 { + d.ValueSize = d.Value.Length() + } // As long as this has been persisted, the value size will represent the length return dataSizeEstimateBase + d.ValueSize } @@ -151,6 +205,9 @@ func (d *Data) Seal(ctx context.Context, blob *Blob) (err error) { } else if d.Blob != nil && d.Blob.Hash != nil { return i18n.NewError(ctx, i18n.MsgBlobMismatchSealingData) } + if d.ValueSize <= 0 { + d.ValueSize = d.Value.Length() + } d.Hash, err = d.CalcHash(ctx) if err == nil { err = CheckValidatorType(ctx, d.Validator) diff --git a/pkg/fftypes/data_test.go b/pkg/fftypes/data_test.go index 9532317959..5ff194dc90 100644 --- a/pkg/fftypes/data_test.go +++ b/pkg/fftypes/data_test.go @@ -33,6 +33,7 @@ func TestEstimateDataSize(t *testing.T) { ValueSize: 4, } assert.Equal(t, dataSizeEstimateBase+int64(4), d.EstimateSize()) + assert.Equal(t, dataSizeEstimateBase+int64(4), d.EstimateSize()) } func TestDatatypeReference(t *testing.T) { @@ -206,3 +207,49 @@ func TestHashDataNull(t *testing.T) { assert.Equal(t, expectedHash.String(), hash.String()) } + +func TestDataImmutable(t *testing.T) { + data := &Data{ + ID: NewUUID(), + Validator: ValidatorTypeJSON, + Namespace: "ns1", + Hash: NewRandB32(), + Created: Now(), + } + assert.True(t, data.Hash.Equals(data.BatchData(BatchTypeBroadcast).Hash)) + + data.Blob = &BlobRef{ + Hash: NewRandB32(), + Size: 12345, + Name: "name.txt", + Public: "sharedStorageRef", + } + assert.Equal(t, data.Blob, data.BatchData(BatchTypeBroadcast).Blob) + assert.Empty(t, data.BatchData(BatchTypePrivate).Blob.Public) +} + +func TestDataArrayToRefs(t *testing.T) { + data1 := &Data{ + ID: NewUUID(), + Validator: ValidatorTypeJSON, + Namespace: "ns1", + Hash: NewRandB32(), + Created: Now(), + ValueSize: 12345, + } + data2 := &Data{ + ID: NewUUID(), + Validator: ValidatorTypeJSON, + Namespace: "ns1", + Hash: NewRandB32(), + Created: Now(), + ValueSize: 23456, + } + + da := DataArray{data1, data2} + assert.Equal(t, da.Refs(), DataRefs{ + {ID: data1.ID, Hash: data1.Hash, ValueSize: 12345}, + {ID: data2.ID, Hash: data2.Hash, ValueSize: 23456}, + }) + +} diff --git a/pkg/fftypes/datatype.go b/pkg/fftypes/datatype.go index 60da473f09..d44bca7393 100644 --- a/pkg/fftypes/datatype.go +++ b/pkg/fftypes/datatype.go @@ -26,11 +26,11 @@ type ValidatorType = FFEnum var ( // ValidatorTypeJSON is the validator type for JSON Schema 
validation - ValidatorTypeJSON ValidatorType = ffEnum("validatortype", "json") + ValidatorTypeJSON = ffEnum("validatortype", "json") // ValidatorTypeNone explicitly disables validation, even when a datatype is set. Allowing categorization of datatype without validation. - ValidatorTypeNone ValidatorType = ffEnum("validatortype", "none") + ValidatorTypeNone = ffEnum("validatortype", "none") // ValidatorTypeSystemDefinition is the validator type for system definitions - ValidatorTypeSystemDefinition ValidatorType = ffEnum("validatortype", "definition") + ValidatorTypeSystemDefinition = ffEnum("validatortype", "definition") ) // Datatype is the structure defining a data definition, such as a JSON schema @@ -75,7 +75,7 @@ func (dt *Datatype) Validate(ctx context.Context, existing bool) (err error) { } func (dt *Datatype) Topic() string { - return namespaceTopic(dt.Namespace) + return typeNamespaceNameTopicHash("datatype", dt.Namespace, dt.Name) } func (dt *Datatype) SetBroadcastMessage(msgID *UUID) { diff --git a/pkg/fftypes/datatype_test.go b/pkg/fftypes/datatype_test.go index f325fcc8fd..db162b2760 100644 --- a/pkg/fftypes/datatype_test.go +++ b/pkg/fftypes/datatype_test.go @@ -75,7 +75,7 @@ func TestDatatypeValidation(t *testing.T) { assert.Regexp(t, "FF10201", dt.Validate(context.Background(), true)) var def Definition = dt - assert.Equal(t, "ff_ns_ok", def.Topic()) + assert.Equal(t, "8e23c0a7fa9ec15c68a662e0e502933facb3d249409efa2b4f89d479b9f990cb", def.Topic()) def.SetBroadcastMessage(NewUUID()) assert.NotNil(t, dt.Message) } diff --git a/pkg/fftypes/event.go b/pkg/fftypes/event.go index 560a519182..f6b8d63980 100644 --- a/pkg/fftypes/event.go +++ b/pkg/fftypes/event.go @@ -21,36 +21,36 @@ type EventType = FFEnum var ( // EventTypeTransactionSubmitted occurs only on the node that initiates a tranaction, when the transaction is submitted - EventTypeTransactionSubmitted EventType = ffEnum("eventtype", "transaction_submitted") + EventTypeTransactionSubmitted = ffEnum("eventtype", "transaction_submitted") // EventTypeMessageConfirmed is the most important event type in the system. This means a message and all of its data // is available for processing by an application. 
Most applications only need to listen to this event type - EventTypeMessageConfirmed EventType = ffEnum("eventtype", "message_confirmed") + EventTypeMessageConfirmed = ffEnum("eventtype", "message_confirmed") // EventTypeMessageRejected occurs if a message is received and confirmed from a sequencing perspective, but is rejected as invalid (mismatch to schema, or duplicate system broadcast) - EventTypeMessageRejected EventType = ffEnum("eventtype", "message_rejected") + EventTypeMessageRejected = ffEnum("eventtype", "message_rejected") // EventTypeNamespaceConfirmed occurs when a new namespace is ready for use (on the namespace itself) - EventTypeNamespaceConfirmed EventType = ffEnum("eventtype", "namespace_confirmed") + EventTypeNamespaceConfirmed = ffEnum("eventtype", "namespace_confirmed") // EventTypeDatatypeConfirmed occurs when a new datatype is ready for use (on the namespace of the datatype) - EventTypeDatatypeConfirmed EventType = ffEnum("eventtype", "datatype_confirmed") + EventTypeDatatypeConfirmed = ffEnum("eventtype", "datatype_confirmed") // EventTypeIdentityConfirmed occurs when a new identity has been confirmed, as as result of a signed claim broadcast, and any associated claim verification - EventTypeIdentityConfirmed EventType = ffEnum("eventtype", "identity_confirmed") + EventTypeIdentityConfirmed = ffEnum("eventtype", "identity_confirmed") // EventTypeIdentityUpdated occurs when an existing identity is update by the owner of that identity - EventTypeIdentityUpdated EventType = ffEnum("eventtype", "identity_updated") + EventTypeIdentityUpdated = ffEnum("eventtype", "identity_updated") // EventTypePoolConfirmed occurs when a new token pool is ready for use - EventTypePoolConfirmed EventType = ffEnum("eventtype", "token_pool_confirmed") + EventTypePoolConfirmed = ffEnum("eventtype", "token_pool_confirmed") // EventTypeTransferConfirmed occurs when a token transfer has been confirmed - EventTypeTransferConfirmed EventType = ffEnum("eventtype", "token_transfer_confirmed") + EventTypeTransferConfirmed = ffEnum("eventtype", "token_transfer_confirmed") // EventTypeTransferOpFailed occurs when a token transfer submitted by this node has failed (based on feedback from connector) - EventTypeTransferOpFailed EventType = ffEnum("eventtype", "token_transfer_op_failed") + EventTypeTransferOpFailed = ffEnum("eventtype", "token_transfer_op_failed") // EventTypeApprovalConfirmed occurs when a token approval has been confirmed - EventTypeApprovalConfirmed EventType = ffEnum("eventtype", "token_approval_confirmed") + EventTypeApprovalConfirmed = ffEnum("eventtype", "token_approval_confirmed") // EventTypeApprovalOpFailed occurs when a token approval submitted by this node has failed (based on feedback from connector) - EventTypeApprovalOpFailed EventType = ffEnum("eventtype", "token_approval_op_failed") + EventTypeApprovalOpFailed = ffEnum("eventtype", "token_approval_op_failed") // EventTypeContractInterfaceConfirmed occurs when a new contract interface has been confirmed - EventTypeContractInterfaceConfirmed EventType = ffEnum("eventtype", "contract_interface_confirmed") + EventTypeContractInterfaceConfirmed = ffEnum("eventtype", "contract_interface_confirmed") // EventTypeContractAPIConfirmed occurs when a new contract API has been confirmed - EventTypeContractAPIConfirmed EventType = ffEnum("eventtype", "contract_api_confirmed") + EventTypeContractAPIConfirmed = ffEnum("eventtype", "contract_api_confirmed") // EventTypeBlockchainEventReceived occurs when a new event has been received 
from the blockchain - EventTypeBlockchainEventReceived EventType = ffEnum("eventtype", "blockchain_event_received") + EventTypeBlockchainEventReceived = ffEnum("eventtype", "blockchain_event_received") ) // Event is an activity in the system, delivered reliably to applications, that indicates something has happened in the network @@ -62,15 +62,23 @@ type Event struct { Reference *UUID `json:"reference"` Correlator *UUID `json:"correlator,omitempty"` Transaction *UUID `json:"tx,omitempty"` + Topic string `json:"topic,omitempty"` Created *FFTime `json:"created"` } +// EnrichedEvent adds the referred object to an event +type EnrichedEvent struct { + Event + Message *Message `json:"message,omitempty"` + Transaction *Transaction `json:"transaction,omitempty"` + BlockchainEvent *BlockchainEvent `json:"blockchainevent,omitempty"` +} + // EventDelivery adds the referred object to an event, as well as details of the subscription that caused the event to // be dispatched to an application. type EventDelivery struct { - Event + EnrichedEvent Subscription SubscriptionRef `json:"subscription"` - Message *Message `json:"message,omitempty"` } // EventDeliveryResponse is the payload an application sends back, to confirm it has accepted (or rejected) the event and as such @@ -83,13 +91,14 @@ type EventDeliveryResponse struct { Reply *MessageInOut `json:"reply,omitempty"` } -func NewEvent(t EventType, ns string, ref *UUID, tx *UUID) *Event { +func NewEvent(t EventType, ns string, ref *UUID, tx *UUID, topic string) *Event { return &Event{ ID: NewUUID(), Type: t, Namespace: ns, Reference: ref, Transaction: tx, + Topic: topic, Created: Now(), } } diff --git a/pkg/fftypes/event_test.go b/pkg/fftypes/event_test.go index 5bcbe5bde3..96c0db39f1 100644 --- a/pkg/fftypes/event_test.go +++ b/pkg/fftypes/event_test.go @@ -26,11 +26,12 @@ func TestNewEvent(t *testing.T) { ref := NewUUID() tx := NewUUID() - e := NewEvent(EventTypeMessageConfirmed, "ns1", ref, tx) + e := NewEvent(EventTypeMessageConfirmed, "ns1", ref, tx, "topic1") assert.Equal(t, EventTypeMessageConfirmed, e.Type) assert.Equal(t, "ns1", e.Namespace) assert.Equal(t, *ref, *e.Reference) assert.Equal(t, *tx, *e.Transaction) + assert.Equal(t, "topic1", e.Topic) e.Sequence = 12345 var ls LocallySequenced = e diff --git a/pkg/fftypes/ffi.go b/pkg/fftypes/ffi.go index 81357d2a50..4f8aacffea 100644 --- a/pkg/fftypes/ffi.go +++ b/pkg/fftypes/ffi.go @@ -102,7 +102,7 @@ func (f *FFI) Validate(ctx context.Context, existing bool) (err error) { } func (f *FFI) Topic() string { - return namespaceTopic(f.Namespace) + return typeNamespaceNameTopicHash("ffi", f.Namespace, f.Name) } func (f *FFI) SetBroadcastMessage(msgID *UUID) { diff --git a/pkg/fftypes/ffi_test.go b/pkg/fftypes/ffi_test.go index 27a8bd91f1..2d9f2de16f 100644 --- a/pkg/fftypes/ffi_test.go +++ b/pkg/fftypes/ffi_test.go @@ -138,7 +138,7 @@ func TestFFITopic(t *testing.T) { ffi := &FFI{ Namespace: "ns1", } - assert.Equal(t, "ff_ns_ns1", ffi.Topic()) + assert.Equal(t, "01a982a7251400a7ec64fccce6febee3942a56e37967fa2ba26d7d6f43523c82", ffi.Topic()) } func TestFFISetBroadCastMessage(t *testing.T) { diff --git a/pkg/fftypes/manifest.go b/pkg/fftypes/id_and_sequence.go similarity index 70% rename from pkg/fftypes/manifest.go rename to pkg/fftypes/id_and_sequence.go index 8410c5b0c8..f6ceb589ff 100644 --- a/pkg/fftypes/manifest.go +++ b/pkg/fftypes/id_and_sequence.go @@ -16,15 +16,8 @@ package fftypes -import "encoding/json" - -// Manifest is a list of references to messages and data -type Manifest struct { - 
Messages []MessageRef `json:"messages"` - Data []DataRef `json:"data"` -} - -func (mf *Manifest) String() string { - b, _ := json.Marshal(&mf) - return string(b) +// IDAndSequence is a combination of a UUID and a stored sequence +type IDAndSequence struct { + ID UUID + Sequence int64 } diff --git a/pkg/fftypes/identity.go b/pkg/fftypes/identity.go index 592b70e879..e0f17d164d 100644 --- a/pkg/fftypes/identity.go +++ b/pkg/fftypes/identity.go @@ -30,11 +30,11 @@ type IdentityType = FFEnum var ( // IdentityTypeOrg is an organization - IdentityTypeOrg IdentityType = ffEnum("identitytype", "org") + IdentityTypeOrg = ffEnum("identitytype", "org") // IdentityTypeNode is a node - IdentityTypeNode IdentityType = ffEnum("identitytype", "node") + IdentityTypeNode = ffEnum("identitytype", "node") // IdentityTypeCustom is a user defined identity within a namespace - IdentityTypeCustom IdentityType = ffEnum("identitytype", "custom") + IdentityTypeCustom = ffEnum("identitytype", "custom") ) const ( diff --git a/pkg/fftypes/jsonany.go b/pkg/fftypes/jsonany.go index f8137f0848..cd895e4520 100644 --- a/pkg/fftypes/jsonany.go +++ b/pkg/fftypes/jsonany.go @@ -71,6 +71,13 @@ func (h JSONAny) MarshalJSON() ([]byte, error) { return []byte(h), nil } +func (h *JSONAny) Unmarshal(ctx context.Context, v interface{}) error { + if h == nil { + return i18n.NewError(ctx, i18n.MsgNilOrNullObject) + } + return json.Unmarshal([]byte(*h), v) +} + func (h *JSONAny) Hash() *Bytes32 { if h == nil { return nil diff --git a/pkg/fftypes/jsonany_test.go b/pkg/fftypes/jsonany_test.go index 8faf3e482f..a55b69a9d8 100644 --- a/pkg/fftypes/jsonany_test.go +++ b/pkg/fftypes/jsonany_test.go @@ -17,6 +17,7 @@ package fftypes import ( + "context" "encoding/json" "testing" @@ -152,3 +153,18 @@ func TestValue(t *testing.T) { assert.Equal(t, "{}", v) } + +func TestUnmarshal(t *testing.T) { + + var h *JSONAny + var myObj struct { + Key1 string `json:"key1"` + } + err := h.Unmarshal(context.Background(), &myObj) + assert.Regexp(t, "FF10368", err) + + h = JSONAnyPtr(`{"key1":"value1"}`) + err = h.Unmarshal(context.Background(), &myObj) + assert.NoError(t, err) + assert.Equal(t, "value1", myObj.Key1) +} diff --git a/pkg/fftypes/manifest_test.go b/pkg/fftypes/manifest_test.go deleted file mode 100644 index 6f8df08127..0000000000 --- a/pkg/fftypes/manifest_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright © 2021 Kaleido, Inc. -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
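The new JSONAny.Unmarshal helper ties back into the batch work earlier in this patch: BatchPersisted stores its manifest as a JSONAny, and Unmarshal is a natural way to decode it back into a structured BatchManifest. A sketch (the helper name is illustrative):

package example

import (
	"context"

	"github.com/hyperledger/firefly/pkg/fftypes"
)

// decodeManifest is an illustrative helper, not part of this patch
func decodeManifest(ctx context.Context, persisted *fftypes.BatchPersisted) (*fftypes.BatchManifest, error) {
	var manifest fftypes.BatchManifest
	// Per the test above, a nil/null JSONAny fails with FF10368
	if err := persisted.Manifest.Unmarshal(ctx, &manifest); err != nil {
		return nil, err
	}
	return &manifest, nil
}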
- -package fftypes - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestManifestToString(t *testing.T) { - - batch := &Batch{ - Payload: BatchPayload{ - Messages: []*Message{ - {Header: MessageHeader{ID: MustParseUUID("c38e76ec-92a6-4659-805d-8ae3b7437c40")}, Hash: MustParseBytes32("169ef5233cf44df3d71df59f25928743e9a76378bb1375e06539b732b1fc57e5")}, - }, - Data: []*Data{ - {ID: MustParseUUID("7bc49647-cd1c-4633-98fa-ddbb208d61bd"), Hash: MustParseBytes32("2b849d47e44a291cd83bee4e7ace66178a5245a151d3bbd02011312ec2604ed6")}, - {ID: MustParseUUID("5b80eec3-04b5-4557-bced-6a458ecb9ef2"), Hash: MustParseBytes32("2bcddd992d17e89a5aafbe99c59d954018ddadf4e533a164808ae2389bbf33dc")}, - }, - }, - } - assert.Equal(t, "{\"messages\":[{\"id\":\"c38e76ec-92a6-4659-805d-8ae3b7437c40\",\"hash\":\"169ef5233cf44df3d71df59f25928743e9a76378bb1375e06539b732b1fc57e5\"}],\"data\":[{\"id\":\"7bc49647-cd1c-4633-98fa-ddbb208d61bd\",\"hash\":\"2b849d47e44a291cd83bee4e7ace66178a5245a151d3bbd02011312ec2604ed6\"},{\"id\":\"5b80eec3-04b5-4557-bced-6a458ecb9ef2\",\"hash\":\"2bcddd992d17e89a5aafbe99c59d954018ddadf4e533a164808ae2389bbf33dc\"}]}", batch.Manifest().String()) -} diff --git a/pkg/fftypes/message.go b/pkg/fftypes/message.go index 50d00034a1..f749b05739 100644 --- a/pkg/fftypes/message.go +++ b/pkg/fftypes/message.go @@ -34,17 +34,17 @@ type MessageType = FFEnum var ( // MessageTypeDefinition is a message broadcasting a definition of a system type, pre-defined by firefly (namespaces, identities, data definitions, etc.) - MessageTypeDefinition MessageType = ffEnum("messagetype", "definition") + MessageTypeDefinition = ffEnum("messagetype", "definition") // MessageTypeBroadcast is a broadcast message, meaning it is intended to be visible by all parties in the network - MessageTypeBroadcast MessageType = ffEnum("messagetype", "broadcast") + MessageTypeBroadcast = ffEnum("messagetype", "broadcast") // MessageTypePrivate is a private message, meaning it is only sent explicitly to individual parties in the network - MessageTypePrivate MessageType = ffEnum("messagetype", "private") + MessageTypePrivate = ffEnum("messagetype", "private") // MessageTypeGroupInit is a special private message that contains the definition of the group - MessageTypeGroupInit MessageType = ffEnum("messagetype", "groupinit") + MessageTypeGroupInit = ffEnum("messagetype", "groupinit") // MessageTypeTransferBroadcast is a broadcast message to accompany/annotate a token transfer - MessageTypeTransferBroadcast MessageType = ffEnum("messagetype", "transfer_broadcast") + MessageTypeTransferBroadcast = ffEnum("messagetype", "transfer_broadcast") // MessageTypeTransferPrivate is a private message to accompany/annotate a token transfer - MessageTypeTransferPrivate MessageType = ffEnum("messagetype", "transfer_private") + MessageTypeTransferPrivate = ffEnum("messagetype", "transfer_private") ) // MessageState is the current transmission/confirmation state of a message @@ -52,17 +52,17 @@ type MessageState = FFEnum var ( // MessageStateStaged is a message created locally which is not ready to send - MessageStateStaged MessageState = ffEnum("messagestate", "staged") + MessageStateStaged = ffEnum("messagestate", "staged") // MessageStateReady is a message created locally which is ready to send - MessageStateReady MessageState = ffEnum("messagestate", "ready") + MessageStateReady = ffEnum("messagestate", "ready") // MessageStateSent is a message created locally which has been sent in a batch - MessageStateSent MessageState = 
ffEnum("messagestate", "sent") + MessageStateSent = ffEnum("messagestate", "sent") // MessageStatePending is a message that has been received but is awaiting aggregation/confirmation - MessageStatePending MessageState = ffEnum("messagestate", "pending") + MessageStatePending = ffEnum("messagestate", "pending") // MessageStateConfirmed is a message that has completed all required confirmations (blockchain if pinned, token transfer if transfer coupled, etc) - MessageStateConfirmed MessageState = ffEnum("messagestate", "confirmed") + MessageStateConfirmed = ffEnum("messagestate", "confirmed") // MessageStateRejected is a message that has completed confirmation, but has been rejected by FireFly - MessageStateRejected MessageState = ffEnum("messagestate", "rejected") + MessageStateRejected = ffEnum("messagestate", "rejected") ) // MessageHeader contains all fields that contribute to the hash @@ -95,6 +95,18 @@ type Message struct { Sequence int64 `json:"-"` // Local database sequence used internally for batch assembly } +// BatchMessage is the fields in a message record that are assured to be consistent on all parties. +// This is what is transferred and hashed in a batch payload between nodes. +func (m *Message) BatchMessage() *Message { + return &Message{ + Header: m.Header, + Hash: m.Hash, + Data: m.Data, + // The pins are immutable once assigned by the sender, which happens before the batch is sealed + Pins: m.Pins, + } +} + // MessageInOut allows API users to submit values in-line in the payload submitted, which // will be broken out and stored separately during the call. type MessageInOut struct { @@ -170,14 +182,6 @@ func (m *Message) Seal(ctx context.Context) (err error) { if len(m.Header.Topics) == 0 { m.Header.Topics = []string{DefaultTopic} } - if err := m.Header.Topics.Validate(ctx, "header.topics", true); err != nil { - return err - } - if m.Header.Tag != "" { - if err := ValidateFFNameField(ctx, m.Header.Tag, "header.tag"); err != nil { - return err - } - } if m.Header.ID == nil { m.Header.ID = NewUUID() } @@ -188,7 +192,10 @@ func (m *Message) Seal(ctx context.Context) (err error) { if m.Data == nil { m.Data = DataRefs{} } - err = m.DupDataCheck(ctx) + if m.Header.TxType == "" { + m.Header.TxType = TransactionTypeBatchPin + } + err = m.VerifyFields(ctx) if err == nil { m.Header.DataHash = m.Data.Hash() m.Hash = m.Header.Hash() @@ -211,14 +218,14 @@ func (m *Message) DupDataCheck(ctx context.Context) (err error) { return nil } -func (m *Message) Verify(ctx context.Context) error { +func (m *Message) VerifyFields(ctx context.Context) error { switch m.Header.TxType { case TransactionTypeBatchPin: case TransactionTypeUnpinned: default: return i18n.NewError(ctx, i18n.MsgInvalidTXTypeForMessage, m.Header.TxType) } - if err := m.Header.Topics.Validate(ctx, "header.topics", true); err != nil { + if err := m.Header.Topics.Validate(ctx, "header.topics", true, 10 /* Pins need 96 chars each*/); err != nil { return err } if m.Header.Tag != "" { @@ -226,7 +233,11 @@ func (m *Message) Verify(ctx context.Context) error { return err } } - err := m.DupDataCheck(ctx) + return m.DupDataCheck(ctx) +} + +func (m *Message) Verify(ctx context.Context) error { + err := m.VerifyFields(ctx) if err != nil { return err } diff --git a/pkg/fftypes/message_test.go b/pkg/fftypes/message_test.go index 24cda7e7b2..6c7dc489e3 100644 --- a/pkg/fftypes/message_test.go +++ b/pkg/fftypes/message_test.go @@ -85,8 +85,6 @@ func TestVerifyTXType(t *testing.T) { msg.Header.TxType = TransactionTypeTokenPool err = 
msg.Seal(context.Background()) - assert.NoError(t, err) - err = msg.Verify(context.Background()) assert.Regexp(t, "FF10343", err) } @@ -260,3 +258,20 @@ func TestSetInlineData(t *testing.T) { assert.NoError(t, err) assert.Regexp(t, "some data", string(b)) } + +func TestMessageImmutable(t *testing.T) { + msg := &Message{ + Header: MessageHeader{ + ID: NewUUID(), + }, + BatchID: NewUUID(), + Hash: NewRandB32(), + State: MessageStateConfirmed, + Confirmed: Now(), + Data: DataRefs{ + {ID: NewUUID(), Hash: NewRandB32()}, + }, + Pins: NewFFStringArray("pin1", "pin2"), + } + assert.True(t, msg.Hash.Equals(msg.BatchMessage().Hash)) +} diff --git a/pkg/fftypes/namespace.go b/pkg/fftypes/namespace.go index 338a86780a..a4086697fd 100644 --- a/pkg/fftypes/namespace.go +++ b/pkg/fftypes/namespace.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -18,7 +18,7 @@ package fftypes import ( "context" - "fmt" + "crypto/sha256" "github.com/hyperledger/firefly/internal/i18n" ) @@ -28,11 +28,11 @@ type NamespaceType = FFEnum var ( // NamespaceTypeLocal is a namespace that only exists because it was defined in the local configuration of the node - NamespaceTypeLocal NamespaceType = ffEnum("namespacetype", "local") + NamespaceTypeLocal = ffEnum("namespacetype", "local") // NamespaceTypeBroadcast is a namespace that was broadcast through the network. Broadcast namespaces can overwrite a local namespace - NamespaceTypeBroadcast NamespaceType = ffEnum("namespacetype", "broadcast") + NamespaceTypeBroadcast = ffEnum("namespacetype", "broadcast") // NamespaceTypeSystem is a reserved namespace used by FireFly itself - NamespaceTypeSystem NamespaceType = ffEnum("namespacetype", "system") + NamespaceTypeSystem = ffEnum("namespacetype", "system") ) // Namespace is an isolated set of named resources, to allow multiple applications to co-exist in the same network, with the same named objects. @@ -61,12 +61,18 @@ func (ns *Namespace) Validate(ctx context.Context, existing bool) (err error) { return nil } -func namespaceTopic(ns string) string { - return fmt.Sprintf("ff_ns_%s", ns) +func typeNamespaceNameTopicHash(objType string, ns string, name string) string { + // Topic generation function for ordering anything with a type, namespace and name. + // Means all messages racing for this name will be consistently ordered by all parties. + h := sha256.New() + h.Write([]byte(objType)) + h.Write([]byte(ns)) + h.Write([]byte(name)) + return HashResult(h).String() } func (ns *Namespace) Topic() string { - return namespaceTopic(ns.Name) + return typeNamespaceNameTopicHash("namespace", ns.Name, "") } func (ns *Namespace) SetBroadcastMessage(msgID *UUID) { diff --git a/pkg/fftypes/namespace_test.go b/pkg/fftypes/namespace_test.go index f8ab69fdc6..eb6815fad4 100644 --- a/pkg/fftypes/namespace_test.go +++ b/pkg/fftypes/namespace_test.go @@ -45,7 +45,7 @@ func TestNamespaceValidation(t *testing.T) { assert.Regexp(t, "FF10203", ns.Validate(context.Background(), true)) var nsDef Definition = ns - assert.Equal(t, "ff_ns_ok", nsDef.Topic()) + assert.Equal(t, "358de1708c312f6b9eb4c44e0d9811c6f69bf389871d38dd7501992b2c00b557", nsDef.Topic()) nsDef.SetBroadcastMessage(NewUUID()) assert.NotNil(t, ns.Message) diff --git a/pkg/fftypes/offset.go b/pkg/fftypes/offset.go index 1bbc8afb91..246f9ee9c4 100644 --- a/pkg/fftypes/offset.go +++ b/pkg/fftypes/offset.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc.
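The typeNamespaceNameTopicHash function above replaces the human-readable "ff_ns_<name>" topic with a sha256 digest over type, namespace and name, so that all parties racing to define the same named object derive an identical ordering topic. A minimal standalone sketch of the derivation, assuming HashResult renders the digest as lowercase hex (which the updated namespace_test.go expectation suggests):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// topicHash mirrors typeNamespaceNameTopicHash: hash the object type,
// namespace and name, in that order, into a single ordering topic.
func topicHash(objType, ns, name string) string {
	h := sha256.New()
	h.Write([]byte(objType))
	h.Write([]byte(ns))
	h.Write([]byte(name))
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	// For a namespace named "ok" the updated test expects
	// 358de1708c312f6b9eb4c44e0d9811c6f69bf389871d38dd7501992b2c00b557
	fmt.Println(topicHash("namespace", "ok", ""))
}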
// // SPDX-License-Identifier: Apache-2.0 // @@ -20,11 +20,11 @@ type OffsetType = FFEnum var ( // OffsetTypeBatch is an offset stored by the batch manager on the messages table - OffsetTypeBatch OffsetType = ffEnum("offsettype", "batch") + OffsetTypeBatch = ffEnum("offsettype", "batch") // OffsetTypeAggregator is an offset stored by the aggregator on the events table - OffsetTypeAggregator OffsetType = ffEnum("offsettype", "aggregator") + OffsetTypeAggregator = ffEnum("offsettype", "aggregator") // OffsetTypeSubscription is an offset stored by a dispatcher on the events table - OffsetTypeSubscription OffsetType = ffEnum("offsettype", "subscription") + OffsetTypeSubscription = ffEnum("offsettype", "subscription") ) // Offset is a simple stored data structure that records a sequence position within another collection diff --git a/pkg/fftypes/operation.go b/pkg/fftypes/operation.go index af611aa11d..9d1fa290cf 100644 --- a/pkg/fftypes/operation.go +++ b/pkg/fftypes/operation.go @@ -22,24 +22,30 @@ package fftypes type OpType = FFEnum var ( - // OpTypeBlockchainBatchPin is a blockchain transaction to pin a batch - OpTypeBlockchainBatchPin OpType = ffEnum("optype", "blockchain_batch_pin") + // OpTypeBlockchainPinBatch is a blockchain transaction to pin a batch + OpTypeBlockchainPinBatch = ffEnum("optype", "blockchain_pin_batch") // OpTypeBlockchainInvoke is a smart contract invoke - OpTypeBlockchainInvoke OpType = ffEnum("optype", "blockchain_invoke") - // OpTypeSharedStorageBatchBroadcast is a shared storage operation to store broadcast data - OpTypeSharedStorageBatchBroadcast OpType = ffEnum("optype", "sharedstorage_batch_broadcast") - // OpTypeDataExchangeBatchSend is a private send - OpTypeDataExchangeBatchSend OpType = ffEnum("optype", "dataexchange_batch_send") - // OpTypeDataExchangeBlobSend is a private send - OpTypeDataExchangeBlobSend OpType = ffEnum("optype", "dataexchange_blob_send") + OpTypeBlockchainInvoke = ffEnum("optype", "blockchain_invoke") + // OpTypeSharedStorageUploadBatch is a shared storage operation to upload broadcast data + OpTypeSharedStorageUploadBatch = ffEnum("optype", "sharedstorage_upload_batch") + // OpTypeSharedStorageUploadBlob is a shared storage operation to upload blob data + OpTypeSharedStorageUploadBlob = ffEnum("optype", "sharedstorage_upload_blob") + // OpTypeSharedStorageDownloadBatch is a shared storage operation to download broadcast data + OpTypeSharedStorageDownloadBatch = ffEnum("optype", "sharedstorage_download_batch") + // OpTypeSharedStorageDownloadBlob is a shared storage operation to download blob data + OpTypeSharedStorageDownloadBlob = ffEnum("optype", "sharedstorage_download_blob") + // OpTypeDataExchangeSendBatch is a private send of a batch + OpTypeDataExchangeSendBatch = ffEnum("optype", "dataexchange_send_batch") + // OpTypeDataExchangeSendBlob is a private send of a blob + OpTypeDataExchangeSendBlob = ffEnum("optype", "dataexchange_send_blob") // OpTypeTokenCreatePool is a token pool creation - OpTypeTokenCreatePool OpType = ffEnum("optype", "token_create_pool") + OpTypeTokenCreatePool = ffEnum("optype", "token_create_pool") // OpTypeTokenActivatePool is a token pool activation - OpTypeTokenActivatePool OpType = ffEnum("optype", "token_activate_pool") + OpTypeTokenActivatePool = ffEnum("optype", "token_activate_pool") // OpTypeTokenTransfer is a token transfer - OpTypeTokenTransfer OpType = ffEnum("optype", "token_transfer") + OpTypeTokenTransfer = ffEnum("optype", "token_transfer") // OpTypeTokenApproval is a token approval -
OpTypeTokenApproval OpType = ffEnum("optype", "token_approval") + OpTypeTokenApproval = ffEnum("optype", "token_approval") ) // OpStatus is the current status of an operation @@ -48,7 +54,7 @@ type OpStatus string const ( // OpStatusPending indicates the operation has been submitted, but is not yet confirmed as successful or failed OpStatusPending OpStatus = "Pending" - // OpStatusSucceeded the infrastructure runtime has returned success for the operation. + // OpStatusSucceeded the infrastructure runtime has returned success for the operation OpStatusSucceeded OpStatus = "Succeeded" // OpStatusFailed happens when an error is reported by the infrastructure runtime OpStatusFailed OpStatus = "Failed" @@ -86,4 +92,15 @@ type Operation struct { Output JSONObject `json:"output,omitempty"` Created *FFTime `json:"created,omitempty"` Updated *FFTime `json:"updated,omitempty"` + Retry *UUID `json:"retry,omitempty"` +} + +// PreparedOperation is an operation that has gathered all the raw data ready to send to a plugin +// It is never stored, but it should always be possible for the owning Manager to generate a +// PreparedOperation from an Operation. Data is defined by the Manager, but should be JSON-serializable +// to support inspection and debugging. +type PreparedOperation struct { + ID *UUID `json:"id"` + Type OpType `json:"type" ffenum:"optype"` + Data interface{} `json:"data"` } diff --git a/pkg/fftypes/operation_test.go b/pkg/fftypes/operation_test.go index 135d0a0ae5..af56fd4c61 100644 --- a/pkg/fftypes/operation_test.go +++ b/pkg/fftypes/operation_test.go @@ -29,13 +29,13 @@ func (f *fakePlugin) Name() string { return "fake" } func TestNewPendingMessageOp(t *testing.T) { txID := NewUUID() - op := NewOperation(&fakePlugin{}, "ns1", txID, OpTypeSharedStorageBatchBroadcast) + op := NewOperation(&fakePlugin{}, "ns1", txID, OpTypeSharedStorageUploadBatch) assert.Equal(t, Operation{ ID: op.ID, Namespace: "ns1", Transaction: txID, Plugin: "fake", - Type: OpTypeSharedStorageBatchBroadcast, + Type: OpTypeSharedStorageUploadBatch, Status: OpStatusPending, Created: op.Created, Updated: op.Created, diff --git a/pkg/fftypes/pin.go b/pkg/fftypes/pin.go index f58a5f2475..9790406d17 100644 --- a/pkg/fftypes/pin.go +++ b/pkg/fftypes/pin.go @@ -37,11 +37,12 @@ package fftypes // This is because the sequence must be in the order the pins arrive. 
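The PreparedOperation type added above is deliberately never stored: an owning manager re-derives it from a persisted Operation whenever it needs to hand work to a plugin. A minimal sketch of that shape (prepareTransferOp is a hypothetical helper, not part of this patch; real managers define their own Data payloads):

// prepareTransferOp is a hypothetical illustration of a manager pairing a
// persisted Operation with its JSON-serializable payload for a plugin call.
func prepareTransferOp(op *Operation, transfer *TokenTransfer) *PreparedOperation {
	return &PreparedOperation{
		ID:   op.ID,
		Type: op.Type,
		Data: transfer, // JSON-serializable, so it can be inspected and debugged
	}
}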
// type Pin struct { - Sequence int64 `json:"sequence,omitempty"` + Sequence int64 `json:"sequence"` Masked bool `json:"masked,omitempty"` Hash *Bytes32 `json:"hash,omitempty"` Batch *UUID `json:"batch,omitempty"` - Index int64 `json:"index,omitempty"` + BatchHash *Bytes32 `json:"batchHash,omitempty"` + Index int64 `json:"index"` Dispatched bool `json:"dispatched,omitempty"` Signer string `json:"signer,omitempty"` Created *FFTime `json:"created,omitempty"` diff --git a/pkg/fftypes/stringarray.go b/pkg/fftypes/stringarray.go index 242315feff..2761dcfbbf 100644 --- a/pkg/fftypes/stringarray.go +++ b/pkg/fftypes/stringarray.go @@ -87,7 +87,7 @@ func (sa FFStringArray) String() string { return strings.Join([]string(sa), ",") } -func (sa FFStringArray) Validate(ctx context.Context, fieldName string, isName bool) error { +func (sa FFStringArray) Validate(ctx context.Context, fieldName string, isName bool, maxItems int) error { var totalLength int dupCheck := make(map[string]bool) for i, n := range sa { @@ -106,7 +106,7 @@ } } } - if isName && len(sa) > FFStringNameItemsMax { - return i18n.NewError(ctx, i18n.MsgTooManyItems, fieldName, FFStringNameItemsMax, len(sa)) + if maxItems > 0 && len(sa) > maxItems { + return i18n.NewError(ctx, i18n.MsgTooManyItems, fieldName, maxItems, len(sa)) } if totalLength > FFStringArrayStandardMax { diff --git a/pkg/fftypes/stringarray_test.go b/pkg/fftypes/stringarray_test.go index c8be55bdb1..9c5af55132 100644 --- a/pkg/fftypes/stringarray_test.go +++ b/pkg/fftypes/stringarray_test.go @@ -30,25 +30,25 @@ func TestFFStringArrayVerifyTooLong(t *testing.T) { for i := 0; i < 16; i++ { na[i] = fmt.Sprintf("item_%d", i) } - err := na.Validate(context.Background(), "field1", true) + err := na.Validate(context.Background(), "field1", true, FFStringNameItemsMax) assert.Regexp(t, `FF10227.*field1`, err) } func TestFFStringArrayVerifyDuplicate(t *testing.T) { na := FFStringArray{"value1", "value2", "value1"} - err := na.Validate(context.Background(), "field1", true) + err := na.Validate(context.Background(), "field1", true, FFStringNameItemsMax) assert.Regexp(t, `FF10228.*field1`, err) } func TestFFStringArrayVerifyBadName(t *testing.T) { na := FFStringArray{"!valid"} - err := na.Validate(context.Background(), "field1", true) + err := na.Validate(context.Background(), "field1", true, FFStringNameItemsMax) assert.Regexp(t, `FF10131.*field1\[0\]`, err) } func TestFFStringArrayVerifyBadNonName(t *testing.T) { na := FFStringArray{"!valid"} - err := na.Validate(context.Background(), "field1", false) + err := na.Validate(context.Background(), "field1", false, FFStringNameItemsMax) assert.Regexp(t, `FF10335.*field1\[0\]`, err) } @@ -58,7 +58,7 @@ func TestFFStringArrayVerifyTooLongTotal(t *testing.T) { longstr.WriteRune('a') } na := FFStringArray{longstr.String()} - err := na.Validate(context.Background(), "field1", false) + err := na.Validate(context.Background(), "field1", false, FFStringNameItemsMax) assert.Regexp(t, `FF10188.*field1`, err) } diff --git a/pkg/fftypes/subscription.go b/pkg/fftypes/subscription.go index 296d01d34d..72d0ae7d32 100644 --- a/pkg/fftypes/subscription.go +++ b/pkg/fftypes/subscription.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -20,19 +20,62 @@ import ( "context" "database/sql/driver" "encoding/json" + "net/url" "github.com/hyperledger/firefly/internal/i18n" ) // SubscriptionFilter contains regular expressions to match against events.
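With the Validate signature change above, each caller now states its own item cap instead of relying on the implicit name limit. A hedged usage sketch, assuming the fftypes package context (the "header.topics" field name and the cap of 10 are taken from the message validation elsewhere in this patch):

// Topics are capped at 10 entries because each pin consumes 96 chars;
// name-style arrays keep passing FFStringNameItemsMax as before.
func validateTopics(ctx context.Context, topics FFStringArray) error {
	return topics.Validate(ctx, "header.topics", true, 10)
}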
All must match for an event to be dispatched to a subscription type SubscriptionFilter struct { - Events string `json:"events,omitempty"` - Topics string `json:"topics,omitempty"` + Events string `json:"events,omitempty"` + Message MessageFilter `json:"message,omitempty"` + Transaction TransactionFilter `json:"transaction,omitempty"` + BlockchainEvent BlockchainEventFilter `json:"blockchainevent,omitempty"` + Topic string `json:"topic,omitempty"` + DeprecatedTopics string `json:"topics,omitempty"` + DeprecatedTag string `json:"tag,omitempty"` + DeprecatedGroup string `json:"group,omitempty"` + DeprecatedAuthor string `json:"author,omitempty"` +} + +func NewSubscriptionFilterFromQuery(query url.Values) SubscriptionFilter { + return SubscriptionFilter{ + Events: query.Get("filter.events"), + Message: MessageFilter{ + Group: query.Get("filter.message.group"), + Tag: query.Get("filter.message.tag"), + Author: query.Get("filter.message.author"), + }, + BlockchainEvent: BlockchainEventFilter{ + Name: query.Get("filter.blockchain.name"), + Listener: query.Get("filter.blockchain.listener"), + }, + Transaction: TransactionFilter{ + Type: query.Get("filter.transaction.type"), + }, + Topic: query.Get("filter.topic"), + DeprecatedTag: query.Get("filter.tag"), + DeprecatedTopics: query.Get("filter.topics"), + DeprecatedGroup: query.Get("filter.group"), + DeprecatedAuthor: query.Get("filter.author"), + } +} + +type MessageFilter struct { Tag string `json:"tag,omitempty"` Group string `json:"group,omitempty"` Author string `json:"author,omitempty"` } +type TransactionFilter struct { + Type string `json:"type,omitempty"` +} + +type BlockchainEventFilter struct { + Name string `json:"name,omitempty"` + Listener string `json:"listener,omitempty"` +} + // SubOptsFirstEvent picks the first event that should be dispatched on the subscription, and can be a string containing an exact sequence as well as one of the enum values type SubOptsFirstEvent string @@ -134,3 +177,26 @@ func (so *SubscriptionOptions) Scan(src interface{}) error { func (so SubscriptionOptions) Value() (driver.Value, error) { return so.MarshalJSON() } + +func (sf SubscriptionFilter) Value() (driver.Value, error) { + return json.Marshal(&sf) +} + +func (sf *SubscriptionFilter) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case []byte: + return json.Unmarshal(src, &sf) + + case string: + if src == "" { + return nil + } + return json.Unmarshal([]byte(src), &sf) + + default: + return i18n.NewError(context.Background(), i18n.MsgScanFailed, src, sf) + } +} diff --git a/pkg/fftypes/subscription_test.go b/pkg/fftypes/subscription_test.go index f7eaa63a4d..604e4e3403 100644 --- a/pkg/fftypes/subscription_test.go +++ b/pkg/fftypes/subscription_test.go @@ -18,13 +18,13 @@ package fftypes import ( "encoding/json" + "net/url" "testing" "github.com/stretchr/testify/assert" ) func TestSubscriptionOptionsDatabaseSerialization(t *testing.T) { - firstEvent := SubOptsFirstEventNewest readAhead := uint16(50) yes := true @@ -36,6 +36,7 @@ func TestSubscriptionOptionsDatabaseSerialization(t *testing.T) { WithData: &yes, }, }, + Filter: SubscriptionFilter{}, } sub1.Options.TransportOptions()["my-nested-opts"] = map[string]interface{}{ "myopt1": 12345, @@ -47,9 +48,14 @@ func TestSubscriptionOptionsDatabaseSerialization(t *testing.T) { assert.NoError(t, err) assert.Equal(t, `{"firstEvent":"newest","my-nested-opts":{"myopt1":12345,"myopt2":"test"},"readAhead":50,"withData":true}`, string(b1.([]byte))) + f1, err := 
sub1.Filter.Value() + assert.NoError(t, err) + assert.Equal(t, `{"message":{},"transaction":{},"blockchainevent":{}}`, string(f1.([]byte))) + // Verify it restores ok sub2 := &Subscription{} err = sub2.Options.Scan(b1) + assert.NoError(t, err) + err = sub2.Filter.Scan(f1) assert.NoError(t, err) b2, err := sub1.Options.Value() assert.NoError(t, err) @@ -66,14 +72,26 @@ assert.Equal(t, float64(12345), sub2.Options.TransportOptions().GetObject("my-nested-opts")["myopt1"]) assert.Equal(t, "test", sub2.Options.TransportOptions().GetObject("my-nested-opts")["myopt2"]) + // Verify it can scan nil + err = sub2.Filter.Scan(nil) + assert.NoError(t, err) + // Verify it can also scan as a string err = sub2.Options.Scan(string(b1.([]byte))) assert.NoError(t, err) + err = sub2.Filter.Scan(string(f1.([]byte))) + assert.NoError(t, err) + + err = sub2.Filter.Scan(string("")) + assert.NoError(t, err) + // Out of luck with anything else err = sub2.Options.Scan(false) assert.Regexp(t, "FF10125", err) + err = sub2.Filter.Scan(false) + assert.Regexp(t, "FF10125", err) } func TestSubscriptionUnMarshalFail(t *testing.T) { @@ -87,5 +105,25 @@ err = json.Unmarshal([]byte(`{"readAhead": "!a number"}`), &SubscriptionOptions{}) assert.Regexp(t, "readAhead", err) +} + +func TestNewSubscriptionFilterFromQuery(t *testing.T) { + query, _ := url.ParseQuery("filter.events=message_confirmed&filter.topic=topic1&filter.message.author=did:firefly:org/author1&filter.blockchain.name=flapflip&filter.transaction.type=test&filter.group=deprecated") + expectedFilter := SubscriptionFilter{ + Events: "message_confirmed", + Topic: "topic1", + Message: MessageFilter{ + Author: "did:firefly:org/author1", + }, + BlockchainEvent: BlockchainEventFilter{ + Name: "flapflip", + }, + Transaction: TransactionFilter{ + Type: "test", + }, + DeprecatedGroup: "deprecated", + } + filter := NewSubscriptionFilterFromQuery(query) + assert.Equal(t, expectedFilter, filter) } diff --git a/pkg/fftypes/tokenpool.go b/pkg/fftypes/tokenpool.go index c40f348733..954f8d0a6c 100644 --- a/pkg/fftypes/tokenpool.go +++ b/pkg/fftypes/tokenpool.go @@ -23,8 +23,8 @@ import ( type TokenType = FFEnum var ( - TokenTypeFungible TokenType = ffEnum("tokentype", "fungible") - TokenTypeNonFungible TokenType = ffEnum("tokentype", "nonfungible") + TokenTypeFungible = ffEnum("tokentype", "fungible") + TokenTypeNonFungible = ffEnum("tokentype", "nonfungible") ) // TokenPoolState is the current confirmation state of a token pool @@ -33,11 +33,11 @@ type TokenPoolState = FFEnum var ( // TokenPoolStateUnknown is a token pool that may not yet be activated // (should not be used in the code - only set via database migration for previously-created pools) - TokenPoolStateUnknown TokenPoolState = ffEnum("tokenpoolstate", "unknown") + TokenPoolStateUnknown = ffEnum("tokenpoolstate", "unknown") // TokenPoolStatePending is a token pool that has been announced but not yet confirmed - TokenPoolStatePending TokenPoolState = ffEnum("tokenpoolstate", "pending") + TokenPoolStatePending = ffEnum("tokenpoolstate", "pending") // TokenPoolStateConfirmed is a token pool that has been confirmed on chain - TokenPoolStateConfirmed TokenPoolState = ffEnum("tokenpoolstate", "confirmed") + TokenPoolStateConfirmed = ffEnum("tokenpoolstate", "confirmed") ) type TokenPool struct { @@ -54,6 +54,7 @@ type TokenPool struct { State TokenPoolState `json:"state,omitempty" ffenum:"tokenpoolstate"` Created *FFTime
`json:"created,omitempty"` Config JSONObject `json:"config,omitempty"` // for REST calls only (not stored) + Info JSONObject `json:"info,omitempty"` TX TransactionRef `json:"tx,omitempty"` } @@ -73,7 +74,7 @@ func (t *TokenPool) Validate(ctx context.Context) (err error) { } func (t *TokenPoolAnnouncement) Topic() string { - return namespaceTopic(t.Pool.Namespace) + return typeNamespaceNameTopicHash("tokenpool", t.Pool.Namespace, t.Pool.Name) } func (t *TokenPoolAnnouncement) SetBroadcastMessage(msgID *UUID) { diff --git a/pkg/fftypes/tokenpool_test.go b/pkg/fftypes/tokenpool_test.go index d700327265..fb37aaf1c1 100644 --- a/pkg/fftypes/tokenpool_test.go +++ b/pkg/fftypes/tokenpool_test.go @@ -52,7 +52,7 @@ func TestTokenPoolDefinition(t *testing.T) { Name: "ok", } var def Definition = &TokenPoolAnnouncement{Pool: pool} - assert.Equal(t, "ff_ns_ok", def.Topic()) + assert.Equal(t, "73008386c5579b7015385528eb892f7773e13a20015c692f6b90b26e413fe8a4", def.Topic()) id := NewUUID() def.SetBroadcastMessage(id) diff --git a/pkg/fftypes/tokentransfer.go b/pkg/fftypes/tokentransfer.go index 5e3bd75bc9..5cd8fe7b3f 100644 --- a/pkg/fftypes/tokentransfer.go +++ b/pkg/fftypes/tokentransfer.go @@ -19,9 +19,9 @@ package fftypes type TokenTransferType = FFEnum var ( - TokenTransferTypeMint TokenType = ffEnum("tokentransfertype", "mint") - TokenTransferTypeBurn TokenType = ffEnum("tokentransfertype", "burn") - TokenTransferTypeTransfer TokenType = ffEnum("tokentransfertype", "transfer") + TokenTransferTypeMint = ffEnum("tokentransfertype", "mint") + TokenTransferTypeBurn = ffEnum("tokentransfertype", "burn") + TokenTransferTypeTransfer = ffEnum("tokentransfertype", "transfer") ) type TokenTransfer struct { diff --git a/pkg/fftypes/transaction.go b/pkg/fftypes/transaction.go index 0489766426..abbb5e4d25 100644 --- a/pkg/fftypes/transaction.go +++ b/pkg/fftypes/transaction.go @@ -18,21 +18,23 @@ package fftypes type TransactionType = FFEnum +const transactionBaseSizeEstimate = int64(256) + var ( // TransactionTypeNone deprecated - replaced by TransactionTypeUnpinned - TransactionTypeNone TransactionType = ffEnum("txtype", "none") + TransactionTypeNone = ffEnum("txtype", "none") // TransactionTypeUnpinned indicates the message will be sent without pinning any evidence to the blockchain. Not supported for broadcast. The FireFly transaction will be used to track the sends to all group members. 
- TransactionTypeUnpinned TransactionType = ffEnum("txtype", "unpinned") + TransactionTypeUnpinned = ffEnum("txtype", "unpinned") // TransactionTypeBatchPin represents a pinning transaction, that verifies the originator of the data, and sequences the event deterministically between parties - TransactionTypeBatchPin TransactionType = ffEnum("txtype", "batch_pin") + TransactionTypeBatchPin = ffEnum("txtype", "batch_pin") // TransactionTypeTokenPool represents a token pool creation - TransactionTypeTokenPool TransactionType = ffEnum("txtype", "token_pool") + TransactionTypeTokenPool = ffEnum("txtype", "token_pool") // TransactionTypeTokenTransfer represents a token transfer - TransactionTypeTokenTransfer TransactionType = ffEnum("txtype", "token_transfer") + TransactionTypeTokenTransfer = ffEnum("txtype", "token_transfer") // TransactionTypeContractInvoke is a smart contract invoke - TransactionTypeContractInvoke OpType = ffEnum("txtype", "contract_invoke") + TransactionTypeContractInvoke = ffEnum("txtype", "contract_invoke") // TransactionTypeTokenApproval represents a token approval - TransactionTypeTokenApproval TransactionType = ffEnum("txtype", "token_approval") + TransactionTypeTokenApproval = ffEnum("txtype", "token_approval") ) // TransactionRef refers to a transaction, in other types @@ -76,3 +78,7 @@ type TransactionStatus struct { Status OpStatus `json:"status"` Details []*TransactionStatusDetails `json:"details"` } + +func (tx *Transaction) Size() int64 { + return transactionBaseSizeEstimate // currently a static size assessment for caching +} diff --git a/pkg/fftypes/transaction_test.go b/pkg/fftypes/transaction_test.go new file mode 100644 index 0000000000..366f886c3c --- /dev/null +++ b/pkg/fftypes/transaction_test.go @@ -0,0 +1,27 @@ +// Copyright © 2021 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
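The new Size() gives cache layers a flat, cheap cost for each transaction rather than walking the struct. A sketch of the intended use against a byte-budgeted cache (the costedCache interface is illustrative, not FireFly's API; it also assumes the Transaction's UUID ID serves as the cache key):

// costedCache is a hypothetical cache that evicts on a total byte budget.
type costedCache interface {
	Set(key string, val interface{}, cost int64)
}

func cacheTransaction(c costedCache, tx *Transaction) {
	// The static 256-byte estimate keeps cost accounting predictable.
	c.Set(tx.ID.String(), tx, tx.Size())
}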
+ +package fftypes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTXSizeEstimate(t *testing.T) { + assert.Equal(t, transactionBaseSizeEstimate, (&Transaction{}).Size()) +} diff --git a/pkg/fftypes/transport_wrapper.go b/pkg/fftypes/transport_wrapper.go index 4823aedce2..363d809b13 100644 --- a/pkg/fftypes/transport_wrapper.go +++ b/pkg/fftypes/transport_wrapper.go @@ -19,8 +19,8 @@ package fftypes type TransportPayloadType = FFEnum var ( - TransportPayloadTypeMessage TransportPayloadType = ffEnum("transportpayload", "message") - TransportPayloadTypeBatch TransportPayloadType = ffEnum("transportpayload", "batch") + TransportPayloadTypeMessage = ffEnum("transportpayload", "message") + TransportPayloadTypeBatch = ffEnum("transportpayload", "batch") ) // TransportWrapper wraps payloads over data exchange transfers, for easy deserialization at target diff --git a/pkg/fftypes/transport_wrapper_test.go b/pkg/fftypes/transport_wrapper_test.go index 80207d2e28..7d09d63281 100644 --- a/pkg/fftypes/transport_wrapper_test.go +++ b/pkg/fftypes/transport_wrapper_test.go @@ -38,7 +38,8 @@ func TestBatchManifest(t *testing.T) { }, }, } - tm := tw.Batch.Manifest() + bp, _ := tw.Batch.Confirmed() + tm := bp.GenManifest(tw.Batch.Payload.Messages, tw.Batch.Payload.Data) assert.Equal(t, 2, len(tm.Messages)) assert.Equal(t, tw.Batch.Payload.Messages[0].Header.ID.String(), tm.Messages[0].ID.String()) assert.Equal(t, tw.Batch.Payload.Messages[1].Header.ID.String(), tm.Messages[1].ID.String()) @@ -51,10 +52,3 @@ assert.Equal(t, tw.Batch.Payload.Data[1].Hash.String(), tm.Data[1].Hash.String()) } - -func TestNillBatchManifest(t *testing.T) { - - tw := TransportWrapper{} - assert.Nil(t, tw.Batch.Manifest()) - -} diff --git a/pkg/fftypes/verifier.go b/pkg/fftypes/verifier.go index ab320e013c..0397e59fcf 100644 --- a/pkg/fftypes/verifier.go +++ b/pkg/fftypes/verifier.go @@ -23,11 +23,11 @@ type VerifierType = FFEnum var ( // VerifierTypeEthAddress is an Ethereum (secp256k1) address string - VerifierTypeEthAddress VerifierType = ffEnum("verifiertype", "ethereum_address") + VerifierTypeEthAddress = ffEnum("verifiertype", "ethereum_address") // VerifierTypeMSPIdentity is the MSP id (X509 distinguished name) of an issued signing certificate / keypair - VerifierTypeMSPIdentity VerifierType = ffEnum("verifiertype", "fabric_msp_id") + VerifierTypeMSPIdentity = ffEnum("verifiertype", "fabric_msp_id") // VerifierTypeFFDXPeerID is the peer identifier that FireFly Data Exchange verifies (using plugin specific tech) when receiving data - VerifierTypeFFDXPeerID VerifierType = ffEnum("verifiertype", "dx_peer_id") + VerifierTypeFFDXPeerID = ffEnum("verifiertype", "dx_peer_id") ) // VerifierRef is just the type + value (public key identifier etc.) from the verifier diff --git a/pkg/fftypes/websocket_actions.go b/pkg/fftypes/websocket_actions.go index 1fa0a0fdd5..4591cd4c5d 100644 --- a/pkg/fftypes/websocket_actions.go +++ b/pkg/fftypes/websocket_actions.go @@ -1,4 +1,4 @@ -// Copyright © 2021 Kaleido, Inc. +// Copyright © 2022 Kaleido, Inc.
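The reworked TestBatchManifest above reflects the new manifest flow: the batch is first converted with Confirmed() into its persisted form, and the manifest is generated from that plus the payload arrays. A hedged sketch of the calling pattern (method names are taken from the test; the type returned by Confirmed() is assumed):

// manifestMessageIDs is illustrative only - it mirrors the calls in the test.
func manifestMessageIDs(b *Batch) []string {
	bp, _ := b.Confirmed()
	tm := bp.GenManifest(b.Payload.Messages, b.Payload.Data)
	ids := make([]string, 0, len(tm.Messages))
	for _, m := range tm.Messages {
		ids = append(ids, m.ID.String())
	}
	return ids
}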
// // SPDX-License-Identifier: Apache-2.0 // @@ -21,15 +21,15 @@ type WSClientPayloadType = FFEnum var ( // WSClientActionStart is a request to the server to start delivering messages to the client - WSClientActionStart WSClientPayloadType = ffEnum("wstype", "start") + WSClientActionStart = ffEnum("wstype", "start") // WSClientActionAck acknowledges an event that was delivered, allowing further messages to be sent - WSClientActionAck WSClientPayloadType = ffEnum("wstype", "ack") + WSClientActionAck = ffEnum("wstype", "ack") // WSProtocolErrorEventType is a special event "type" field for the server to send the client, if it performs a ProtocolError - WSProtocolErrorEventType WSClientPayloadType = ffEnum("wstype", "protocol_error") + WSProtocolErrorEventType = ffEnum("wstype", "protocol_error") // WSClientActionChangeNotifcation is a special event type that is a local database change event, and never requires an ack - WSClientActionChangeNotifcation WSClientPayloadType = ffEnum("wstype", "change_notification") + WSClientActionChangeNotifcation = ffEnum("wstype", "change_notification") ) // WSClientActionBase is the base fields of all client actions sent on the websocket diff --git a/pkg/sharedstorage/plugin.go b/pkg/sharedstorage/plugin.go index d63cb0a350..67f9fc630e 100644 --- a/pkg/sharedstorage/plugin.go +++ b/pkg/sharedstorage/plugin.go @@ -38,11 +38,11 @@ type Plugin interface { // Capabilities returns capabilities - not called until after Init Capabilities() *Capabilities - // PublishData publishes data to the Shared Storage, and returns a payload reference ID - PublishData(ctx context.Context, data io.Reader) (payloadRef string, err error) + // UploadData publishes data to the Shared Storage, and returns a payload reference ID + UploadData(ctx context.Context, data io.Reader) (payloadRef string, err error) - // RetrieveData reads data back from IPFS using the payload reference format returned from PublishData - RetrieveData(ctx context.Context, payloadRef string) (data io.ReadCloser, err error) + // DownloadData reads data back from the Shared Storage using the payload reference format returned from UploadData + DownloadData(ctx context.Context, payloadRef string) (data io.ReadCloser, err error) } type Callbacks interface { diff --git a/pkg/tokens/plugin.go b/pkg/tokens/plugin.go index 112204750f..6bd705fb0a 100644 --- a/pkg/tokens/plugin.go +++ b/pkg/tokens/plugin.go @@ -45,7 +45,7 @@ type Plugin interface { CreateTokenPool(ctx context.Context, opID *fftypes.UUID, pool *fftypes.TokenPool) (complete bool, err error) // ActivateTokenPool activates a pool in order to begin receiving events - ActivateTokenPool(ctx context.Context, opID *fftypes.UUID, pool *fftypes.TokenPool, event *fftypes.BlockchainEvent) (complete bool, err error) + ActivateTokenPool(ctx context.Context, opID *fftypes.UUID, pool *fftypes.TokenPool, blockchainInfo fftypes.JSONObject) (complete bool, err error) // MintTokens mints new tokens in a pool and adds them to the recipient's account MintTokens(ctx context.Context, opID *fftypes.UUID, poolProtocolID string, mint *fftypes.TokenTransfer) error @@ -104,9 +104,8 @@ type TokenPool struct { // ProtocolID is the ID assigned to this pool by the connector (must be unique for this connector) ProtocolID string - // TransactionID is the FireFly-assigned ID to correlate this to a transaction (optional) - // Not guaranteed to be set for pool creation events triggered outside of FireFly - TransactionID *fftypes.UUID + // TX is the FireFly-assigned information to correlate this to a transaction
(optional) + TX fftypes.TransactionRef // Connector is the configured name of this connector Connector string @@ -114,6 +113,12 @@ type TokenPool struct { // Standard is the well-defined token standard that this pool conforms to (optional) Standard string + // Symbol is the short token symbol, if the connector uses one (optional) + Symbol string + + // Info is any other connector-specific info on the pool that may be worth saving (optional) + Info fftypes.JSONObject + // Event contains info on the underlying blockchain event for this pool creation Event blockchain.Event } diff --git a/test/data/erc20/ERC20WithData.json b/test/data/erc20/ERC20WithData.json new file mode 100644 index 0000000000..9e6842873d --- /dev/null +++ b/test/data/erc20/ERC20WithData.json @@ -0,0 +1,489 @@ +{ + "contracts": { + "contracts/ERC20WithData.sol:ERC20WithData": { + "abi": [ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferred", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "approveWithData", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": 
"uint256" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "burnWithData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "decimals", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "subtractedValue", + "type": "uint256" + } + ], + "name": "decreaseAllowance", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "addedValue", + "type": "uint256" + } + ], + "name": "increaseAllowance", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "mintWithData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "renounceOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, 
+ { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "transferWithData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bin": "60806040523480156200001157600080fd5b506040518060400160405280600981526020017f6532652d657263323000000000000000000000000000000000000000000000008152506040518060400160405280600481526020017f45453230000000000000000000000000000000000000000000000000000000008152506200009e62000092620000d860201b60201c565b620000e060201b60201c565b8160049080519060200190620000b6929190620001a4565b508060059080519060200190620000cf929190620001a4565b505050620002b8565b600033905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b828054620001b29062000283565b90600052602060002090601f016020900481019282620001d6576000855562000222565b82601f10620001f157805160ff191683800117855562000222565b8280016001018555821562000222579182015b828111156200022157825182559160200191906001019062000204565b5b50905062000231919062000235565b5090565b5b808211156200025057600081600090555060010162000236565b5090565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b600060028204905060018216806200029c57607f821691505b602082108103620002b257620002b162000254565b5b50919050565b6120eb80620002c86000396000f3fe608060405234801561001057600080fd5b50600436106101165760003560e01c806383e49c53116100a2578063a457c2d711610071578063a457c2d7146102e3578063a9059cbb14610313578063c0ea7a7a14610343578063dd62ed3e1461035f578063f2fde38b1461038f57610116565b806383e49c531461026f5780638da5cb5b1461028b57806395d89b41146102a957806396adfe42146102c757610116565b8063313ce567116100e9578063313ce567146101b757806339509351146101d557806370a0823114610205578063715018a6146102355780637b5eb5641461023f57610116565b806306fdde031461011b578063095ea7b31461013957806318160ddd1461016957806323b872dd14610187575b600080fd5b6101236103ab565b604051610130919061149c565b60405180910390f35b610153600480360381019061014e919061155c565b61043d565b60405161016091906115b7565b60405180910390f35b61017161045b565b60405161017e91906115e1565b60405180910390f35b6101a1600480360381019061019c91906115fc565b610465565b6040516101ae91906115b7565b60405180910390f35b6101bf61055d565b6040516101cc919061166b565b60405180910390f35b6101ef60048036038101906101ea919061155c565b610566565b6040516101fc91906115b7565b60405180910390f35b61021f600480360381019061021a9190611686565b610612565b60405161022c91906115e1565b60405180910390f35b61023d61065b565b005b61025960048036038101906102549190611718565b6106e3565b60405161026691906115b7565b60405180910390f35b61028960048036038101906102849190611718565b6106f9565b005b61029361077e565b6040516102a0919061179b565b60405180910390f35b6102b16107a7565b6040516102be919061149c565b60405180910390f35b6102e160048036038101906102dc9190611718565b610839565b005b6102fd60048036038101906102f8919061155c565b6108c5565b60405161030a91906115b7565b60405180910390f35b61032d6004803603810190610328919061155c565b6109b0565b60405161033a91906115b7565b60405180910390f35b61035d600480360381019061035891906117b6565b6109ce565b005b6103796004803603810190610374919061183e565b610a2c565b60405161038691906115e1565b60405180910390f35b6103a960048036038101906103a49190611686565b610ab3565b005b6060600480546103ba906118ad565b80601f016020809104026020016
04051908101604052809291908181526020018280546103e6906118ad565b80156104335780601f1061040857610100808354040283529160200191610433565b820191906000526020600020905b81548152906001019060200180831161041657829003601f168201915b5050505050905090565b600061045161044a610baa565b8484610bb2565b6001905092915050565b6000600354905090565b6000610472848484610d7b565b6000600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006104bd610baa565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205490508281101561053d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161053490611950565b60405180910390fd5b61055185610549610baa565b858403610bb2565b60019150509392505050565b60006012905090565b6000610608610573610baa565b848460026000610581610baa565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610603919061199f565b610bb2565b6001905092915050565b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b610663610baa565b73ffffffffffffffffffffffffffffffffffffffff1661068161077e565b73ffffffffffffffffffffffffffffffffffffffff16146106d7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106ce90611a41565b60405180910390fd5b6106e16000610ffd565b565b60006106ef858561043d565b9050949350505050565b610701610baa565b73ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff161461076e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161076590611ad3565b60405180910390fd5b61077884846110c1565b50505050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b6060600580546107b6906118ad565b80601f01602080910402602001604051908101604052809291908181526020018280546107e2906118ad565b801561082f5780601f106108045761010080835404028352916020019161082f565b820191906000526020600020905b81548152906001019060200180831161081257829003601f168201915b5050505050905090565b610841610baa565b73ffffffffffffffffffffffffffffffffffffffff1661085f61077e565b73ffffffffffffffffffffffffffffffffffffffff16146108b5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016108ac90611a41565b60405180910390fd5b6108bf8484611299565b50505050565b600080600260006108d4610baa565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905082811015610991576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161098890611b65565b60405180910390fd5b6109a561099c610baa565b85858403610bb2565b600191505092915050565b60006109c46109bd610baa565b8484610d7b565b6001905092915050565b6109d6610baa565b73ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff1603610a1857610a1284846109b0565b50610a25565b610a23858585610465565b505b5050505050565b6000600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b610abb610baa
565b73ffffffffffffffffffffffffffffffffffffffff16610ad961077e565b73ffffffffffffffffffffffffffffffffffffffff1614610b2f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b2690611a41565b60405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610b9e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b9590611bf7565b60405180910390fd5b610ba781610ffd565b50565b600033905090565b600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1603610c21576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c1890611c89565b60405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1603610c90576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c8790611d1b565b60405180910390fd5b80600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92583604051610d6e91906115e1565b60405180910390a3505050565b600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1603610dea576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610de190611dad565b60405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1603610e59576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610e5090611e3f565b60405180910390fd5b610e648383836113f9565b6000600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905081811015610eeb576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610ee290611ed1565b60405180910390fd5b818103600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254610f80919061199f565b925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef84604051610fe491906115e1565b60405180910390a3610ff78484846113fe565b50505050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1603611130576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161112790611f63565b60405180910390fd5b61113c826000836113f9565b6000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050818110156111c3576040517f08c379a000000000000000000000000000000000000000000000000000000
00081526004016111ba90611ff5565b60405180910390fd5b818103600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550816003600082825461121b9190612015565b92505081905550600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8460405161128091906115e1565b60405180910390a3611294836000846113fe565b505050565b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1603611308576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016112ff90612095565b60405180910390fd5b611314600083836113f9565b8060036000828254611326919061199f565b9250508190555080600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825461137c919061199f565b925050819055508173ffffffffffffffffffffffffffffffffffffffff16600073ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040516113e191906115e1565b60405180910390a36113f5600083836113fe565b5050565b505050565b505050565b600081519050919050565b600082825260208201905092915050565b60005b8381101561143d578082015181840152602081019050611422565b8381111561144c576000848401525b50505050565b6000601f19601f8301169050919050565b600061146e82611403565b611478818561140e565b935061148881856020860161141f565b61149181611452565b840191505092915050565b600060208201905081810360008301526114b68184611463565b905092915050565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006114f3826114c8565b9050919050565b611503816114e8565b811461150e57600080fd5b50565b600081359050611520816114fa565b92915050565b6000819050919050565b61153981611526565b811461154457600080fd5b50565b60008135905061155681611530565b92915050565b60008060408385031215611573576115726114be565b5b600061158185828601611511565b925050602061159285828601611547565b9150509250929050565b60008115159050919050565b6115b18161159c565b82525050565b60006020820190506115cc60008301846115a8565b92915050565b6115db81611526565b82525050565b60006020820190506115f660008301846115d2565b92915050565b600080600060608486031215611615576116146114be565b5b600061162386828701611511565b935050602061163486828701611511565b925050604061164586828701611547565b9150509250925092565b600060ff82169050919050565b6116658161164f565b82525050565b6000602082019050611680600083018461165c565b92915050565b60006020828403121561169c5761169b6114be565b5b60006116aa84828501611511565b91505092915050565b600080fd5b600080fd5b600080fd5b60008083601f8401126116d8576116d76116b3565b5b8235905067ffffffffffffffff8111156116f5576116f46116b8565b5b602083019150836001820283011115611711576117106116bd565b5b9250929050565b60008060008060608587031215611732576117316114be565b5b600061174087828801611511565b945050602061175187828801611547565b935050604085013567ffffffffffffffff811115611772576117716114c3565b5b61177e878288016116c2565b925092505092959194509250565b611795816114e8565b82525050565b60006020820190506117b0600083018461178c565b92915050565b6000806000806000608086880312156117d2576117d16114be565b5b60006117e088828901611511565b95505060206117f188828901611511565b945050604061180288828901611547565b935050606086013567ffffffffffffffff811115611823576118226114c3565b5b61182f888289016116c2565b92509250509295509295909350565b60008060408385031215611855576118546114be565b5b600061186385828601611511565b925050602061187485828601611511565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000
000000600052602260045260246000fd5b600060028204905060018216806118c557607f821691505b6020821081036118d8576118d761187e565b5b50919050565b7f45524332303a207472616e7366657220616d6f756e742065786365656473206160008201527f6c6c6f77616e6365000000000000000000000000000000000000000000000000602082015250565b600061193a60288361140e565b9150611945826118de565b604082019050919050565b600060208201905081810360008301526119698161192d565b9050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006119aa82611526565b91506119b583611526565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156119ea576119e9611970565b5b828201905092915050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b6000611a2b60208361140e565b9150611a36826119f5565b602082019050919050565b60006020820190508181036000830152611a5a81611a1e565b9050919050565b7f455243323057697468446174613a2063616c6c6572206973206e6f74206f776e60008201527f6572000000000000000000000000000000000000000000000000000000000000602082015250565b6000611abd60228361140e565b9150611ac882611a61565b604082019050919050565b60006020820190508181036000830152611aec81611ab0565b9050919050565b7f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f7760008201527f207a65726f000000000000000000000000000000000000000000000000000000602082015250565b6000611b4f60258361140e565b9150611b5a82611af3565b604082019050919050565b60006020820190508181036000830152611b7e81611b42565b9050919050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b6000611be160268361140e565b9150611bec82611b85565b604082019050919050565b60006020820190508181036000830152611c1081611bd4565b9050919050565b7f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460008201527f7265737300000000000000000000000000000000000000000000000000000000602082015250565b6000611c7360248361140e565b9150611c7e82611c17565b604082019050919050565b60006020820190508181036000830152611ca281611c66565b9050919050565b7f45524332303a20617070726f766520746f20746865207a65726f20616464726560008201527f7373000000000000000000000000000000000000000000000000000000000000602082015250565b6000611d0560228361140e565b9150611d1082611ca9565b604082019050919050565b60006020820190508181036000830152611d3481611cf8565b9050919050565b7f45524332303a207472616e736665722066726f6d20746865207a65726f20616460008201527f6472657373000000000000000000000000000000000000000000000000000000602082015250565b6000611d9760258361140e565b9150611da282611d3b565b604082019050919050565b60006020820190508181036000830152611dc681611d8a565b9050919050565b7f45524332303a207472616e7366657220746f20746865207a65726f206164647260008201527f6573730000000000000000000000000000000000000000000000000000000000602082015250565b6000611e2960238361140e565b9150611e3482611dcd565b604082019050919050565b60006020820190508181036000830152611e5881611e1c565b9050919050565b7f45524332303a207472616e7366657220616d6f756e742065786365656473206260008201527f616c616e63650000000000000000000000000000000000000000000000000000602082015250565b6000611ebb60268361140e565b9150611ec682611e5f565b604082019050919050565b60006020820190508181036000830152611eea81611eae565b9050919050565b7f45524332303a206275726e2066726f6d20746865207a65726f2061646472657360008201527f7300000000000000000000000000000000000000000000000000000000000000602082015250565b6000611f4d60218361140e565b9150611f5882611ef1565b604082019050919050565b60006020820190508181036000830152611f7c81611f40565b9050919050565b7f45524332303a2062757
26e20616d6f756e7420657863656564732062616c616e60008201527f6365000000000000000000000000000000000000000000000000000000000000602082015250565b6000611fdf60228361140e565b9150611fea82611f83565b604082019050919050565b6000602082019050818103600083015261200e81611fd2565b9050919050565b600061202082611526565b915061202b83611526565b92508282101561203e5761203d611970565b5b828203905092915050565b7f45524332303a206d696e7420746f20746865207a65726f206164647265737300600082015250565b600061207f601f8361140e565b915061208a82612049565b602082019050919050565b600060208201905081810360008301526120ae81612072565b905091905056fea2646970667358221220aae08d74316893d58e8a79f83b11a7962df6b7e5d2ba6e8e6859b0e12c83b92f64736f6c634300080d0033", + "devdoc": { + "details": "Mintable+burnable form of ERC20 with data event support.", + "kind": "dev", + "methods": { + "allowance(address,address)": { + "details": "See {IERC20-allowance}." + }, + "approve(address,uint256)": { + "details": "See {IERC20-approve}. Requirements: - `spender` cannot be the zero address." + }, + "balanceOf(address)": { + "details": "See {IERC20-balanceOf}." + }, + "decimals()": { + "details": "Returns the number of decimals used to get its user representation. For example, if `decimals` equals `2`, a balance of `505` tokens should be displayed to a user as `5.05` (`505 / 10 ** 2`). Tokens usually opt for a value of 18, imitating the relationship between Ether and Wei. This is the value {ERC20} uses, unless this function is overridden; NOTE: This information is only used for _display_ purposes: it in no way affects any of the arithmetic of the contract, including {IERC20-balanceOf} and {IERC20-transfer}." + }, + "decreaseAllowance(address,uint256)": { + "details": "Atomically decreases the allowance granted to `spender` by the caller. This is an alternative to {approve} that can be used as a mitigation for problems described in {IERC20-approve}. Emits an {Approval} event indicating the updated allowance. Requirements: - `spender` cannot be the zero address. - `spender` must have allowance for the caller of at least `subtractedValue`." + }, + "increaseAllowance(address,uint256)": { + "details": "Atomically increases the allowance granted to `spender` by the caller. This is an alternative to {approve} that can be used as a mitigation for problems described in {IERC20-approve}. Emits an {Approval} event indicating the updated allowance. Requirements: - `spender` cannot be the zero address." + }, + "name()": { + "details": "Returns the name of the token." + }, + "owner()": { + "details": "Returns the address of the current owner." + }, + "renounceOwnership()": { + "details": "Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner." + }, + "symbol()": { + "details": "Returns the symbol of the token, usually a shorter version of the name." + }, + "totalSupply()": { + "details": "See {IERC20-totalSupply}." + }, + "transfer(address,uint256)": { + "details": "See {IERC20-transfer}. Requirements: - `recipient` cannot be the zero address. - the caller must have a balance of at least `amount`." + }, + "transferFrom(address,address,uint256)": { + "details": "See {IERC20-transferFrom}. Emits an {Approval} event indicating the updated allowance. This is not required by the EIP. See the note at the beginning of {ERC20}. 
Requirements: - `sender` and `recipient` cannot be the zero address. - `sender` must have a balance of at least `amount`. - the caller must have allowance for ``sender``'s tokens of at least `amount`." + }, + "transferOwnership(address)": { + "details": "Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner." + } + }, + "version": 1 + } + } + }, + "version": "0.8.13+commit.abaa5c0e.Darwin.appleclang" +} \ No newline at end of file diff --git a/test/data/erc721/ERC721WithData.json b/test/data/erc721/ERC721WithData.json new file mode 100644 index 0000000000..db954e86b3 --- /dev/null +++ b/test/data/erc721/ERC721WithData.json @@ -0,0 +1,572 @@ +{ + "contracts": { + "contracts/ERC721WithData.sol:ERC721WithData": { + "abi": [ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "approved", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "operator", + "type": "address" + }, + { + "indexed": false, + "internalType": "bool", + "name": "approved", + "type": "bool" + } + ], + "name": "ApprovalForAll", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferred", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "approveWithData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "burnWithData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, 
+ { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "getApproved", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "operator", + "type": "address" + } + ], + "name": "isApprovedForAll", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "name": "mintWithData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "ownerOf", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "renounceOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "safeTransferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "_data", + "type": "bytes" + } + ], + "name": "safeTransferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "operator", + "type": "address" + }, + { + "internalType": "bool", + "name": "approved", + "type": "bool" + } + ], + "name": "setApprovalForAll", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "operator", + "type": "address" + }, + { + "internalType": "bool", + "name": "approved", + "type": "bool" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "setApprovalForAllWithData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes4", + "name": "interfaceId", + "type": "bytes4" + } + ], + "name": "supportsInterface", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + 
"type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "tokenURI", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "name": "transferWithData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bin": "60806040523480156200001157600080fd5b506040518060400160405280600a81526020017f6532652d657263373231000000000000000000000000000000000000000000008152506040518060400160405280600581526020017f45453732310000000000000000000000000000000000000000000000000000008152506200009e62000092620000d860201b60201c565b620000e060201b60201c565b8160019080519060200190620000b6929190620001a4565b508060029080519060200190620000cf929190620001a4565b505050620002b8565b600033905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b828054620001b29062000283565b90600052602060002090601f016020900481019282620001d6576000855562000222565b82601f10620001f157805160ff191683800117855562000222565b8280016001018555821562000222579182015b828111156200022157825182559160200191906001019062000204565b5b50905062000231919062000235565b5090565b5b808211156200025057600081600090555060010162000236565b5090565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b600060028204905060018216806200029c57607f821691505b602082108103620002b257620002b162000254565b5b50919050565b6130bd80620002c86000396000f3fe608060405234801561001057600080fd5b50600436106101375760003560e01c80637b5eb564116100b8578063a22cb4651161007c578063a22cb46514610324578063b88d4fde14610340578063c0ea7a7a1461035c578063c87b56dd14610378578063e985e9c5146103a8578063f2fde38b146103d857610137565b80637b5eb5641461029457806383e49c53146102b05780638da5cb5b146102cc57806395d89b41146102ea57806396adfe421461030857610137565b806323b872dd116100ff57806323b872dd146101f257806342842e0e1461020e5780636352211e1461022a57806370a082311461025a578063715018a61461028a57610137565b806301ffc9a71461013c57806306fdde031461016c578063081812fc1461018a578063095ea7b3146101ba5780631cbf311d146101d6575b600080fd5b61015660048036038101906101519190611ce2565b6103f4565b6040516101639190611d2a565b60405180910390f35b6101746104d6565b6040516101819190611dde565b
60405180910390f35b6101a4600480360381019061019f9190611e36565b610568565b6040516101b19190611ea4565b60405180910390f35b6101d460048036038101906101cf9190611eeb565b6105ed565b005b6101f060048036038101906101eb9190611fbc565b610704565b005b61020c60048036038101906102079190612030565b610714565b005b61022860048036038101906102239190612030565b610774565b005b610244600480360381019061023f9190611e36565b610794565b6040516102519190611ea4565b60405180910390f35b610274600480360381019061026f9190612083565b610845565b60405161028191906120bf565b60405180910390f35b6102926108fc565b005b6102ae60048036038101906102a991906120da565b610984565b005b6102ca60048036038101906102c591906120da565b610994565b005b6102d4610a18565b6040516102e19190611ea4565b60405180910390f35b6102f2610a41565b6040516102ff9190611dde565b60405180910390f35b610322600480360381019061031d91906120da565b610ad3565b005b61033e6004803603810190610339919061214e565b610ba4565b005b61035a600480360381019061035591906122be565b610bba565b005b61037660048036038101906103719190612341565b610c1c565b005b610392600480360381019061038d9190611e36565b610c73565b60405161039f9190611dde565b60405180910390f35b6103c260048036038101906103bd91906123c9565b610d1a565b6040516103cf9190611d2a565b60405180910390f35b6103f260048036038101906103ed9190612083565b610dae565b005b60007f80ac58cd000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614806104bf57507f5b5e139f000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916145b806104cf57506104ce82610ea5565b5b9050919050565b6060600180546104e590612438565b80601f016020809104026020016040519081016040528092919081815260200182805461051190612438565b801561055e5780601f106105335761010080835404028352916020019161055e565b820191906000526020600020905b81548152906001019060200180831161054157829003601f168201915b5050505050905090565b600061057382610f0f565b6105b2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016105a9906124db565b60405180910390fd5b6005600083815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050919050565b60006105f882610794565b90508073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1603610668576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161065f9061256d565b60405180910390fd5b8073ffffffffffffffffffffffffffffffffffffffff16610687610f7b565b73ffffffffffffffffffffffffffffffffffffffff1614806106b657506106b5816106b0610f7b565b610d1a565b5b6106f5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106ec906125ff565b60405180910390fd5b6106ff8383610f83565b505050565b61070e8484610ba4565b50505050565b61072561071f610f7b565b8261103c565b610764576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161075b90612691565b60405180910390fd5b61076f83838361111a565b505050565b61078f83838360405180602001604052806000815250610bba565b505050565b6000806003600084815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff160361083c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161083390612723565b60405180910390fd5b80915050919050565b60008073ffffffffffffffffffffffffffffffffffffffff168273fffffffffffffffffffffffffffffffffffff
fff16036108b5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016108ac906127b5565b60405180910390fd5b600460008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b610904610f7b565b73ffffffffffffffffffffffffffffffffffffffff16610922610a18565b73ffffffffffffffffffffffffffffffffffffffff1614610978576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161096f90612821565b60405180910390fd5b6109826000611375565b565b61098e84846105ed565b50505050565b61099c610f7b565b73ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff1614610a09576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610a00906128b3565b60405180910390fd5b610a1283611439565b50505050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b606060028054610a5090612438565b80601f0160208091040260200160405190810160405280929190818152602001828054610a7c90612438565b8015610ac95780601f10610a9e57610100808354040283529160200191610ac9565b820191906000526020600020905b815481529060010190602001808311610aac57829003601f168201915b5050505050905090565b610adb610f7b565b73ffffffffffffffffffffffffffffffffffffffff16610af9610a18565b73ffffffffffffffffffffffffffffffffffffffff1614610b4f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b4690612821565b60405180910390fd5b610b9e848484848080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505061154a565b50505050565b610bb6610baf610f7b565b83836115a5565b5050565b610bcb610bc5610f7b565b8361103c565b610c0a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c0190612691565b60405180910390fd5b610c1684848484611711565b50505050565b610c6c85858585858080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050610bba565b5050505050565b6060610c7e82610f0f565b610cbd576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610cb490612945565b60405180910390fd5b6000610cc761176d565b90506000815111610ce75760405180602001604052806000815250610d12565b80610cf1846117aa565b604051602001610d029291906129a1565b6040516020818303038152906040525b915050919050565b6000600660008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff16905092915050565b610db6610f7b565b73ffffffffffffffffffffffffffffffffffffffff16610dd4610a18565b73ffffffffffffffffffffffffffffffffffffffff1614610e2a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610e2190612821565b60405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610e99576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610e9090612a37565b60405180910390fd5b610ea281611375565b50565b60007f01ffc9a7000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916149050919050565b60008073ffffffffffffffffffffffffffffffffffffffff166003600084815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffff
ffffffffffffffffffffffffff1614159050919050565b600033905090565b816005600083815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550808273ffffffffffffffffffffffffffffffffffffffff16610ff683610794565b73ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92560405160405180910390a45050565b600061104782610f0f565b611086576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161107d90612ac9565b60405180910390fd5b600061109183610794565b90508073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16148061110057508373ffffffffffffffffffffffffffffffffffffffff166110e884610568565b73ffffffffffffffffffffffffffffffffffffffff16145b8061111157506111108185610d1a565b5b91505092915050565b8273ffffffffffffffffffffffffffffffffffffffff1661113a82610794565b73ffffffffffffffffffffffffffffffffffffffff1614611190576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161118790612b5b565b60405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16036111ff576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016111f690612bed565b60405180910390fd5b61120a83838361190a565b611215600082610f83565b6001600460008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282546112659190612c3c565b925050819055506001600460008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282546112bc9190612c70565b92505081905550816003600083815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550808273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef60405160405180910390a4505050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600061144482610794565b90506114528160008461190a565b61145d600083610f83565b6001600460008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282546114ad9190612c3c565b925050819055506003600083815260200190815260200160002060006101000a81549073ffffffffffffffffffffffffffffffffffffffff021916905581600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef60405160405180910390a45050565b611554838361190f565b6115616000848484611adc565b6115a0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161159790612d38565b60405180910390fd5b505050565b8173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1603611613576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161160a90612da4565b60405180910390fd5b80600660008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008
473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167f17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31836040516117049190611d2a565b60405180910390a3505050565b61171c84848461111a565b61172884848484611adc565b611767576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161175e90612d38565b60405180910390fd5b50505050565b60606040518060400160405280601081526020017f66697265666c793a2f2f746f6b656e2f00000000000000000000000000000000815250905090565b6060600082036117f1576040518060400160405280600181526020017f30000000000000000000000000000000000000000000000000000000000000008152509050611905565b600082905060005b6000821461182357808061180c90612dc4565b915050600a8261181c9190612e3b565b91506117f9565b60008167ffffffffffffffff81111561183f5761183e612193565b5b6040519080825280601f01601f1916602001820160405280156118715781602001600182028036833780820191505090505b5090505b600085146118fe5760018261188a9190612c3c565b9150600a856118999190612e6c565b60306118a59190612c70565b60f81b8183815181106118bb576118ba612e9d565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350600a856118f79190612e3b565b9450611875565b8093505050505b919050565b505050565b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff160361197e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161197590612f18565b60405180910390fd5b61198781610f0f565b156119c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016119be90612f84565b60405180910390fd5b6119d36000838361190a565b6001600460008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254611a239190612c70565b92505081905550816003600083815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550808273ffffffffffffffffffffffffffffffffffffffff16600073ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef60405160405180910390a45050565b6000611afd8473ffffffffffffffffffffffffffffffffffffffff16611c63565b15611c56578373ffffffffffffffffffffffffffffffffffffffff1663150b7a02611b26610f7b565b8786866040518563ffffffff1660e01b8152600401611b489493929190612ff9565b6020604051808303816000875af1925050508015611b8457506040513d601f19601f82011682018060405250810190611b81919061305a565b60015b611c06573d8060008114611bb4576040519150601f19603f3d011682016040523d82523d6000602084013e611bb9565b606091505b506000815103611bfe576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401611bf590612d38565b60405180910390fd5b805181602001fd5b63150b7a0260e01b7bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916817bffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614915050611c5b565b600190505b949350505050565b600080823b905060008111915050919050565b6000604051905090565b600080fd5b600080fd5b60007fffffffff0000000000000000000000000000000000000000000000000000000082169050919050565b611cbf81611c8a565b8114611cca57600080fd5b50565b600081359050611cdc81611cb6565b92915050565b600060208284031215611cf857611cf7611c80565b5b6000611d0684828501611ccd565b91505092915050565b60008115159050919050565b611d2481611d0f565b82525050565b6000602082019050611d3f6000830184611d1b565b92915050565b6000
81519050919050565b600082825260208201905092915050565b60005b83811015611d7f578082015181840152602081019050611d64565b83811115611d8e576000848401525b50505050565b6000601f19601f8301169050919050565b6000611db082611d45565b611dba8185611d50565b9350611dca818560208601611d61565b611dd381611d94565b840191505092915050565b60006020820190508181036000830152611df88184611da5565b905092915050565b6000819050919050565b611e1381611e00565b8114611e1e57600080fd5b50565b600081359050611e3081611e0a565b92915050565b600060208284031215611e4c57611e4b611c80565b5b6000611e5a84828501611e21565b91505092915050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000611e8e82611e63565b9050919050565b611e9e81611e83565b82525050565b6000602082019050611eb96000830184611e95565b92915050565b611ec881611e83565b8114611ed357600080fd5b50565b600081359050611ee581611ebf565b92915050565b60008060408385031215611f0257611f01611c80565b5b6000611f1085828601611ed6565b9250506020611f2185828601611e21565b9150509250929050565b611f3481611d0f565b8114611f3f57600080fd5b50565b600081359050611f5181611f2b565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f840112611f7c57611f7b611f57565b5b8235905067ffffffffffffffff811115611f9957611f98611f5c565b5b602083019150836001820283011115611fb557611fb4611f61565b5b9250929050565b60008060008060608587031215611fd657611fd5611c80565b5b6000611fe487828801611ed6565b9450506020611ff587828801611f42565b935050604085013567ffffffffffffffff81111561201657612015611c85565b5b61202287828801611f66565b925092505092959194509250565b60008060006060848603121561204957612048611c80565b5b600061205786828701611ed6565b935050602061206886828701611ed6565b925050604061207986828701611e21565b9150509250925092565b60006020828403121561209957612098611c80565b5b60006120a784828501611ed6565b91505092915050565b6120b981611e00565b82525050565b60006020820190506120d460008301846120b0565b92915050565b600080600080606085870312156120f4576120f3611c80565b5b600061210287828801611ed6565b945050602061211387828801611e21565b935050604085013567ffffffffffffffff81111561213457612133611c85565b5b61214087828801611f66565b925092505092959194509250565b6000806040838503121561216557612164611c80565b5b600061217385828601611ed6565b925050602061218485828601611f42565b9150509250929050565b600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6121cb82611d94565b810181811067ffffffffffffffff821117156121ea576121e9612193565b5b80604052505050565b60006121fd611c76565b905061220982826121c2565b919050565b600067ffffffffffffffff82111561222957612228612193565b5b61223282611d94565b9050602081019050919050565b82818337600083830152505050565b600061226161225c8461220e565b6121f3565b90508281526020810184848401111561227d5761227c61218e565b5b61228884828561223f565b509392505050565b600082601f8301126122a5576122a4611f57565b5b81356122b584826020860161224e565b91505092915050565b600080600080608085870312156122d8576122d7611c80565b5b60006122e687828801611ed6565b94505060206122f787828801611ed6565b935050604061230887828801611e21565b925050606085013567ffffffffffffffff81111561232957612328611c85565b5b61233587828801612290565b91505092959194509250565b60008060008060006080868803121561235d5761235c611c80565b5b600061236b88828901611ed6565b955050602061237c88828901611ed6565b945050604061238d88828901611e21565b935050606086013567ffffffffffffffff8111156123ae576123ad611c85565b5b6123ba88828901611f66565b92509250509295509295909350565b600080604083850312156123e0576123df611c80565b5b60006123ee85828601611ed6565b92505060206123ff85828601611ed6565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b6
000600282049050600182168061245057607f821691505b60208210810361246357612462612409565b5b50919050565b7f4552433732313a20617070726f76656420717565727920666f72206e6f6e657860008201527f697374656e7420746f6b656e0000000000000000000000000000000000000000602082015250565b60006124c5602c83611d50565b91506124d082612469565b604082019050919050565b600060208201905081810360008301526124f4816124b8565b9050919050565b7f4552433732313a20617070726f76616c20746f2063757272656e74206f776e6560008201527f7200000000000000000000000000000000000000000000000000000000000000602082015250565b6000612557602183611d50565b9150612562826124fb565b604082019050919050565b600060208201905081810360008301526125868161254a565b9050919050565b7f4552433732313a20617070726f76652063616c6c6572206973206e6f74206f7760008201527f6e6572206e6f7220617070726f76656420666f7220616c6c0000000000000000602082015250565b60006125e9603883611d50565b91506125f48261258d565b604082019050919050565b60006020820190508181036000830152612618816125dc565b9050919050565b7f4552433732313a207472616e736665722063616c6c6572206973206e6f74206f60008201527f776e6572206e6f7220617070726f766564000000000000000000000000000000602082015250565b600061267b603183611d50565b91506126868261261f565b604082019050919050565b600060208201905081810360008301526126aa8161266e565b9050919050565b7f4552433732313a206f776e657220717565727920666f72206e6f6e657869737460008201527f656e7420746f6b656e0000000000000000000000000000000000000000000000602082015250565b600061270d602983611d50565b9150612718826126b1565b604082019050919050565b6000602082019050818103600083015261273c81612700565b9050919050565b7f4552433732313a2062616c616e636520717565727920666f7220746865207a6560008201527f726f206164647265737300000000000000000000000000000000000000000000602082015250565b600061279f602a83611d50565b91506127aa82612743565b604082019050919050565b600060208201905081810360008301526127ce81612792565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b600061280b602083611d50565b9150612816826127d5565b602082019050919050565b6000602082019050818103600083015261283a816127fe565b9050919050565b7f45524337323157697468446174613a2063616c6c6572206973206e6f74206f7760008201527f6e65720000000000000000000000000000000000000000000000000000000000602082015250565b600061289d602383611d50565b91506128a882612841565b604082019050919050565b600060208201905081810360008301526128cc81612890565b9050919050565b7f4552433732314d657461646174613a2055524920717565727920666f72206e6f60008201527f6e6578697374656e7420746f6b656e0000000000000000000000000000000000602082015250565b600061292f602f83611d50565b915061293a826128d3565b604082019050919050565b6000602082019050818103600083015261295e81612922565b9050919050565b600081905092915050565b600061297b82611d45565b6129858185612965565b9350612995818560208601611d61565b80840191505092915050565b60006129ad8285612970565b91506129b98284612970565b91508190509392505050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b6000612a21602683611d50565b9150612a2c826129c5565b604082019050919050565b60006020820190508181036000830152612a5081612a14565b9050919050565b7f4552433732313a206f70657261746f7220717565727920666f72206e6f6e657860008201527f697374656e7420746f6b656e0000000000000000000000000000000000000000602082015250565b6000612ab3602c83611d50565b9150612abe82612a57565b604082019050919050565b60006020820190508181036000830152612ae281612aa6565b9050919050565b7f4552433732313a207472616e73666572206f6620746f6b656e2074686174206960008201527f73206e6f74206f776e0000000000000000000000000000000000
000000000000602082015250565b6000612b45602983611d50565b9150612b5082612ae9565b604082019050919050565b60006020820190508181036000830152612b7481612b38565b9050919050565b7f4552433732313a207472616e7366657220746f20746865207a65726f2061646460008201527f7265737300000000000000000000000000000000000000000000000000000000602082015250565b6000612bd7602483611d50565b9150612be282612b7b565b604082019050919050565b60006020820190508181036000830152612c0681612bca565b9050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000612c4782611e00565b9150612c5283611e00565b925082821015612c6557612c64612c0d565b5b828203905092915050565b6000612c7b82611e00565b9150612c8683611e00565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03821115612cbb57612cba612c0d565b5b828201905092915050565b7f4552433732313a207472616e7366657220746f206e6f6e20455243373231526560008201527f63656976657220696d706c656d656e7465720000000000000000000000000000602082015250565b6000612d22603283611d50565b9150612d2d82612cc6565b604082019050919050565b60006020820190508181036000830152612d5181612d15565b9050919050565b7f4552433732313a20617070726f766520746f2063616c6c657200000000000000600082015250565b6000612d8e601983611d50565b9150612d9982612d58565b602082019050919050565b60006020820190508181036000830152612dbd81612d81565b9050919050565b6000612dcf82611e00565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203612e0157612e00612c0d565b5b600182019050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b6000612e4682611e00565b9150612e5183611e00565b925082612e6157612e60612e0c565b5b828204905092915050565b6000612e7782611e00565b9150612e8283611e00565b925082612e9257612e91612e0c565b5b828206905092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4552433732313a206d696e7420746f20746865207a65726f2061646472657373600082015250565b6000612f02602083611d50565b9150612f0d82612ecc565b602082019050919050565b60006020820190508181036000830152612f3181612ef5565b9050919050565b7f4552433732313a20746f6b656e20616c7265616479206d696e74656400000000600082015250565b6000612f6e601c83611d50565b9150612f7982612f38565b602082019050919050565b60006020820190508181036000830152612f9d81612f61565b9050919050565b600081519050919050565b600082825260208201905092915050565b6000612fcb82612fa4565b612fd58185612faf565b9350612fe5818560208601611d61565b612fee81611d94565b840191505092915050565b600060808201905061300e6000830187611e95565b61301b6020830186611e95565b61302860408301856120b0565b818103606083015261303a8184612fc0565b905095945050505050565b60008151905061305481611cb6565b92915050565b6000602082840312156130705761306f611c80565b5b600061307e84828501613045565b9150509291505056fea2646970667358221220d74d462d2b1ae886a3506b37c071c0c0b9289f82e1c9e7bbb77dc38a59ab6b6264736f6c634300080d0033", + "devdoc": { + "details": "Mintable+burnable form of ERC721 with data event support.", + "kind": "dev", + "methods": { + "approve(address,uint256)": { + "details": "See {IERC721-approve}." + }, + "balanceOf(address)": { + "details": "See {IERC721-balanceOf}." + }, + "getApproved(uint256)": { + "details": "See {IERC721-getApproved}." + }, + "isApprovedForAll(address,address)": { + "details": "See {IERC721-isApprovedForAll}." + }, + "name()": { + "details": "See {IERC721Metadata-name}." + }, + "owner()": { + "details": "Returns the address of the current owner." + }, + "ownerOf(uint256)": { + "details": "See {IERC721-ownerOf}." 
+ }, + "renounceOwnership()": { + "details": "Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner." + }, + "safeTransferFrom(address,address,uint256)": { + "details": "See {IERC721-safeTransferFrom}." + }, + "safeTransferFrom(address,address,uint256,bytes)": { + "details": "See {IERC721-safeTransferFrom}." + }, + "setApprovalForAll(address,bool)": { + "details": "See {IERC721-setApprovalForAll}." + }, + "supportsInterface(bytes4)": { + "details": "See {IERC165-supportsInterface}." + }, + "symbol()": { + "details": "See {IERC721Metadata-symbol}." + }, + "tokenURI(uint256)": { + "details": "See {IERC721Metadata-tokenURI}." + }, + "transferFrom(address,address,uint256)": { + "details": "See {IERC721-transferFrom}." + }, + "transferOwnership(address)": { + "details": "Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner." + } + }, + "version": 1 + } + } + }, + "version": "0.8.13+commit.abaa5c0e.Darwin.appleclang" +} \ No newline at end of file diff --git a/test/data/simplestorage/simple_storage.json b/test/data/simplestorage/simple_storage.json new file mode 100644 index 0000000000..57ce28dde5 --- /dev/null +++ b/test/data/simplestorage/simple_storage.json @@ -0,0 +1,55 @@ +{ + "contracts": { + "simple_storage.sol:SimpleStorage": { + "abi": [ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Changed", + "type": "event" + }, + { + "inputs": [], + "name": "get", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "newValue", + "type": "uint256" + } + ], + "name": "set", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bin": "608060405234801561001057600080fd5b5061019e806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806360fe47b11461003b5780636d4ce63c14610057575b600080fd5b61005560048036038101906100509190610111565b610075565b005b61005f6100cd565b60405161006c919061014d565b60405180910390f35b806000819055503373ffffffffffffffffffffffffffffffffffffffff167fb52dda022b6c1a1f40905a85f257f689aa5d69d850e49cf939d688fbe5af5946826040516100c2919061014d565b60405180910390a250565b60008054905090565b600080fd5b6000819050919050565b6100ee816100db565b81146100f957600080fd5b50565b60008135905061010b816100e5565b92915050565b600060208284031215610127576101266100d6565b5b6000610135848285016100fc565b91505092915050565b610147816100db565b82525050565b6000602082019050610162600083018461013e565b9291505056fea2646970667358221220e6cbd7725b98b234d07bc1823b60ac065b567c6645d15c8f8f6986e5fa5317c664736f6c634300080b0033" + } + }, + "version": "0.8.11+commit.d7f03943.Darwin.appleclang" +} diff --git a/test/data/simplestorage/simplestorage.abi.json b/test/data/simplestorage/simplestorage.abi.json deleted file mode 100644 index 3e031b88c9..0000000000 --- a/test/data/simplestorage/simplestorage.abi.json +++ /dev/null @@ -1,47 +0,0 @@ -[ - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "_from", - "type": 
"address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "_value", - "type": "uint256" - } - ], - "name": "Changed", - "type": "event" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "newValue", - "type": "uint256" - } - ], - "name": "set", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "get", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - } -] \ No newline at end of file diff --git a/test/data/simplestorage/simplestorage.bin b/test/data/simplestorage/simplestorage.bin deleted file mode 100644 index 76a4073c28f4c34ecb4f2496b56f014927d1b11c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 446 zcmYdjNN@-;X%J0h5C~6TX!si)kjOZ%A(25Lfx$9?;q$))4EY2WGl4_~GoWhIgaNMLB35E@`y{2vPttKX`7i%C02O5R~YRO_ek zZ=I`RGj9Yune)@~TF39F>m%Koz@{ETHFZ${&;o{rkO=`3z>L2@UgHFyvw-4>4DT8f z8E%I~Hv*mi6B-a8RgI<-fC0h{lzbW%J!xV9*o*{)rWR%)(L_e|@I*$nM22f&(Lkk% zjHWG3t&E8bf5M`H3L%OU8QmcUHwD4SaY)NFagBts9QA%b(T5*Juki!0X u+ofp&#jgK diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index c388b05077..8425c73c26 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -83,15 +83,23 @@ func pollForUp(t *testing.T, client *resty.Client) { assert.Equal(t, 200, resp.StatusCode()) } -func validateReceivedMessages(ts *testState, client *resty.Client, topic string, msgType fftypes.MessageType, txtype fftypes.TransactionType, count int) (data []*fftypes.Data) { +func validateReceivedMessages(ts *testState, client *resty.Client, topic string, msgType fftypes.MessageType, txtype fftypes.TransactionType, count int) (data fftypes.DataArray) { var group *fftypes.Bytes32 - messages := GetMessages(ts.t, client, ts.startTime, msgType, topic, 200) - for i, message := range messages { - ts.t.Logf("Message %d: %+v", i, *message) - if group != nil { - assert.Equal(ts.t, group.String(), message.Header.Group.String(), "All messages must be same group") + var messages []*fftypes.Message + events := GetMessageEvents(ts.t, client, ts.startTime, topic, 200) + for i, event := range events { + if event.Message != nil { + message := event.Message + ts.t.Logf("Message %d: %+v", i, *message) + if message.Header.Type != msgType { + continue + } + if group != nil { + assert.Equal(ts.t, group.String(), message.Header.Group.String(), "All messages must be same group") + } + group = message.Header.Group + messages = append(messages, message) } - group = message.Header.Group } assert.Equal(ts.t, count, len(messages)) @@ -137,7 +145,6 @@ func validateReceivedMessages(ts *testState, client *resty.Client, topic string, func validateAccountBalances(t *testing.T, client *resty.Client, poolID *fftypes.UUID, tokenIndex string, balances map[string]int64) { for key, balance := range balances { account := GetTokenBalance(t, client, poolID, tokenIndex, key) - assert.Equal(t, "erc1155", account.Connector) assert.Equal(t, balance, account.Balance.Int().Int64()) } } @@ -336,10 +343,12 @@ func wsReader(conn *websocket.Conn, dbChanges bool) (chan *fftypes.EventDelivery func waitForEvent(t *testing.T, c chan *fftypes.EventDelivery, eventType fftypes.EventType, ref *fftypes.UUID) { for { - eventDelivery := <-c - if eventDelivery.Type == eventType && (ref == nil || *ref == *eventDelivery.Reference) { + ed := <-c + if ed.Type == eventType && (ref == nil || *ref == *ed.Reference) { + t.Logf("Detected '%s' event for ref '%s'", ed.Type, ed.Reference) 
return } + t.Logf("Ignored event '%s'", ed.ID) } } diff --git a/test/e2e/ethereum_contract_test.go b/test/e2e/ethereum_contract_test.go index b98aa19924..d7cdaec6fd 100644 --- a/test/e2e/ethereum_contract_test.go +++ b/test/e2e/ethereum_contract_test.go @@ -17,10 +17,9 @@ package e2e import ( - "encoding/hex" "encoding/json" "fmt" - "io/ioutil" + "os" "testing" "time" @@ -108,40 +107,6 @@ func simpleStorageFFIGet() *fftypes.FFIMethod { } } -func loadSimpleStorageABI(t *testing.T) map[string]string { - abi, err := ioutil.ReadFile("../data/simplestorage/simplestorage.abi.json") - require.NoError(t, err) - bytecode, err := ioutil.ReadFile("../data/simplestorage/simplestorage.bin") - require.NoError(t, err) - return map[string]string{ - "abi": string(abi), - "bytecode": "0x" + hex.EncodeToString(bytecode), - } -} - -func uploadABI(t *testing.T, client *resty.Client, abi map[string]string) (result uploadABIResult) { - path := "/abis" - resp, err := client.R(). - SetMultipartFormData(abi). - SetResult(&result). - Post(path) - require.NoError(t, err) - require.Equal(t, 200, resp.StatusCode(), "POST %s [%d]: %s", path, resp.StatusCode(), resp.String()) - return result -} - -func deployABI(t *testing.T, client *resty.Client, identity, abiID string) (result deployABIResult) { - path := "/abis/" + abiID - resp, err := client.R(). - SetHeader("x-firefly-from", identity). - SetHeader("x-firefly-sync", "true"). - SetResult(&result). - Post(path) - require.NoError(t, err) - require.Equal(t, 200, resp.StatusCode(), "POST %s [%d]: %s", path, resp.StatusCode(), resp.String()) - return result -} - func invokeEthContract(t *testing.T, client *resty.Client, identity, contractAddress, method string, body interface{}) { path := "/contracts/" + contractAddress + "/" + method resp, err := client.R(). @@ -165,18 +130,13 @@ type EthereumContractTestSuite struct { func (suite *EthereumContractTestSuite) SetupSuite() { suite.testState = beforeE2ETest(suite.T()) stack := readStackFile(suite.T()) - - abi := loadSimpleStorageABI(suite.T()) - suite.ethClient = NewResty(suite.T()) suite.ethClient.SetBaseURL(fmt.Sprintf("http://localhost:%d", stack.Members[0].ExposedConnectorPort)) suite.ethIdentity = suite.testState.org1key.Value - - abiResult := uploadABI(suite.T(), suite.ethClient, abi) - contractResult := deployABI(suite.T(), suite.ethClient, suite.ethIdentity, abiResult.ID) - - suite.contractAddress = contractResult.ContractAddress - + suite.contractAddress = os.Getenv("CONTRACT_ADDRESS") + if suite.contractAddress == "" { + suite.T().Fatal("CONTRACT_ADDRESS must be set") + } suite.T().Logf("contractAddress: %s", suite.contractAddress) res, err := CreateFFI(suite.T(), suite.testState.client1, simpleStorageFFI()) diff --git a/test/e2e/restclient_test.go b/test/e2e/restclient_test.go index 9e818e8a28..c5210f5a31 100644 --- a/test/e2e/restclient_test.go +++ b/test/e2e/restclient_test.go @@ -90,6 +90,20 @@ func GetNamespaces(client *resty.Client) (*resty.Response, error) { Get(urlGetNamespaces) } +func GetMessageEvents(t *testing.T, client *resty.Client, startTime time.Time, topic string, expectedStatus int) (events []*fftypes.EnrichedEvent) { + path := urlGetEvents + resp, err := client.R(). + SetQueryParam("created", fmt.Sprintf(">%d", startTime.UnixNano())). + SetQueryParam("topic", topic). + SetQueryParam("sort", "sequence"). + SetQueryParam("fetchreferences", "true"). + SetResult(&events). 
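+ // fetchreferences=true returns each event with its referenced message embedded, which is what lets validateReceivedMessages filter on event.Message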
+ Get(path) + require.NoError(t, err) + require.Equal(t, expectedStatus, resp.StatusCode(), "GET %s [%d]: %s (count=%d)", path, resp.StatusCode(), resp.String(), len(events)) + return events +} + func GetMessages(t *testing.T, client *resty.Client, startTime time.Time, msgType fftypes.MessageType, topic string, expectedStatus int) (msgs []*fftypes.Message) { path := urlGetMessages resp, err := client.R(). @@ -104,7 +118,7 @@ func GetMessages(t *testing.T, client *resty.Client, startTime time.Time, msgTyp return msgs } -func GetData(t *testing.T, client *resty.Client, startTime time.Time, expectedStatus int) (data []*fftypes.Data) { +func GetData(t *testing.T, client *resty.Client, startTime time.Time, expectedStatus int) (data fftypes.DataArray) { path := urlGetData resp, err := client.R(). SetQueryParam("created", fmt.Sprintf(">%d", startTime.UnixNano())). @@ -115,7 +129,7 @@ func GetData(t *testing.T, client *resty.Client, startTime time.Time, expectedSt return data } -func GetDataForMessage(t *testing.T, client *resty.Client, startTime time.Time, msgID *fftypes.UUID) (data []*fftypes.Data) { +func GetDataForMessage(t *testing.T, client *resty.Client, startTime time.Time, msgID *fftypes.UUID) (data fftypes.DataArray) { path := urlGetMessages path += "/" + msgID.String() + "/data" resp, err := client.R(). diff --git a/test/e2e/run.sh b/test/e2e/run.sh index 54ecf5d0bc..67ef7b37ab 100755 --- a/test/e2e/run.sh +++ b/test/e2e/run.sh @@ -86,6 +86,21 @@ if [ "$CREATE_STACK" == "true" ]; then $CLI start -b $STACK_NAME checkOk $? + + if [ "$TEST_SUITE" == "TestEthereumE2ESuite" ]; then + prefix='contract address: ' + output=$($CLI deploy $STACK_NAME ../data/simplestorage/simple_storage.json | grep address) + export CONTRACT_ADDRESS=${output#"$prefix"} + fi +fi + +if [ "$TOKENS_PROVIDER" == "erc20_erc721" ]; then + prefix='contract address: ' + output=$($CLI deploy $STACK_NAME ../data/erc20/ERC20WithData.json | grep address) + export ERC20_CONTRACT_ADDRESS=${output#"$prefix"} + prefix='contract address: ' + output=$($CLI deploy $STACK_NAME ../data/erc721/ERC721WithData.json | grep address) + export ERC721_CONTRACT_ADDRESS=${output#"$prefix"} fi $CLI info $STACK_NAME @@ -95,3 +110,14 @@ export STACK_FILE go clean -testcache && go test -v . -run $TEST_SUITE checkOk $? + +if [ "$RESTART" == "true" ]; then + $CLI stop $STACK_NAME + checkOk $? + + $CLI start $STACK_NAME + checkOk $? + + go clean -testcache && go test -v . -run $TEST_SUITE + checkOk $? 
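+ # re-running the suite against the stopped/started stack confirms events and state survive a restart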
+fi diff --git a/test/e2e/stack.go b/test/e2e/stack.go index dfac7b25e9..d3004be39e 100644 --- a/test/e2e/stack.go +++ b/test/e2e/stack.go @@ -25,6 +25,7 @@ type Stack struct { Name string `json:"name,omitempty"` ExposedBlockchainPort int `json:"exposedGethPort,omitempty"` BlockchainProvider string `json:"blockchainProvider"` + TokenProviders []string `json:"tokenProviders"` Members []*Member `json:"members,omitempty"` } diff --git a/test/e2e/tokens_test.go b/test/e2e/tokens_test.go index 3e42b7db7a..5f1c59c6b8 100644 --- a/test/e2e/tokens_test.go +++ b/test/e2e/tokens_test.go @@ -18,6 +18,7 @@ package e2e import ( "fmt" + "os" "time" "github.com/hyperledger/firefly/pkg/fftypes" @@ -27,7 +28,28 @@ import ( type TokensTestSuite struct { suite.Suite - testState *testState + testState *testState + connector string + erc20Address string + erc721Address string +} + +func (suite *TokensTestSuite) SetupSuite() { + suite.testState = beforeE2ETest(suite.T()) + stack := readStackFile(suite.T()) + suite.connector = stack.TokenProviders[0] + if suite.connector == "erc20_erc721" { + suite.erc20Address = os.Getenv("ERC20_CONTRACT_ADDRESS") + if suite.erc20Address == "" { + suite.T().Fatal("ERC20_CONTRACT_ADDRESS must be set") + } + suite.T().Logf("ERC20 address: %s", suite.erc20Address) + suite.erc721Address = os.Getenv("ERC721_CONTRACT_ADDRESS") + if suite.erc721Address == "" { + suite.T().Fatal("ERC721_CONTRACT_ADDRESS must be set") + } + suite.T().Logf("ERC721 address: %s", suite.erc721Address) + } } func (suite *TokensTestSuite) BeforeTest(suiteName, testName string) { @@ -45,9 +67,14 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { suite.T().Logf("Pool name: %s", poolName) pool := &fftypes.TokenPool{ - Name: poolName, - Type: fftypes.TokenTypeFungible, + Name: poolName, + Type: fftypes.TokenTypeFungible, + Config: fftypes.JSONObject{}, } + if suite.erc20Address != "" { + pool.Config["address"] = suite.erc20Address + } + poolResp := CreateTokenPool(suite.T(), suite.testState.client1, pool, false) poolID := poolResp.ID @@ -55,7 +82,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { pools = GetTokenPools(suite.T(), suite.testState.client1, suite.testState.startTime) assert.Equal(suite.T(), 1, len(pools)) assert.Equal(suite.T(), "default", pools[0].Namespace) - assert.Equal(suite.T(), "erc1155", pools[0].Connector) + assert.Equal(suite.T(), suite.connector, pools[0].Connector) assert.Equal(suite.T(), poolName, pools[0].Name) assert.Equal(suite.T(), fftypes.TokenTypeFungible, pools[0].Type) assert.NotEmpty(suite.T(), pools[0].ProtocolID) @@ -64,7 +91,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { pools = GetTokenPools(suite.T(), suite.testState.client1, suite.testState.startTime) assert.Equal(suite.T(), 1, len(pools)) assert.Equal(suite.T(), "default", pools[0].Namespace) - assert.Equal(suite.T(), "erc1155", pools[0].Connector) + assert.Equal(suite.T(), suite.connector, pools[0].Connector) assert.Equal(suite.T(), poolName, pools[0].Name) assert.Equal(suite.T(), fftypes.TokenTypeFungible, pools[0].Type) assert.NotEmpty(suite.T(), pools[0].ProtocolID) @@ -75,13 +102,14 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { Operator: suite.testState.org2key.Value, Approved: true, }, + Pool: poolName, } approvalOut := TokenApproval(suite.T(), suite.testState.client1, approval, false) waitForEvent(suite.T(), received1, fftypes.EventTypeApprovalConfirmed, approvalOut.LocalID) approvals := GetTokenApprovals(suite.T(), suite.testState.client1, poolID)
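// exactly one approval should be indexed, attributed to the stack's configured connector rather than a hard-coded "erc1155"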
assert.Equal(suite.T(), 1, len(approvals)) - assert.Equal(suite.T(), "erc1155", approvals[0].Connector) + assert.Equal(suite.T(), suite.connector, approvals[0].Connector) assert.Equal(suite.T(), true, approvals[0].Approved) transfer := &fftypes.TokenTransferInput{ @@ -93,7 +121,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { waitForEvent(suite.T(), received1, fftypes.EventTypeTransferConfirmed, transferOut.LocalID) transfers := GetTokenTransfers(suite.T(), suite.testState.client1, poolID) assert.Equal(suite.T(), 1, len(transfers)) - assert.Equal(suite.T(), "erc1155", transfers[0].Connector) + assert.Equal(suite.T(), suite.connector, transfers[0].Connector) assert.Equal(suite.T(), fftypes.TokenTransferTypeMint, transfers[0].Type) assert.Equal(suite.T(), int64(1), transfers[0].Amount.Int().Int64()) validateAccountBalances(suite.T(), suite.testState.client1, poolID, "", map[string]int64{ @@ -103,7 +131,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { waitForEvent(suite.T(), received2, fftypes.EventTypeTransferConfirmed, nil) transfers = GetTokenTransfers(suite.T(), suite.testState.client2, poolID) assert.Equal(suite.T(), 1, len(transfers)) - assert.Equal(suite.T(), "erc1155", transfers[0].Connector) + assert.Equal(suite.T(), suite.connector, transfers[0].Connector) assert.Equal(suite.T(), fftypes.TokenTransferTypeMint, transfers[0].Type) assert.Equal(suite.T(), int64(1), transfers[0].Amount.Int().Int64()) validateAccountBalances(suite.T(), suite.testState.client2, poolID, "", map[string]int64{ @@ -131,7 +159,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { waitForEvent(suite.T(), received1, fftypes.EventTypeMessageConfirmed, transferOut.Message) transfers = GetTokenTransfers(suite.T(), suite.testState.client1, poolID) assert.Equal(suite.T(), 2, len(transfers)) - assert.Equal(suite.T(), "erc1155", transfers[0].Connector) + assert.Equal(suite.T(), suite.connector, transfers[0].Connector) assert.Equal(suite.T(), fftypes.TokenTransferTypeTransfer, transfers[0].Type) assert.Equal(suite.T(), int64(1), transfers[0].Amount.Int().Int64()) data := GetDataForMessage(suite.T(), suite.testState.client1, suite.testState.startTime, transfers[0].Message) @@ -145,7 +173,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { waitForEvent(suite.T(), received2, fftypes.EventTypeMessageConfirmed, transferOut.Message) transfers = GetTokenTransfers(suite.T(), suite.testState.client2, poolID) assert.Equal(suite.T(), 2, len(transfers)) - assert.Equal(suite.T(), "erc1155", transfers[0].Connector) + assert.Equal(suite.T(), suite.connector, transfers[0].Connector) assert.Equal(suite.T(), fftypes.TokenTransferTypeTransfer, transfers[0].Type) assert.Equal(suite.T(), int64(1), transfers[0].Amount.Int().Int64()) validateAccountBalances(suite.T(), suite.testState.client2, poolID, "", map[string]int64{ @@ -162,7 +190,7 @@ func (suite *TokensTestSuite) TestE2EFungibleTokensAsync() { waitForEvent(suite.T(), received2, fftypes.EventTypeTransferConfirmed, transferOut.LocalID) transfers = GetTokenTransfers(suite.T(), suite.testState.client2, poolID) assert.Equal(suite.T(), 3, len(transfers)) - assert.Equal(suite.T(), "erc1155", transfers[0].Connector) + assert.Equal(suite.T(), suite.connector, transfers[0].Connector) assert.Equal(suite.T(), fftypes.TokenTransferTypeBurn, transfers[0].Type) assert.Equal(suite.T(), "", transfers[0].TokenIndex) assert.Equal(suite.T(), int64(1), transfers[0].Amount.Int().Int64()) @@ -174,7 +202,7 @@ func (suite *TokensTestSuite) 
TestE2EFungibleTokensAsync() { waitForEvent(suite.T(), received1, fftypes.EventTypeTransferConfirmed, nil) transfers = GetTokenTransfers(suite.T(), suite.testState.client1, poolID) assert.Equal(suite.T(), 3, len(transfers)) - assert.Equal(suite.T(), "erc1155", transfers[0].Connector) + assert.Equal(suite.T(), suite.connector, transfers[0].Connector) assert.Equal(suite.T(), fftypes.TokenTransferTypeBurn, transfers[0].Type) assert.Equal(suite.T(), "", transfers[0].TokenIndex) assert.Equal(suite.T(), int64(1), transfers[0].Amount.Int().Int64()) @@ -209,9 +237,14 @@ func (suite *TokensTestSuite) TestE2ENonFungibleTokensSync() { suite.T().Logf("Pool name: %s", poolName) pool := &fftypes.TokenPool{ - Name: poolName, - Type: fftypes.TokenTypeNonFungible, + Name: poolName, + Type: fftypes.TokenTypeNonFungible, + Config: fftypes.JSONObject{}, + } + if suite.erc721Address != "" { + pool.Config["address"] = suite.erc721Address } + poolOut := CreateTokenPool(suite.T(), suite.testState.client1, pool, true) assert.Equal(suite.T(), "default", poolOut.Namespace) assert.Equal(suite.T(), poolName, poolOut.Name) @@ -242,12 +275,15 @@ func (suite *TokensTestSuite) TestE2ENonFungibleTokensSync() { waitForEvent(suite.T(), received1, fftypes.EventTypeApprovalConfirmed, approvalOut.LocalID) approvals := GetTokenApprovals(suite.T(), suite.testState.client1, poolID) assert.Equal(suite.T(), 1, len(approvals)) - assert.Equal(suite.T(), "erc1155", approvals[0].Connector) + assert.Equal(suite.T(), suite.connector, approvals[0].Connector) assert.Equal(suite.T(), true, approvals[0].Approved) transfer := &fftypes.TokenTransferInput{ - TokenTransfer: fftypes.TokenTransfer{Amount: *fftypes.NewFFBigInt(1)}, - Pool: poolName, + TokenTransfer: fftypes.TokenTransfer{ + TokenIndex: "1", + Amount: *fftypes.NewFFBigInt(1), + }, + Pool: poolName, } transferOut := MintTokens(suite.T(), suite.testState.client1, transfer, true) assert.Equal(suite.T(), fftypes.TokenTransferTypeMint, transferOut.Type) From d2c15fccf91f48ce5d98b5ca56821318e411e881 Mon Sep 17 00:00:00 2001 From: Nicko Guyer Date: Mon, 21 Mar 2022 14:22:18 -0400 Subject: [PATCH 6/6] Pass blockchain metrics by reference Signed-off-by: Nicko Guyer --- internal/events/batch_pin_complete.go | 2 +- internal/events/blockchain_event.go | 4 ++-- internal/events/blockchain_event_test.go | 2 +- internal/events/token_pool_created.go | 2 +- internal/events/tokens_approved.go | 2 +- internal/events/tokens_transferred.go | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/events/batch_pin_complete.go b/internal/events/batch_pin_complete.go index 1a0d8985c7..dddb381759 100644 --- a/internal/events/batch_pin_complete.go +++ b/internal/events/batch_pin_complete.go @@ -62,7 +62,7 @@ func (em *eventManager) BatchPinComplete(bi blockchain.Plugin, batchPin *blockch if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return err } - em.emitBlockchainEventMetric(batchPin.Event) + em.emitBlockchainEventMetric(&batchPin.Event) private := batchPin.BatchPayloadRef == "" if err := em.persistContexts(ctx, batchPin, signingKey, private); err != nil { return err diff --git a/internal/events/blockchain_event.go b/internal/events/blockchain_event.go index 4afc8e7c4c..f711b6c9d6 100644 --- a/internal/events/blockchain_event.go +++ b/internal/events/blockchain_event.go @@ -101,7 +101,7 @@ func (em *eventManager) persistBlockchainEvent(ctx context.Context, chainEvent * return nil } -func (em *eventManager) emitBlockchainEventMetric(event blockchain.Event) { +func (em 
*eventManager) emitBlockchainEventMetric(event *blockchain.Event) { if em.metrics.IsMetricsEnabled() && event.Location != "" && event.Signature != "" { em.metrics.BlockchainEvent(event.Location, event.Signature) } @@ -123,7 +123,7 @@ func (em *eventManager) BlockchainEvent(event *blockchain.EventWithSubscription) if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return err } - em.emitBlockchainEventMetric(event.Event) + em.emitBlockchainEventMetric(&event.Event) return nil }) return err != nil, err diff --git a/internal/events/blockchain_event_test.go b/internal/events/blockchain_event_test.go index a5754b5600..bea78da9a7 100644 --- a/internal/events/blockchain_event_test.go +++ b/internal/events/blockchain_event_test.go @@ -165,6 +165,6 @@ func TestBlockchainEventMetric(t *testing.T) { Signature: "John Hancock", } - em.emitBlockchainEventMetric(event) + em.emitBlockchainEventMetric(&event) mm.AssertExpectations(t) } diff --git a/internal/events/token_pool_created.go b/internal/events/token_pool_created.go index 0964d0a625..2faad11468 100644 --- a/internal/events/token_pool_created.go +++ b/internal/events/token_pool_created.go @@ -53,7 +53,7 @@ func (em *eventManager) confirmPool(ctx context.Context, pool *fftypes.TokenPool if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return err } - em.emitBlockchainEventMetric(*ev) + em.emitBlockchainEventMetric(ev) if op, err := em.findTXOperation(ctx, pool.TX.ID, fftypes.OpTypeTokenActivatePool); err != nil { return err } else if op == nil { diff --git a/internal/events/tokens_approved.go b/internal/events/tokens_approved.go index f1cfabda42..b5945739a5 100644 --- a/internal/events/tokens_approved.go +++ b/internal/events/tokens_approved.go @@ -88,7 +88,7 @@ func (em *eventManager) persistTokenApproval(ctx context.Context, approval *toke if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return false, err } - em.emitBlockchainEventMetric(approval.Event) + em.emitBlockchainEventMetric(&approval.Event) if err := em.database.UpsertTokenApproval(ctx, &approval.TokenApproval); err != nil { log.L(ctx).Errorf("Failed to record token approval '%s': %s", approval.ProtocolID, err) return false, err diff --git a/internal/events/tokens_transferred.go b/internal/events/tokens_transferred.go index d0031b4e7f..b2f7ca3e47 100644 --- a/internal/events/tokens_transferred.go +++ b/internal/events/tokens_transferred.go @@ -102,7 +102,7 @@ func (em *eventManager) persistTokenTransfer(ctx context.Context, transfer *toke if err := em.persistBlockchainEvent(ctx, chainEvent); err != nil { return false, err } - em.emitBlockchainEventMetric(transfer.Event) + em.emitBlockchainEventMetric(&transfer.Event) if err := em.database.UpsertTokenTransfer(ctx, &transfer.TokenTransfer); err != nil { log.L(ctx).Errorf("Failed to record token transfer '%s': %s", transfer.ProtocolID, err) return false, err @@ -144,7 +144,7 @@ func (em *eventManager) TokensTransferred(ti tokens.Plugin, transfer *tokens.Tok } } } - em.emitBlockchainEventMetric(transfer.Event) + em.emitBlockchainEventMetric(&transfer.Event) event := fftypes.NewEvent(fftypes.EventTypeTransferConfirmed, transfer.Namespace, transfer.LocalID, transfer.TX.ID, transfer.Pool.String()) return em.database.InsertEvent(ctx, event)
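Taken together, the two patches settle on one pattern for metric emission: the blockchain.Event embedded in batch-pin, token-transfer and approval payloads is handed to emitBlockchainEventMetric by pointer, and the helper only records a metric when metrics are enabled and the event carries both a location and a signature. Below is a minimal, self-contained sketch of that pattern; the trimmed Event, metricsManager and eventManager types are illustrative stand-ins, not the full FireFly definitions.

package main

import "fmt"

// Trimmed-down stand-ins for the FireFly types touched by this patch; the
// real blockchain.Event and metrics.Manager carry more than shown here.
type Event struct {
	Location  string
	Signature string
}

type metricsManager struct{ enabled bool }

func (mm *metricsManager) IsMetricsEnabled() bool { return mm.enabled }

func (mm *metricsManager) BlockchainEvent(location, signature string) {
	fmt.Printf("metric: blockchain event at %q with signature %q\n", location, signature)
}

type eventManager struct{ metrics *metricsManager }

// Mirrors the post-patch signature: taking *Event avoids copying the struct
// at every call site, and the guard skips disabled metrics or events that
// lack a location or signature.
func (em *eventManager) emitBlockchainEventMetric(event *Event) {
	if event != nil && em.metrics.IsMetricsEnabled() && event.Location != "" && event.Signature != "" {
		em.metrics.BlockchainEvent(event.Location, event.Signature)
	}
}

func main() {
	em := &eventManager{metrics: &metricsManager{enabled: true}}
	ev := Event{Location: "address=0x1234", Signature: "Changed(address,uint256)"}
	em.emitBlockchainEventMetric(&ev) // callers now pass by reference, e.g. &batchPin.Event
}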