-
Notifications
You must be signed in to change notification settings - Fork 242
Allow sending a message with a token transfer #245
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
1e505ea
11188fb
1ff0916
c75f160
4884273
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -25,6 +25,7 @@ import ( | |
| "github.com/hyperledger/firefly/internal/data" | ||
| "github.com/hyperledger/firefly/internal/i18n" | ||
| "github.com/hyperledger/firefly/internal/identity" | ||
| "github.com/hyperledger/firefly/internal/privatemessaging" | ||
| "github.com/hyperledger/firefly/internal/retry" | ||
| "github.com/hyperledger/firefly/internal/syncasync" | ||
| "github.com/hyperledger/firefly/internal/txcommon" | ||
|
|
@@ -42,11 +43,10 @@ type Manager interface { | |
| GetTokenTransfers(ctx context.Context, ns, typeName, poolName string, filter database.AndFilter) ([]*fftypes.TokenTransfer, *database.FilterResult, error) | ||
| MintTokens(ctx context.Context, ns, typeName, poolName string, transfer *fftypes.TokenTransfer, waitConfirm bool) (*fftypes.TokenTransfer, error) | ||
| BurnTokens(ctx context.Context, ns, typeName, poolName string, transfer *fftypes.TokenTransfer, waitConfirm bool) (*fftypes.TokenTransfer, error) | ||
| TransferTokens(ctx context.Context, ns, typeName, poolName string, transfer *fftypes.TokenTransfer, waitConfirm bool) (*fftypes.TokenTransfer, error) | ||
| TransferTokens(ctx context.Context, ns, typeName, poolName string, transfer *fftypes.TokenTransferInput, waitConfirm bool) (*fftypes.TokenTransfer, error) | ||
|
|
||
| // Bound token callbacks | ||
| TokenPoolCreated(tk tokens.Plugin, pool *fftypes.TokenPool, protocolTxID string, additionalInfo fftypes.JSONObject) error | ||
| TokensTransferred(tk tokens.Plugin, transfer *fftypes.TokenTransfer, protocolTxID string, additionalInfo fftypes.JSONObject) error | ||
|
|
||
| Start() error | ||
| WaitStop() | ||
|
|
@@ -59,13 +59,14 @@ type assetManager struct { | |
| data data.Manager | ||
| syncasync syncasync.Bridge | ||
| broadcast broadcast.Manager | ||
| messaging privatemessaging.Manager | ||
| tokens map[string]tokens.Plugin | ||
| retry retry.Retry | ||
| txhelper txcommon.Helper | ||
| } | ||
|
|
||
| func NewAssetManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, sa syncasync.Bridge, bm broadcast.Manager, ti map[string]tokens.Plugin) (Manager, error) { | ||
| if di == nil || im == nil || sa == nil || bm == nil || ti == nil { | ||
| func NewAssetManager(ctx context.Context, di database.Plugin, im identity.Manager, dm data.Manager, sa syncasync.Bridge, bm broadcast.Manager, pm privatemessaging.Manager, ti map[string]tokens.Plugin) (Manager, error) { | ||
| if di == nil || im == nil || sa == nil || bm == nil || pm == nil || ti == nil { | ||
| return nil, i18n.NewError(ctx, i18n.MsgInitializationNilDepError) | ||
| } | ||
| am := &assetManager{ | ||
|
|
@@ -75,6 +76,7 @@ func NewAssetManager(ctx context.Context, di database.Plugin, im identity.Manage | |
| data: dm, | ||
| syncasync: sa, | ||
| broadcast: bm, | ||
| messaging: pm, | ||
| tokens: ti, | ||
| retry: retry.Retry{ | ||
| InitialDelay: config.GetDuration(config.AssetManagerRetryInitialDelay), | ||
|
|
@@ -119,21 +121,13 @@ func retrieveTokenPoolCreateInputs(ctx context.Context, op *fftypes.Operation, p | |
| return nil | ||
| } | ||
|
|
||
| // Note: the counterpart to below (retrieveTokenTransferInputs) lives in the events package | ||
| func addTokenTransferInputs(op *fftypes.Operation, transfer *fftypes.TokenTransfer) { | ||
| op.Input = fftypes.JSONObject{ | ||
| "id": transfer.LocalID.String(), | ||
| } | ||
| } | ||
|
|
||
| func retrieveTokenTransferInputs(ctx context.Context, op *fftypes.Operation, transfer *fftypes.TokenTransfer) (err error) { | ||
| input := &op.Input | ||
| transfer.LocalID, err = fftypes.ParseUUID(ctx, input.GetString("id")) | ||
| if err != nil { | ||
| return err | ||
| } | ||
| return nil | ||
| } | ||
|
|
||
| func (am *assetManager) CreateTokenPool(ctx context.Context, ns string, typeName string, pool *fftypes.TokenPool, waitConfirm bool) (*fftypes.TokenPool, error) { | ||
| return am.createTokenPoolWithID(ctx, fftypes.NewUUID(), ns, typeName, pool, waitConfirm) | ||
| } | ||
|
|
@@ -291,7 +285,25 @@ func (am *assetManager) BurnTokens(ctx context.Context, ns, typeName, poolName s | |
| return am.transferTokensWithID(ctx, fftypes.NewUUID(), ns, typeName, poolName, transfer, waitConfirm) | ||
| } | ||
|
|
||
| func (am *assetManager) TransferTokens(ctx context.Context, ns, typeName, poolName string, transfer *fftypes.TokenTransfer, waitConfirm bool) (*fftypes.TokenTransfer, error) { | ||
| func (am *assetManager) sendTransferMessage(ctx context.Context, ns string, in *fftypes.MessageInOut) (*fftypes.Message, error) { | ||
| allowedTypes := []fftypes.FFEnum{ | ||
| fftypes.MessageTypeTransferBroadcast, | ||
| fftypes.MessageTypeTransferPrivate, | ||
| } | ||
| if in.Header.Type == "" { | ||
| in.Header.Type = fftypes.MessageTypeTransferBroadcast | ||
| } | ||
| switch in.Header.Type { | ||
| case fftypes.MessageTypeTransferBroadcast: | ||
| return am.broadcast.BroadcastMessage(ctx, ns, in, false) | ||
| case fftypes.MessageTypeTransferPrivate: | ||
| return am.messaging.SendMessage(ctx, ns, in, false) | ||
| default: | ||
| return nil, i18n.NewError(ctx, i18n.MsgInvalidMessageType, allowedTypes) | ||
| } | ||
| } | ||
|
|
||
| func (am *assetManager) TransferTokens(ctx context.Context, ns, typeName, poolName string, transfer *fftypes.TokenTransferInput, waitConfirm bool) (*fftypes.TokenTransfer, error) { | ||
| transfer.Type = fftypes.TokenTransferTypeTransfer | ||
| if transfer.Key == "" { | ||
| org, err := am.identity.GetLocalOrganization(ctx) | ||
|
|
@@ -309,7 +321,17 @@ func (am *assetManager) TransferTokens(ctx context.Context, ns, typeName, poolNa | |
| if transfer.From == transfer.To { | ||
| return nil, i18n.NewError(ctx, i18n.MsgCannotTransferToSelf) | ||
| } | ||
| return am.transferTokensWithID(ctx, fftypes.NewUUID(), ns, typeName, poolName, transfer, waitConfirm) | ||
|
|
||
| if transfer.Message != nil { | ||
| msg, err := am.sendTransferMessage(ctx, ns, transfer.Message) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
| transfer.MessageHash = msg.Hash | ||
| } | ||
|
|
||
| result, err := am.transferTokensWithID(ctx, fftypes.NewUUID(), ns, typeName, poolName, &transfer.TokenTransfer, waitConfirm) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It's my understanding @awrichar that this will switch around, so that in the case of a message+transfer we'll no longer use
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Correct
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. #249 is my parallel attempt to restructure the messaging code in order to have a hook at the point that the message is sealed but not sent. Hopefully I can leverage that to fire off the token transfer just before firing the message, and then wait for the message to be confirmed. Once both of these PRs are merged, I'll tackle that follow-on change. |
||
| return result, err | ||
| } | ||
|
|
||
| func (am *assetManager) transferTokensWithID(ctx context.Context, id *fftypes.UUID, ns, typeName, poolName string, transfer *fftypes.TokenTransfer, waitConfirm bool) (*fftypes.TokenTransfer, error) { | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Note that even in the
`waitConfirm=true` case, the message is dispatched async. This is because dispatching the message synchronously would block forever waiting for the transfer to arrive. Technically we could build and seal the message without sending it in this case, then record the hash, then send the transfer synchronously, then send the message synchronously. I spent some time going down that road and it requires some non-trivial refactoring of the message sending logic, so I tabled it for the moment.
This means that the current behavior of
`waitConfirm=true` guarantees the token transfer has occurred, but does not totally guarantee the message data has been received (although it should have been in most cases).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
My understanding of where we were aiming with this feature, was that the transaction object would be the thing we would generate an event on, and as such the sync/async code would need to have a path where the thing in-flight wasn't a message, but instead a higher-level transaction.
... going to look through the rest of the changes to see if we've learned more that means this approach didn't work out, or if we're still on that path.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There is a transaction in flight, but it's possible I'm resolving that prematurely. We discussed holding the message confirmation until the transfer completes, which is what I tackled in the event aggregation - but I think I am still missing a few pieces of the puzzle here.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Just to close the loop on the comments here, per the summary #245 (comment) when we're done with this, we will block until the message confirms.
However, that's not a blocker on merging this PR as we've done the architectural work to know where we're up to