2 changes: 1 addition & 1 deletion itest/psbt_test.go
@@ -3663,7 +3663,7 @@ func testPsbtRelativeLockTimeSendProofFail(t *harnessTest) {

AssertSendEvents(
t.t, aliceScriptKeyBytes, sendEvents,
tapfreighter.SendStateStorePreBroadcast,
tapfreighter.SendStateVerifyPreBroadcast,
tapfreighter.SendStateWaitTxConf,
)

136 changes: 73 additions & 63 deletions rpcserver.go
@@ -2687,7 +2688,8 @@ func (r *rpcServer) CommitVirtualPsbts(ctx context.Context,
// Make sure the assets given fully satisfy the input commitments.
allPackets := append([]*tappsbt.VPacket{}, activePackets...)
allPackets = append(allPackets, passivePackets...)
err = r.validateInputAssets(ctx, pkt, allPackets)
purgedAssets := r.collectPurgedAssets(ctx, allPackets)
err = r.validateInputAssets(ctx, pkt, allPackets, purgedAssets)
if err != nil {
return nil, fmt.Errorf("error validating input assets: %w", err)
}
@@ -2877,10 +2878,77 @@ func (r *rpcServer) CommitVirtualPsbts(ctx context.Context,
return response, nil
}

// collectPurgedAssets gathers the assets that are being purged for each input
// commitment across all virtual packets.
func (r *rpcServer) collectPurgedAssets(ctx context.Context,
vPackets []*tappsbt.VPacket) map[wire.OutPoint][]*asset.Asset {

purgedAssetsDeDup := make(map[wire.OutPoint]map[[32]byte]*asset.Asset)
for _, vPkt := range vPackets {
for _, vIn := range vPkt.Inputs {
inputAsset := vIn.Asset()
outpoint := vIn.PrevID.OutPoint

input, err := r.cfg.AssetStore.FetchCommitment(
ctx, inputAsset.ID(), outpoint,
inputAsset.GroupKey, &inputAsset.ScriptKey,
true,
)
if err != nil {
// If we can't fetch the input commitment, it
// means this input asset isn't ours. We cannot
// find out if there were any purged assets in
// the commitment, so we just rely on all assets
// being present. If some purged assets are
// missing, then the anchor input equality check
// further down will fail.
rpcsLog.Warnf("Could not fetch input "+
"commitment for outpoint %v: %v",
outpoint, err)

continue
}

assetsToPurge := tapsend.ExtractUnSpendable(
input.Commitment,
)
if len(assetsToPurge) == 0 {
continue
}

assetMap := purgedAssetsDeDup[outpoint]
if assetMap == nil {
assetMap = make(map[[32]byte]*asset.Asset)
purgedAssetsDeDup[outpoint] = assetMap
}

for _, a := range assetsToPurge {
key := a.AssetCommitmentKey()
assetMap[key] = a
}
}
}

// With the assets de-duplicated by their asset commitment key, we can
// now collect them grouped by input outpoint.
purgedAssets := make(map[wire.OutPoint][]*asset.Asset)
for outpoint, assets := range purgedAssetsDeDup {
for key := range assets {
purgedAssets[outpoint] = append(
purgedAssets[outpoint], assets[key],
)
}
Comment on lines +2936 to +2940

medium

This inner loop can be simplified by iterating over the map's values directly, instead of its keys. This is more idiomatic and slightly more readable.

Suggested change

Before:

	for key := range assets {
		purgedAssets[outpoint] = append(
			purgedAssets[outpoint], assets[key],
		)
	}

After:

	for _, asset := range assets {
		purgedAssets[outpoint] = append(
			purgedAssets[outpoint], asset,
		)
	}

}

return purgedAssets
}

// validateInputAssets makes sure that the input assets are correct and their
// combined commitments match the inputs of the BTC level anchor transaction.
// The purgedAssets map contains the commitment leftovers per anchor input.
func (r *rpcServer) validateInputAssets(ctx context.Context,
btcPkt *psbt.Packet, vPackets []*tappsbt.VPacket) error {
btcPkt *psbt.Packet, vPackets []*tappsbt.VPacket,
purgedAssets map[wire.OutPoint][]*asset.Asset) error {

err := tapsend.ValidateVPacketVersions(vPackets)
if err != nil {
@@ -2939,65 +3007,6 @@ func (r *rpcServer) validateInputAssets(ctx context.Context,
}
}

// We also want to make sure we actually have the assets that are being
// spent in our database. We fetch the input commitments of all packets
// to asset that. And while we're doing that, we also extract all the
// pruned assets that are not re-created in the outputs, which we need
// for the final validation. We de-duplicate the pruned assets in a
// temporary map keyed by input outpoint and the asset commitment key.
purgedAssetsDeDup := make(map[wire.OutPoint]map[[32]byte]*asset.Asset)
for _, vPkt := range vPackets {
for _, vIn := range vPkt.Inputs {
inputAsset := vIn.Asset()
outpoint := vIn.PrevID.OutPoint

input, err := r.cfg.AssetStore.FetchCommitment(
ctx, inputAsset.ID(), outpoint,
inputAsset.GroupKey, &inputAsset.ScriptKey,
true,
)
if err != nil {
// If we can't fetch the input commitment, it
// means this input asset isn't ours. We cannot
// find out if there were any purged assets in
// the commitment, so we just rely on all assets
// being present. If some purged assets are
// missing, then the anchor input equality check
// further down will fail.
rpcsLog.Warnf("Could not fetch input "+
"commitment for outpoint %v: %v",
outpoint, err)

continue
}

assetsToPurge := tapsend.ExtractUnSpendable(
input.Commitment,
)
for _, a := range assetsToPurge {
key := a.AssetCommitmentKey()
if purgedAssetsDeDup[outpoint] == nil {
purgedAssetsDeDup[outpoint] = make(
map[[32]byte]*asset.Asset,
)
}

purgedAssetsDeDup[outpoint][key] = a
}
}
}

// With the assets de-duplicated by their asset commitment key, we can
// now collect them grouped by input outpoint.
purgedAssets := make(map[wire.OutPoint][]*asset.Asset)
for outpoint, assets := range purgedAssetsDeDup {
for key := range assets {
purgedAssets[outpoint] = append(
purgedAssets[outpoint], assets[key],
)
}
}

// At this point all the virtual packet inputs and outputs should fully
// match the BTC level anchor transaction. Version 0 assets should also
// be signed now.
@@ -3072,7 +3081,8 @@ func (r *rpcServer) PublishAndLogTransfer(ctx context.Context,
// sure everything is in order. We start by validating the inputs.
allPackets := append([]*tappsbt.VPacket{}, activePackets...)
allPackets = append(allPackets, passivePackets...)
err = r.validateInputAssets(ctx, pkt, allPackets)
purgedAssets := r.collectPurgedAssets(ctx, allPackets)
err = r.validateInputAssets(ctx, pkt, allPackets, purgedAssets)
if err != nil {
return nil, fmt.Errorf("error validating input assets: %w", err)
}
@@ -3128,7 +3138,7 @@ func (r *rpcServer) PublishAndLogTransfer(ctx context.Context,
resp, err := r.cfg.ChainPorter.RequestShipment(
tapfreighter.NewPreAnchoredParcel(
activePackets, passivePackets, anchorTx,
req.SkipAnchorTxBroadcast, req.Label,
req.SkipAnchorTxBroadcast, req.Label, purgedAssets,
),
)
if err != nil {
2 changes: 1 addition & 1 deletion tapchannel/aux_closer.go
@@ -667,7 +667,7 @@ func shipChannelTxn(txSender tapfreighter.Porter, chanTx *wire.MsgTx,
FinalTx: chanTx,
}
preSignedParcel := tapfreighter.NewPreAnchoredParcel(
vPkts, nil, closeAnchor, false, "",
vPkts, nil, closeAnchor, false, "", nil,
)
_, err = txSender.RequestShipment(preSignedParcel)
if err != nil {
2 changes: 1 addition & 1 deletion tapchannel/aux_funding_controller.go
@@ -1457,7 +1457,7 @@ func (f *FundingController) completeChannelFunding(ctx context.Context,
FinalTx: signedFundingTx,
}
preSignedParcel := tapfreighter.NewPreAnchoredParcel(
activePkts, passivePkts, anchorTx, false, "",
activePkts, passivePkts, anchorTx, false, "", nil,
)
_, err = f.cfg.TxSender.RequestShipment(preSignedParcel)
if err != nil {
23 changes: 20 additions & 3 deletions tapfreighter/chain_porter.go
@@ -1609,10 +1609,24 @@ func (p *ChainPorter) stateStep(currentPkg sendPackage) (*sendPackage, error) {
// finalization.
currentPkg.AnchorTx = anchorTx

currentPkg.SendState = SendStateVerifyPreBroadcast
return &currentPkg, nil

// Run final pre-broadcast checks on the send package.
case SendStateVerifyPreBroadcast:
ctx, cancel := p.WithCtxQuitNoTimeout()
defer cancel()

// For the final validation, we need to also supply the assets
// that were committed to the input tree but pruned because they
// were burns or tombstones.
// were burns or tombstones. Some parcels (like the pre-anchored
// flow) already provide those pruned assets up-front.
prunedAssets := make(map[wire.OutPoint][]*asset.Asset)
for outpoint, assets := range currentPkg.PrunedAssets {
prunedAssets[outpoint] = append(
prunedAssets[outpoint], assets...,
)
}
for prevID := range currentPkg.InputCommitments {
c := currentPkg.InputCommitments[prevID]
prunedAssets[prevID.OutPoint] = append(
@@ -1622,16 +1636,19 @@
}

// Make sure everything is ready for the finalization.
err = currentPkg.validateReadyForPublish(prunedAssets)
err := currentPkg.validateReadyForPublish(prunedAssets)
if err != nil {
p.unlockInputs(ctx, &currentPkg)

return nil, fmt.Errorf("unable to validate send "+
"package: %w", err)
}

currentPkg.SendState = SendStateStorePreBroadcast
// TODO(ffranr): Extend this state with input proof
// verification and also possibly partial output proof
// verification.

currentPkg.SendState = SendStateStorePreBroadcast
return &currentPkg, nil

// In this state, the parcel state is stored before the fully signed
22 changes: 20 additions & 2 deletions tapfreighter/parcel.go
@@ -41,6 +41,10 @@ const (
// then finalize to place the necessary signatures in the transaction.
SendStateAnchorSign

// SendStateVerifyPreBroadcast runs final pre-broadcast checks on the
// send package.
SendStateVerifyPreBroadcast

// SendStateStorePreBroadcast is the state in which the finalized fully
// signed transaction is written to persistent storage before broadcast.
SendStateStorePreBroadcast
@@ -85,6 +89,9 @@ func (s SendState) String() string {
case SendStateAnchorSign:
return "SendStateAnchorSign"

case SendStateVerifyPreBroadcast:
return "SendStateVerifyPreBroadcast"

case SendStateStorePreBroadcast:
return "SendStateStorePreBroadcast"

@@ -383,6 +390,10 @@ type PreAnchoredParcel struct {

anchorTx *tapsend.AnchorTransaction

// prunedAssets holds any assets that were part of the input commitment
// but are not recreated by the virtual packets (e.g. tombstones).
prunedAssets map[wire.OutPoint][]*asset.Asset

// skipAnchorTxBroadcast bool is a flag that indicates whether the
// anchor transaction broadcast should be skipped. This is useful when
// an external system handles broadcasting, such as in custom
@@ -400,7 +411,8 @@ var _ Parcel = (*PreAnchoredParcel)(nil)
// NewPreAnchoredParcel creates a new PreAnchoredParcel.
func NewPreAnchoredParcel(vPackets []*tappsbt.VPacket,
passiveAssets []*tappsbt.VPacket, anchorTx *tapsend.AnchorTransaction,
skipAnchorTxBroadcast bool, label string) *PreAnchoredParcel {
skipAnchorTxBroadcast bool, label string,
prunedAssets map[wire.OutPoint][]*asset.Asset) *PreAnchoredParcel {

return &PreAnchoredParcel{
parcelKit: &parcelKit{
@@ -410,6 +422,7 @@ func NewPreAnchoredParcel(vPackets []*tappsbt.VPacket,
virtualPackets: vPackets,
passiveAssets: passiveAssets,
anchorTx: anchorTx,
prunedAssets: prunedAssets,
skipAnchorTxBroadcast: skipAnchorTxBroadcast,
label: label,
}
@@ -424,10 +437,11 @@ func (p *PreAnchoredParcel) pkg() *sendPackage {
// commitment.
return &sendPackage{
Parcel: p,
SendState: SendStateStorePreBroadcast,
SendState: SendStateVerifyPreBroadcast,
VirtualPackets: p.virtualPackets,
PassiveAssets: p.passiveAssets,
AnchorTx: p.anchorTx,
PrunedAssets: p.prunedAssets,
Label: p.label,
SkipAnchorTxBroadcast: p.skipAnchorTxBroadcast,
}
@@ -486,6 +500,10 @@ type sendPackage struct {
// associated Taproot Asset commitment.
InputCommitments tappsbt.InputCommitments

// PrunedAssets holds any assets that were part of the input commitment
// but are not recreated by the virtual packets (e.g. tombstones).
PrunedAssets map[wire.OutPoint][]*asset.Asset

// SendManifests is a map of send manifests that need to be sent to the
// auth mailbox server to complete an address V2 transfer. It is keyed
// by the anchor output index.
70 changes: 70 additions & 0 deletions tmp_change_dsc.md
@@ -0,0 +1,70 @@
TestTaprootAssetsDaemon/tranche00/83-of-98/anchor_multiple_virtual_transactions was failing inside ValidateAnchorInputs
with “anchor input script mismatch”. The mismatch appeared whenever we executed the PublishAndLogTransfer (pre‑anchored)
path: the RPC would validate the user-supplied PSBT/vpkts,
but once ChainPorter picked up the resulting parcel it no longer had the full tap trees needed to recreate each anchor
input, so the final pre-broadcast check failed.

Root cause

ValidateAnchorInputs needs all assets that were committed in the original anchor input:

1. active + passive assets (present in the virtual packets), and
2. “purged assets” (tombstones/burns) that were part of the commitment but are not recreated (a conceptual sketch of the
check follows below).

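To make the failure mode concrete, here is a minimal conceptual sketch of that check. The rebuild and script-derivation
steps are passed in as hypothetical callbacks (`buildCommitment`, `derivePkScript`) and are stand-ins, not the tapd API:
the point is only that dropping the purged leaves changes the derived script.

```go
package sketch

import (
	"bytes"
	"fmt"

	"github.com/lightninglabs/taproot-assets/asset"
)

// checkAnchorInput is a conceptual sketch only: rebuild the anchor input's
// commitment from every asset that was committed to it (active, passive and
// purged) and compare the derived output script with the script actually
// being spent. The rebuild/derive steps are hypothetical callbacks because
// the real logic lives inside tapd.
func checkAnchorInput(spentPkScript []byte,
	active, passive, purged []*asset.Asset,
	buildCommitment func([]*asset.Asset) (any, error),
	derivePkScript func(any) ([]byte, error)) error {

	committed := make([]*asset.Asset, 0,
		len(active)+len(passive)+len(purged))
	committed = append(committed, active...)
	committed = append(committed, passive...)
	committed = append(committed, purged...)

	c, err := buildCommitment(committed)
	if err != nil {
		return err
	}

	derived, err := derivePkScript(c)
	if err != nil {
		return err
	}

	// Dropping the purged leaves shrinks the commitment tree, so the
	// derived script diverges from the one committed on chain.
	if !bytes.Equal(derived, spentPkScript) {
		return fmt.Errorf("anchor input script mismatch")
	}

	return nil
}
```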
When ChainPorter orchestrates a send from scratch it keeps the original InputCommitments in memory, so in
SendStateVerifyPreBroadcast we can call tapsend.ExtractUnSpendable on those commitments and hand the pruned leaves to
ValidateAnchorInputs. However, in the pre‑anchored RPC flow we never stored
those commitments—InputCommitments is empty—and although rpcServer.validateInputAssets fetched and used the purged
leaves to check the user’s PSBT, it didn’t persist them anywhere. By the time ChainPorter ran, the only data left were
the active/passive assets; the tombstones/burns were gone, so the
reconstructed tap tree didn’t match the on-chain script.
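For reference, the derivation from the in-memory commitments looks roughly like this (abridged from the chain_porter.go
hunk above; the lines hidden behind the collapsed hunk are assumed to complete the append as shown):

```go
// Only possible when ChainPorter still holds the original input
// commitments: pull the unspendable (purged) leaves back out of each
// commitment, keyed by the input outpoint.
prunedAssets := make(map[wire.OutPoint][]*asset.Asset)
for prevID := range currentPkg.InputCommitments {
	c := currentPkg.InputCommitments[prevID]
	prunedAssets[prevID.OutPoint] = append(
		prunedAssets[prevID.OutPoint],
		tapsend.ExtractUnSpendable(c)...,
	)
}
```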

What changed

1. The purged-asset lookup is factored out of rpcServer.validateInputAssets into a new collectPurgedAssets helper, and
validateInputAssets now receives the resulting map as an argument. The rest of the function's checks are unchanged; the
tombstones/burns are simply gathered up front so the caller can reuse them (the combined flow is sketched right after
this list).
2. PublishAndLogTransfer captures that map and passes it down to ChainPorter by storing it in the PreAnchoredParcel.
3. sendPackage gains a PrunedAssets field (populated from the parcel), and the final checks now run in a dedicated
SendStateVerifyPreBroadcast state between SendStateAnchorSign and SendStateStorePreBroadcast. In that state we merge any
pre-supplied pruned leaves with the ones we can still derive from InputCommitments and feed the combined set into
ValidateAnchorInputs.
4. Existing internal callers of NewPreAnchoredParcel (aux funding controller, aux closer) pass nil because they still
have full commitments in memory; nothing changes for those flows.
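Pulled together from the hunks above, the pre-anchored path now carries the purged leaves end to end roughly like this
(error handling and unrelated steps omitted):

```go
// rpcServer side (CommitVirtualPsbts / PublishAndLogTransfer): gather the
// purged leaves once, validate with them, then hand them to the parcel.
purgedAssets := r.collectPurgedAssets(ctx, allPackets)
err = r.validateInputAssets(ctx, pkt, allPackets, purgedAssets)
if err != nil {
	return nil, fmt.Errorf("error validating input assets: %w", err)
}

resp, err := r.cfg.ChainPorter.RequestShipment(
	tapfreighter.NewPreAnchoredParcel(
		activePackets, passivePackets, anchorTx,
		req.SkipAnchorTxBroadcast, req.Label, purgedAssets,
	),
)

// ChainPorter side (SendStateVerifyPreBroadcast): start from the parcel's
// pre-supplied leaves, then add anything still derivable from the
// in-memory input commitments (the loop sketched under "Root cause")
// before calling validateReadyForPublish.
prunedAssets := make(map[wire.OutPoint][]*asset.Asset)
for outpoint, assets := range currentPkg.PrunedAssets {
	prunedAssets[outpoint] = append(prunedAssets[outpoint], assets...)
}
```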

This ensures that every code path which validated a PSBT against the full tap tree can supply the same tombstones/burns
later, so the final pre-broadcast check reconstructs the exact script that is committed on chain.

Why not just populate InputCommitments?

For pre-anchored flows we often cannot build the full InputCommitments map:

- Inputs might belong to another party; we can’t fetch or persist their entire tap commitments.
- Even for local inputs the full commitment can be large, and we’d only be storing it to re-derive a small subset (the
unspendable leaves). That’s heavyweight and redundant.

Passing just the pruned leaves is the minimal data we need to satisfy ValidateAnchorInputs, and it works even when the
full commitments aren’t available.

Is rpcServer.validateInputAssets redundant now?

No. It serves as the early validation barrier at the RPC boundary:

- It decorates the PSBT with local derivation info (so later signing works).
- It fetches whatever commitments the node knows about to enforce supply conservation and collect pruned leaves.
- It ensures malformed PSBTs are rejected before we ask the wallet to fund/sign or modify any state.

ChainPorter’s validateReadyForPublish is the final gate after coin selection and passive re-anchoring. Both stages call
into ValidateAnchorInputs/Outputs, but at different points in the lifecycle and with different responsibilities.
Removing either would either expose us to malformed user input (if
we removed the RPC check) or allow inconsistencies to slip through right before broadcast (if we removed the ChainPorter
check). The new change simply lets the early-stage information flow to the late stage so both checks see the same
complete data.

Summary

- Collecting the pruned assets at the RPC layer and carrying them through the parcel keeps tombstones/burns available
for the final validation step.
- ChainPorter still gathers unspendables from InputCommitments when it has them; the new field only matters for
pre‑anchored workflows that previously had no way to reproduce the missing leaves.
- validateInputAssets remains necessary as the RPC boundary guard; factoring the purged-asset collection into
collectPurgedAssets simply makes that data reusable later.