diff --git a/benchmarks/testinstance/testinstance.go b/benchmarks/testinstance/testinstance.go
index 59b82383..6732610e 100644
--- a/benchmarks/testinstance/testinstance.go
+++ b/benchmarks/testinstance/testinstance.go
@@ -2,7 +2,6 @@ package testinstance
 
 import (
 	"context"
-	"os"
 	"time"
 
 	"github.com/ipfs/go-datastore"
@@ -165,8 +164,8 @@ func NewInstance(ctx context.Context, net tn.Network, tempDir string, diskBasedD
 	linkSystem := storeutil.LinkSystemForBlockstore(bstore)
 	gs := gsimpl.New(ctx, gsNet, linkSystem, gsimpl.RejectAllRequestsByDefault())
 
-	transport := gstransport.NewTransport(p, gs, dtNet)
-	dt, err := dtimpl.NewDataTransfer(namespace.Wrap(dstore, datastore.NewKey("/data-transfers/transfers")), os.TempDir(), dtNet, transport)
+	transport := gstransport.NewTransport(p, gs)
+	dt, err := dtimpl.NewDataTransfer(namespace.Wrap(dstore, datastore.NewKey("/data-transfers/transfers")), dtNet, transport)
 	if err != nil {
 		return Instance{}, err
 	}
diff --git a/channelmonitor/channelmonitor_test.go b/channelmonitor/channelmonitor_test.go
index b710530b..cd07bbea 100644
--- a/channelmonitor/channelmonitor_test.go
+++ b/channelmonitor/channelmonitor_test.go
@@ -610,14 +610,6 @@ func (m *mockChannelState) Stages() *datatransfer.ChannelStages {
 	panic("implement me")
 }
 
-func (m *mockChannelState) ReceivedCids() []cid.Cid {
-	panic("implement me")
-}
-
-func (m *mockChannelState) ReceivedCidsLen() int {
-	panic("implement me")
-}
-
 func (m *mockChannelState) ReceivedCidsTotal() int64 {
 	panic("implement me")
 }
diff --git a/channels/channel_state.go b/channels/channel_state.go
index 76141249..9305d4d4 100644
--- a/channels/channel_state.go
+++ b/channels/channel_state.go
@@ -57,7 +57,6 @@ type channelState struct {
 	voucherResults       []internal.EncodedVoucherResult
 	voucherResultDecoder DecoderByTypeFunc
 	voucherDecoder       DecoderByTypeFunc
-	receivedCids         ReceivedCidsReader
 
 	// stages tracks the timeline of events related to a data transfer, for
 	// traceability purposes.
@@ -110,24 +109,6 @@ func (c channelState) Voucher() datatransfer.Voucher {
 	return encodable.(datatransfer.Voucher)
 }
 
-// ReceivedCids returns the cids received so far on this channel
-func (c channelState) ReceivedCids() []cid.Cid {
-	receivedCids, err := c.receivedCids.ToArray(c.ChannelID())
-	if err != nil {
-		log.Error(err)
-	}
-	return receivedCids
-}
-
-// ReceivedCids returns the number of unique cids received so far on this channel
-func (c channelState) ReceivedCidsLen() int {
-	len, err := c.receivedCids.Len(c.ChannelID())
-	if err != nil {
-		log.Error(err)
-	}
-	return len
-}
-
 // ReceivedCidsTotal returns the number of (non-unique) cids received so far
 // on the channel - note that a block can exist in more than one place in the DAG
 func (c channelState) ReceivedCidsTotal() int64 {
@@ -233,7 +214,7 @@ func (c channelState) Stages() *datatransfer.ChannelStages {
 	return c.stages
 }
 
-func fromInternalChannelState(c internal.ChannelState, voucherDecoder DecoderByTypeFunc, voucherResultDecoder DecoderByTypeFunc, receivedCidsReader ReceivedCidsReader) datatransfer.ChannelState {
+func fromInternalChannelState(c internal.ChannelState, voucherDecoder DecoderByTypeFunc, voucherResultDecoder DecoderByTypeFunc) datatransfer.ChannelState {
 	return channelState{
 		selfPeer:             c.SelfPeer,
 		isPull:               c.Initiator == c.Recipient,
@@ -255,7 +236,6 @@ func fromInternalChannelState(c internal.ChannelState, voucherDecoder DecoderByT
 		voucherResults:       c.VoucherResults,
 		voucherResultDecoder: voucherResultDecoder,
 		voucherDecoder:       voucherDecoder,
-		receivedCids:         receivedCidsReader,
 		stages:               c.Stages,
 		missingCids:          c.MissingCids,
 	}
diff --git a/channels/channels.go b/channels/channels.go
index 90c73a6c..b459dfd9 100644
--- a/channels/channels.go
+++ b/channels/channels.go
@@ -7,7 +7,6 @@ import (
 
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-datastore"
-	"github.com/ipfs/go-datastore/namespace"
 	"github.com/ipld/go-ipld-prime"
 	peer "github.com/libp2p/go-libp2p-core/peer"
 	cbg "github.com/whyrusleeping/cbor-gen"
@@ -21,18 +20,11 @@ import (
 	datatransfer "github.com/filecoin-project/go-data-transfer"
 	"github.com/filecoin-project/go-data-transfer/channels/internal"
 	"github.com/filecoin-project/go-data-transfer/channels/internal/migrations"
-	"github.com/filecoin-project/go-data-transfer/cidlists"
-	"github.com/filecoin-project/go-data-transfer/cidsets"
 	"github.com/filecoin-project/go-data-transfer/encoding"
 )
 
 type DecoderByTypeFunc func(identifier datatransfer.TypeIdentifier) (encoding.Decoder, bool)
 
-type ReceivedCidsReader interface {
-	ToArray(chid datatransfer.ChannelID) ([]cid.Cid, error)
-	Len(chid datatransfer.ChannelID) (int, error)
-}
-
 type Notifier func(datatransfer.Event, datatransfer.ChannelState)
 
 // ErrNotFound is returned when a channel cannot be found with a given channel ID
@@ -59,7 +51,6 @@ type Channels struct {
 	blockIndexCache      *blockIndexCache
 	stateMachines        fsm.Group
 	migrateStateMachines func(context.Context) error
-	seenCIDs             *cidsets.CIDSetManager
 }
 
 // ChannelEnvironment -- just a proxy for DTNetwork for now
@@ -72,22 +63,19 @@ type ChannelEnvironment interface {
 
 // New returns a new thread safe list of channels
 func New(ds datastore.Batching,
-	cidLists cidlists.CIDLists,
 	notifier Notifier,
 	voucherDecoder DecoderByTypeFunc,
 	voucherResultDecoder DecoderByTypeFunc,
 	env ChannelEnvironment,
 	selfPeer peer.ID) (*Channels, error) {
 
-	seenCIDsDS := namespace.Wrap(ds, datastore.NewKey("seencids"))
 	c := &Channels{
-		seenCIDs:             cidsets.NewCIDSetManager(seenCIDsDS),
 		notifier:             notifier,
 		voucherDecoder:       voucherDecoder,
 		voucherResultDecoder: voucherResultDecoder,
 	}
 	c.blockIndexCache = newBlockIndexCache()
-	channelMigrations, err := migrations.GetChannelStateMigrations(selfPeer, cidLists)
+	channelMigrations, err := migrations.GetChannelStateMigrations(selfPeer)
 	if err != nil {
 		return nil, err
 	}
@@ -127,19 +115,6 @@ func (c *Channels) dispatch(eventName fsm.EventName, channel fsm.StateType) {
 	}
 	log.Debugw("process data transfer listeners", "name", datatransfer.Events[evtCode], "transfer ID", realChannel.TransferID)
 	c.notifier(evt, c.fromInternalChannelState(realChannel))
-
-	// When the channel has been cleaned up, remove the caches of seen cids
-	if evt.Code == datatransfer.CleanupComplete {
-		chid := datatransfer.ChannelID{
-			Initiator: realChannel.Initiator,
-			Responder: realChannel.Responder,
-			ID:        realChannel.TransferID,
-		}
-		err := c.removeSeenCIDCaches(chid)
-		if err != nil {
-			log.Errorf("failed to clean up channel %s: %s", err)
-		}
-	}
 }
 
 // CreateNew creates a new channel id and channel state and saves to channels.
@@ -271,12 +246,6 @@ func (c *Channels) DataQueued(chid datatransfer.ChannelID, k cid.Cid, delta uint
 // Returns true if this is the first time the block has been received
 func (c *Channels) DataReceived(chid datatransfer.ChannelID, k cid.Cid, delta uint64, index int64, unique bool) (bool, error) {
 	new, err := c.fireProgressEvent(chid, datatransfer.DataReceived, datatransfer.DataReceivedProgress, k, delta, index, unique, c.getReceivedIndex)
-	// TODO: remove when ReceivedCids and legacy protocol is removed
-	// write the seen received cids, but write async in order to avoid blocking processing
-	if err == nil {
-		sid := seenCidsSetID(chid, datatransfer.DataReceived)
-		_, _ = c.seenCIDs.InsertSetCID(sid, k)
-	}
 	return new, err
 }
 
@@ -395,25 +364,6 @@ func (c *Channels) HasChannel(chid datatransfer.ChannelID) (bool, error) {
 	return c.stateMachines.Has(chid)
 }
 
-// removeSeenCIDCaches cleans up the caches of "seen" blocks, ie
-// blocks that have already been queued / sent / received
-func (c *Channels) removeSeenCIDCaches(chid datatransfer.ChannelID) error {
-	// Clean up seen block caches
-	progressStates := []datatransfer.EventCode{
-		datatransfer.DataQueued,
-		datatransfer.DataSent,
-		datatransfer.DataReceived,
-	}
-	for _, evt := range progressStates {
-		sid := seenCidsSetID(chid, evt)
-		err := c.seenCIDs.DeleteSet(sid)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // fireProgressEvent fires
 // - an event for queuing / sending / receiving blocks
 // - a corresponding "progress" event if the block has not been seen before
@@ -463,37 +413,7 @@ func (c *Channels) checkChannelExists(chid datatransfer.ChannelID, code datatran
 	return nil
 }
 
-// Get the ID of the CID set for the given channel ID and event code.
-// The CID set stores a unique list of queued / sent / received CIDs.
-func seenCidsSetID(chid datatransfer.ChannelID, evt datatransfer.EventCode) cidsets.SetID {
-	return cidsets.SetID(chid.String() + "/" + datatransfer.Events[evt])
-}
-
 // Convert from the internally used channel state format to the externally exposed ChannelState
 func (c *Channels) fromInternalChannelState(ch internal.ChannelState) datatransfer.ChannelState {
-	rcr := &receivedCidsReader{seenCIDs: c.seenCIDs}
-	return fromInternalChannelState(ch, c.voucherDecoder, c.voucherResultDecoder, rcr)
-}
-
-// Implements the ReceivedCidsReader interface so that the internal channel
-// state has access to the received CIDs.
-// The interface is used (instead of passing these values directly)
-// so the values can be loaded lazily. Reading all CIDs from the datastore
-// is an expensive operation so we want to avoid doing it unless necessary.
-// Note that the received CIDs get cleaned up when the channel completes, so
-// these methods will return an empty array after that point.
-type receivedCidsReader struct {
-	seenCIDs *cidsets.CIDSetManager
+	return fromInternalChannelState(ch, c.voucherDecoder, c.voucherResultDecoder)
 }
-
-func (r *receivedCidsReader) ToArray(chid datatransfer.ChannelID) ([]cid.Cid, error) {
-	sid := seenCidsSetID(chid, datatransfer.DataReceived)
-	return r.seenCIDs.SetToArray(sid)
-}
-
-func (r *receivedCidsReader) Len(chid datatransfer.ChannelID) (int, error) {
-	sid := seenCidsSetID(chid, datatransfer.DataReceived)
-	return r.seenCIDs.SetLen(sid)
-}
-
-var _ ReceivedCidsReader = (*receivedCidsReader)(nil)
diff --git a/channels/channels_test.go b/channels/channels_test.go
index 2dd365da..1121a3d3 100644
--- a/channels/channels_test.go
+++ b/channels/channels_test.go
@@ -1,35 +1,22 @@
 package channels_test
 
 import (
-	"bytes"
 	"context"
 	"errors"
-	"math/rand"
-	"os"
 	"testing"
 	"time"
 
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-datastore"
 	dss "github.com/ipfs/go-datastore/sync"
-	"github.com/ipld/go-ipld-prime/codec/dagcbor"
 	basicnode "github.com/ipld/go-ipld-prime/node/basic"
 	"github.com/ipld/go-ipld-prime/traversal/selector/builder"
 	peer "github.com/libp2p/go-libp2p-core/peer"
 	"github.com/stretchr/testify/require"
-	cbg "github.com/whyrusleeping/cbor-gen"
 	"golang.org/x/xerrors"
 
-	versioning "github.com/filecoin-project/go-ds-versioning/pkg"
-	versionedds "github.com/filecoin-project/go-ds-versioning/pkg/datastore"
-
 	datatransfer "github.com/filecoin-project/go-data-transfer"
 	"github.com/filecoin-project/go-data-transfer/channels"
-	"github.com/filecoin-project/go-data-transfer/channels/internal"
-	"github.com/filecoin-project/go-data-transfer/channels/internal/migrations"
-	v0 "github.com/filecoin-project/go-data-transfer/channels/internal/migrations/v0"
-	v1 "github.com/filecoin-project/go-data-transfer/channels/internal/migrations/v1"
-	"github.com/filecoin-project/go-data-transfer/cidlists"
 	"github.com/filecoin-project/go-data-transfer/encoding"
 	"github.com/filecoin-project/go-data-transfer/testutil"
 )
@@ -52,10 +39,7 @@ func TestChannels(t *testing.T) {
 	selector := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any).Matcher().Node()
 	peers := testutil.GeneratePeers(4)
 
-	dir := os.TempDir()
-	cidLists, err := cidlists.NewCIDLists(dir)
-	require.NoError(t, err)
-	channelList, err := channels.New(ds, cidLists, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0])
+	channelList, err := channels.New(ds, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0])
 	require.NoError(t, err)
 
 	err = channelList.Start(ctx)
@@ -140,10 +124,8 @@ func TestChannels(t *testing.T) {
 
 	t.Run("datasent/queued when transfer is already finished", func(t *testing.T) {
 		ds := dss.MutexWrap(datastore.NewMapDatastore())
-		dir := os.TempDir()
-		cidLists, err := cidlists.NewCIDLists(dir)
-		require.NoError(t, err)
-		channelList, err := channels.New(ds, cidLists, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0])
+
+		channelList, err := channels.New(ds, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0])
 		require.NoError(t, err)
 		err = channelList.Start(ctx)
 		require.NoError(t, err)
@@ -172,10 +154,8 @@ func TestChannels(t *testing.T) {
 
 	t.Run("updating send/receive values", func(t
*testing.T) { ds := dss.MutexWrap(datastore.NewMapDatastore()) - dir := os.TempDir() - cidLists, err := cidlists.NewCIDLists(dir) - require.NoError(t, err) - channelList, err := channels.New(ds, cidLists, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0]) + + channelList, err := channels.New(ds, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0]) require.NoError(t, err) err = channelList.Start(ctx) require.NoError(t, err) @@ -186,7 +166,6 @@ func TestChannels(t *testing.T) { require.Equal(t, datatransfer.Requested, state.Status()) require.Equal(t, uint64(0), state.Received()) require.Equal(t, uint64(0), state.Sent()) - require.Empty(t, state.ReceivedCids()) isNew, err := channelList.DataReceived(datatransfer.ChannelID{Initiator: peers[0], Responder: peers[1], ID: tid1}, cids[0], 50, 1, true) require.NoError(t, err) @@ -195,7 +174,6 @@ func TestChannels(t *testing.T) { state = checkEvent(ctx, t, received, datatransfer.DataReceived) require.Equal(t, uint64(50), state.Received()) require.Equal(t, uint64(0), state.Sent()) - require.Equal(t, []cid.Cid{cids[0]}, state.ReceivedCids()) isNew, err = channelList.DataSent(datatransfer.ChannelID{Initiator: peers[0], Responder: peers[1], ID: tid1}, cids[1], 100, 1, true) require.NoError(t, err) @@ -204,7 +182,6 @@ func TestChannels(t *testing.T) { state = checkEvent(ctx, t, received, datatransfer.DataSent) require.Equal(t, uint64(50), state.Received()) require.Equal(t, uint64(100), state.Sent()) - require.Equal(t, []cid.Cid{cids[0]}, state.ReceivedCids()) // send block again has no effect isNew, err = channelList.DataSent(datatransfer.ChannelID{Initiator: peers[0], Responder: peers[1], ID: tid1}, cids[1], 100, 1, true) @@ -220,7 +197,6 @@ func TestChannels(t *testing.T) { require.False(t, isNew) isNew, err = channelList.DataSent(datatransfer.ChannelID{Initiator: peers[1], Responder: peers[0], ID: tid1}, cids[1], 200, 2, true) require.True(t, xerrors.As(err, new(*channels.ErrNotFound))) - require.Equal(t, []cid.Cid{cids[0]}, state.ReceivedCids()) require.False(t, isNew) isNew, err = channelList.DataReceived(datatransfer.ChannelID{Initiator: peers[0], Responder: peers[1], ID: tid1}, cids[1], 50, 2, true) @@ -230,7 +206,6 @@ func TestChannels(t *testing.T) { state = checkEvent(ctx, t, received, datatransfer.DataReceived) require.Equal(t, uint64(100), state.Received()) require.Equal(t, uint64(100), state.Sent()) - require.ElementsMatch(t, []cid.Cid{cids[0], cids[1]}, state.ReceivedCids()) isNew, err = channelList.DataSent(datatransfer.ChannelID{Initiator: peers[0], Responder: peers[1], ID: tid1}, cids[1], 25, 2, false) require.NoError(t, err) @@ -238,7 +213,6 @@ func TestChannels(t *testing.T) { state = checkEvent(ctx, t, received, datatransfer.DataSent) require.Equal(t, uint64(100), state.Received()) require.Equal(t, uint64(100), state.Sent()) - require.ElementsMatch(t, []cid.Cid{cids[0], cids[1]}, state.ReceivedCids()) isNew, err = channelList.DataReceived(datatransfer.ChannelID{Initiator: peers[0], Responder: peers[1], ID: tid1}, cids[0], 50, 3, false) require.NoError(t, err) @@ -246,16 +220,12 @@ func TestChannels(t *testing.T) { state = checkEvent(ctx, t, received, datatransfer.DataReceived) require.Equal(t, uint64(100), state.Received()) require.Equal(t, uint64(100), state.Sent()) - - require.ElementsMatch(t, []cid.Cid{cids[0], cids[1]}, state.ReceivedCids()) }) t.Run("missing cids", func(t *testing.T) { ds := dss.MutexWrap(datastore.NewMapDatastore()) - dir := os.TempDir() - cidLists, err := cidlists.NewCIDLists(dir) - 
require.NoError(t, err) - channelList, err := channels.New(ds, cidLists, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0]) + + channelList, err := channels.New(ds, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0]) require.NoError(t, err) err = channelList.Start(ctx) require.NoError(t, err) @@ -398,10 +368,7 @@ func TestChannels(t *testing.T) { notifier := func(evt datatransfer.Event, chst datatransfer.ChannelState) { received <- event{evt, chst} } - dir := os.TempDir() - cidLists, err := cidlists.NewCIDLists(dir) - require.NoError(t, err) - channelList, err := channels.New(ds, cidLists, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0]) + channelList, err := channels.New(ds, notifier, decoderByType, decoderByType, &fakeEnv{}, peers[0]) require.NoError(t, err) err = channelList.Start(ctx) require.NoError(t, err) @@ -451,258 +418,6 @@ func TestIsChannelCleaningUp(t *testing.T) { require.False(t, channels.IsChannelCleaningUp(datatransfer.Cancelled)) } -func TestMigrationsV0(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - - ds := dss.MutexWrap(datastore.NewMapDatastore()) - received := make(chan event) - notifier := func(evt datatransfer.Event, chst datatransfer.ChannelState) { - received <- event{evt, chst} - } - numChannels := 5 - transferIDs := make([]datatransfer.TransferID, numChannels) - initiators := make([]peer.ID, numChannels) - responders := make([]peer.ID, numChannels) - baseCids := make([]cid.Cid, numChannels) - - totalSizes := make([]uint64, numChannels) - sents := make([]uint64, numChannels) - receiveds := make([]uint64, numChannels) - messages := make([]string, numChannels) - vouchers := make([]datatransfer.Voucher, numChannels) - voucherResults := make([]datatransfer.VoucherResult, numChannels) - - allSelector := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any).Matcher().Node() - allSelectorBuf := new(bytes.Buffer) - err := dagcbor.Encode(allSelector, allSelectorBuf) - require.NoError(t, err) - allSelectorBytes := allSelectorBuf.Bytes() - - for i := 0; i < numChannels; i++ { - transferIDs[i] = datatransfer.TransferID(rand.Uint64()) - initiators[i] = testutil.GeneratePeers(1)[0] - responders[i] = testutil.GeneratePeers(1)[0] - baseCids[i] = testutil.GenerateCids(1)[0] - totalSizes[i] = rand.Uint64() - sents[i] = rand.Uint64() - receiveds[i] = rand.Uint64() - messages[i] = string(testutil.RandomBytes(20)) - vouchers[i] = testutil.NewFakeDTType() - vBytes, err := encoding.Encode(vouchers[i]) - require.NoError(t, err) - voucherResults[i] = testutil.NewFakeDTType() - vrBytes, err := encoding.Encode(voucherResults[i]) - require.NoError(t, err) - channel := v0.ChannelState{ - TransferID: transferIDs[i], - Initiator: initiators[i], - Responder: responders[i], - BaseCid: baseCids[i], - Selector: &cbg.Deferred{ - Raw: allSelectorBytes, - }, - Sender: initiators[i], - Recipient: responders[i], - TotalSize: totalSizes[i], - Status: datatransfer.Ongoing, - Sent: sents[i], - Received: receiveds[i], - Message: messages[i], - Vouchers: []v0.EncodedVoucher{ - { - Type: vouchers[i].Type(), - Voucher: &cbg.Deferred{ - Raw: vBytes, - }, - }, - }, - VoucherResults: []v0.EncodedVoucherResult{ - { - Type: voucherResults[i].Type(), - VoucherResult: &cbg.Deferred{ - Raw: vrBytes, - }, - }, - }, - } - buf := new(bytes.Buffer) - err = channel.MarshalCBOR(buf) - require.NoError(t, err) - err = ds.Put(ctx, datastore.NewKey(datatransfer.ChannelID{ - Initiator: initiators[i], - Responder: 
responders[i], - ID: transferIDs[i], - }.String()), buf.Bytes()) - require.NoError(t, err) - } - - selfPeer := testutil.GeneratePeers(1)[0] - dir := os.TempDir() - cidLists, err := cidlists.NewCIDLists(dir) - require.NoError(t, err) - channelList, err := channels.New(ds, cidLists, notifier, decoderByType, decoderByType, &fakeEnv{}, selfPeer) - require.NoError(t, err) - err = channelList.Start(ctx) - require.NoError(t, err) - - for i := 0; i < numChannels; i++ { - - channel, err := channelList.GetByID(ctx, datatransfer.ChannelID{ - Initiator: initiators[i], - Responder: responders[i], - ID: transferIDs[i], - }) - require.NoError(t, err) - require.Equal(t, selfPeer, channel.SelfPeer()) - require.Equal(t, transferIDs[i], channel.TransferID()) - require.Equal(t, baseCids[i], channel.BaseCID()) - require.Equal(t, allSelector, channel.Selector()) - require.Equal(t, initiators[i], channel.Sender()) - require.Equal(t, responders[i], channel.Recipient()) - require.Equal(t, totalSizes[i], channel.TotalSize()) - require.Equal(t, datatransfer.Ongoing, channel.Status()) - require.Equal(t, sents[i], channel.Sent()) - require.Equal(t, receiveds[i], channel.Received()) - require.Equal(t, messages[i], channel.Message()) - require.Equal(t, vouchers[i], channel.LastVoucher()) - require.Equal(t, voucherResults[i], channel.LastVoucherResult()) - require.Len(t, channel.ReceivedCids(), 0) - } -} -func TestMigrationsV1(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - - ds := dss.MutexWrap(datastore.NewMapDatastore()) - received := make(chan event) - notifier := func(evt datatransfer.Event, chst datatransfer.ChannelState) { - received <- event{evt, chst} - } - numChannels := 5 - transferIDs := make([]datatransfer.TransferID, numChannels) - initiators := make([]peer.ID, numChannels) - responders := make([]peer.ID, numChannels) - baseCids := make([]cid.Cid, numChannels) - - totalSizes := make([]uint64, numChannels) - sents := make([]uint64, numChannels) - receiveds := make([]uint64, numChannels) - messages := make([]string, numChannels) - vouchers := make([]datatransfer.Voucher, numChannels) - voucherResults := make([]datatransfer.VoucherResult, numChannels) - receivedCids := make([][]cid.Cid, numChannels) - allSelector := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any).Matcher().Node() - allSelectorBuf := new(bytes.Buffer) - err := dagcbor.Encode(allSelector, allSelectorBuf) - require.NoError(t, err) - allSelectorBytes := allSelectorBuf.Bytes() - selfPeer := testutil.GeneratePeers(1)[0] - dir := os.TempDir() - cidLists, err := cidlists.NewCIDLists(dir) - require.NoError(t, err) - - list, err := migrations.GetChannelStateMigrations(selfPeer, cidLists) - require.NoError(t, err) - vds, up := versionedds.NewVersionedDatastore(ds, list, versioning.VersionKey("1")) - require.NoError(t, up(ctx)) - - for i := 0; i < numChannels; i++ { - transferIDs[i] = datatransfer.TransferID(rand.Uint64()) - initiators[i] = testutil.GeneratePeers(1)[0] - responders[i] = testutil.GeneratePeers(1)[0] - baseCids[i] = testutil.GenerateCids(1)[0] - totalSizes[i] = rand.Uint64() - sents[i] = rand.Uint64() - receiveds[i] = rand.Uint64() - messages[i] = string(testutil.RandomBytes(20)) - vouchers[i] = testutil.NewFakeDTType() - vBytes, err := encoding.Encode(vouchers[i]) - require.NoError(t, err) - voucherResults[i] = testutil.NewFakeDTType() - vrBytes, err := encoding.Encode(voucherResults[i]) - require.NoError(t, err) - receivedCids[i] = testutil.GenerateCids(100) 
- channel := v1.ChannelState{ - TransferID: transferIDs[i], - Initiator: initiators[i], - Responder: responders[i], - BaseCid: baseCids[i], - Selector: &cbg.Deferred{ - Raw: allSelectorBytes, - }, - Sender: initiators[i], - Recipient: responders[i], - TotalSize: totalSizes[i], - Status: datatransfer.Ongoing, - Sent: sents[i], - Received: receiveds[i], - Message: messages[i], - Vouchers: []internal.EncodedVoucher{ - { - Type: vouchers[i].Type(), - Voucher: &cbg.Deferred{ - Raw: vBytes, - }, - }, - }, - VoucherResults: []internal.EncodedVoucherResult{ - { - Type: voucherResults[i].Type(), - VoucherResult: &cbg.Deferred{ - Raw: vrBytes, - }, - }, - }, - SelfPeer: selfPeer, - ReceivedCids: receivedCids[i], - } - buf := new(bytes.Buffer) - err = channel.MarshalCBOR(buf) - require.NoError(t, err) - err = vds.Put(ctx, datastore.NewKey(datatransfer.ChannelID{ - Initiator: initiators[i], - Responder: responders[i], - ID: transferIDs[i], - }.String()), buf.Bytes()) - require.NoError(t, err) - } - - channelList, err := channels.New(ds, cidLists, notifier, decoderByType, decoderByType, &fakeEnv{}, selfPeer) - require.NoError(t, err) - err = channelList.Start(ctx) - require.NoError(t, err) - - for i := 0; i < numChannels; i++ { - - channel, err := channelList.GetByID(ctx, datatransfer.ChannelID{ - Initiator: initiators[i], - Responder: responders[i], - ID: transferIDs[i], - }) - require.NoError(t, err) - require.Equal(t, selfPeer, channel.SelfPeer()) - require.Equal(t, transferIDs[i], channel.TransferID()) - require.Equal(t, baseCids[i], channel.BaseCID()) - require.Equal(t, allSelector, channel.Selector()) - require.Equal(t, initiators[i], channel.Sender()) - require.Equal(t, responders[i], channel.Recipient()) - require.Equal(t, totalSizes[i], channel.TotalSize()) - require.Equal(t, datatransfer.Ongoing, channel.Status()) - require.Equal(t, sents[i], channel.Sent()) - require.Equal(t, receiveds[i], channel.Received()) - require.Equal(t, messages[i], channel.Message()) - require.Equal(t, vouchers[i], channel.LastVoucher()) - require.Equal(t, voucherResults[i], channel.LastVoucherResult()) - // No longer relying on this migration to migrate CID lists as they - // have been deprecated since we moved to CID sets: - // https://github.com/filecoin-project/go-data-transfer/pull/217 - //require.Equal(t, receivedCids[i], channel.ReceivedCids()) - } -} - type event struct { event datatransfer.Event state datatransfer.ChannelState diff --git a/channels/internal/migrations/migrations.go b/channels/internal/migrations/migrations.go index 77b63210..b6a1ed6a 100644 --- a/channels/internal/migrations/migrations.go +++ b/channels/internal/migrations/migrations.go @@ -5,95 +5,9 @@ import ( versioning "github.com/filecoin-project/go-ds-versioning/pkg" "github.com/filecoin-project/go-ds-versioning/pkg/versioned" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/channels/internal" - v0 "github.com/filecoin-project/go-data-transfer/channels/internal/migrations/v0" - v1 "github.com/filecoin-project/go-data-transfer/channels/internal/migrations/v1" - "github.com/filecoin-project/go-data-transfer/cidlists" ) -// MigrateEncodedVoucher0To1 converts a tuple encoded voucher to a map encoded voucher -func MigrateEncodedVoucher0To1(oldV v0.EncodedVoucher) internal.EncodedVoucher { - return internal.EncodedVoucher{ - Type: oldV.Type, - Voucher: oldV.Voucher, - } -} - -// MigrateEncodedVoucherResult0To1 converts a tuple encoded voucher to a map encoded voucher -func 
MigrateEncodedVoucherResult0To1(oldV v0.EncodedVoucherResult) internal.EncodedVoucherResult { - return internal.EncodedVoucherResult{ - Type: oldV.Type, - VoucherResult: oldV.VoucherResult, - } -} - -// GetMigrateChannelState0To1 returns a conversion function for migrating v0 channel state to v1 -func GetMigrateChannelState0To1(selfPeer peer.ID) func(*v0.ChannelState) (*v1.ChannelState, error) { - return func(oldCs *v0.ChannelState) (*v1.ChannelState, error) { - encodedVouchers := make([]internal.EncodedVoucher, 0, len(oldCs.Vouchers)) - for _, ev0 := range oldCs.Vouchers { - encodedVouchers = append(encodedVouchers, MigrateEncodedVoucher0To1(ev0)) - } - encodedVoucherResults := make([]internal.EncodedVoucherResult, 0, len(oldCs.VoucherResults)) - for _, evr0 := range oldCs.VoucherResults { - encodedVoucherResults = append(encodedVoucherResults, MigrateEncodedVoucherResult0To1(evr0)) - } - return &v1.ChannelState{ - SelfPeer: selfPeer, - TransferID: oldCs.TransferID, - Initiator: oldCs.Initiator, - Responder: oldCs.Responder, - BaseCid: oldCs.BaseCid, - Selector: oldCs.Selector, - Sender: oldCs.Sender, - Recipient: oldCs.Recipient, - TotalSize: oldCs.TotalSize, - Status: oldCs.Status, - Sent: oldCs.Sent, - Received: oldCs.Received, - Message: oldCs.Message, - Vouchers: encodedVouchers, - VoucherResults: encodedVoucherResults, - ReceivedCids: nil, - }, nil - } -} - -// GetMigrateChannelState1To2 returns a conversion function for migrating v1 channel state to v2 channel state -func GetMigrateChannelState1To2(cidLists cidlists.CIDLists) func(*v1.ChannelState) (*internal.ChannelState, error) { - return func(oldCs *v1.ChannelState) (*internal.ChannelState, error) { - err := cidLists.CreateList(datatransfer.ChannelID{ID: oldCs.TransferID, Initiator: oldCs.Initiator, Responder: oldCs.Responder}, oldCs.ReceivedCids) - if err != nil { - return nil, err - } - return &internal.ChannelState{ - SelfPeer: oldCs.SelfPeer, - TransferID: oldCs.TransferID, - Initiator: oldCs.Initiator, - Responder: oldCs.Responder, - BaseCid: oldCs.BaseCid, - Selector: oldCs.Selector, - Sender: oldCs.Sender, - Recipient: oldCs.Recipient, - TotalSize: oldCs.TotalSize, - Status: oldCs.Status, - Sent: oldCs.Sent, - Received: oldCs.Received, - Message: oldCs.Message, - Vouchers: oldCs.Vouchers, - VoucherResults: oldCs.VoucherResults, - }, nil - } -} - // GetChannelStateMigrations returns a migration list for the channel states -func GetChannelStateMigrations(selfPeer peer.ID, cidLists cidlists.CIDLists) (versioning.VersionedMigrationList, error) { - channelStateMigration0To1 := GetMigrateChannelState0To1(selfPeer) - channelStateMigration1To2 := GetMigrateChannelState1To2(cidLists) - return versioned.BuilderList{ - versioned.NewVersionedBuilder(channelStateMigration0To1, versioning.VersionKey("1")), - versioned.NewVersionedBuilder(channelStateMigration1To2, versioning.VersionKey("2")).OldVersion("1"), - }.Build() +func GetChannelStateMigrations(selfPeer peer.ID) (versioning.VersionedMigrationList, error) { + return versioned.BuilderList{}.Build() } diff --git a/channels/internal/migrations/v0/v0.go b/channels/internal/migrations/v0/v0.go deleted file mode 100644 index b352abe5..00000000 --- a/channels/internal/migrations/v0/v0.go +++ /dev/null @@ -1,57 +0,0 @@ -package v0 - -import ( - "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - - datatransfer "github.com/filecoin-project/go-data-transfer" -) - -//go:generate cbor-gen-for ChannelState EncodedVoucher 
EncodedVoucherResult - -// EncodedVoucher is version 0 of EncodedVoucher -type EncodedVoucher struct { - // Vouchers identifier for decoding - Type datatransfer.TypeIdentifier - // used to verify this channel - Voucher *cbg.Deferred -} - -// EncodedVoucherResult is version 0 of EncodedVoucherResult -type EncodedVoucherResult struct { - // Vouchers identifier for decoding - Type datatransfer.TypeIdentifier - // used to verify this channel - VoucherResult *cbg.Deferred -} - -// ChannelState is version 0 of ChannelState -type ChannelState struct { - // an identifier for this channel shared by request and responder, set by requester through protocol - TransferID datatransfer.TransferID - // Initiator is the person who intiated this datatransfer request - Initiator peer.ID - // Responder is the person who is responding to this datatransfer request - Responder peer.ID - // base CID for the piece being transferred - BaseCid cid.Cid - // portion of Piece to return, specified by an IPLD selector - Selector *cbg.Deferred - // the party that is sending the data (not who initiated the request) - Sender peer.ID - // the party that is receiving the data (not who initiated the request) - Recipient peer.ID - // expected amount of data to be transferred - TotalSize uint64 - // current status of this deal - Status datatransfer.Status - // total bytes sent from this node (0 if receiver) - Sent uint64 - // total bytes received by this node (0 if sender) - Received uint64 - // more informative status on a channel - Message string - Vouchers []EncodedVoucher - VoucherResults []EncodedVoucherResult -} diff --git a/channels/internal/migrations/v0/v0_cbor_gen.go b/channels/internal/migrations/v0/v0_cbor_gen.go deleted file mode 100644 index 6ec595ef..00000000 --- a/channels/internal/migrations/v0/v0_cbor_gen.go +++ /dev/null @@ -1,530 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package v0 - -import ( - "fmt" - "io" - "sort" - - datatransfer "github.com/filecoin-project/go-data-transfer" - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = sort.Sort - -var lengthBufChannelState = []byte{142} - -func (t *ChannelState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufChannelState); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.TransferID (datatransfer.TransferID) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TransferID)); err != nil { - return err - } - - // t.Initiator (peer.ID) (string) - if len(t.Initiator) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Initiator was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Initiator))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Initiator)); err != nil { - return err - } - - // t.Responder (peer.ID) (string) - if len(t.Responder) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Responder was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Responder))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Responder)); err != nil { - return err - } - - // t.BaseCid (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.BaseCid); err != nil { - return xerrors.Errorf("failed to write cid field t.BaseCid: %w", err) - } - - // t.Selector (typegen.Deferred) (struct) - if err := t.Selector.MarshalCBOR(w); err != nil { - return err - } - - // t.Sender (peer.ID) (string) - if len(t.Sender) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Sender was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Sender))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Sender)); err != nil { - return err - } - - // t.Recipient (peer.ID) (string) - if len(t.Recipient) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Recipient was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Recipient))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Recipient)); err != nil { - return err - } - - // t.TotalSize (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSize)); err != nil { - return err - } - - // t.Status (datatransfer.Status) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { - return err - } - - // t.Sent (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Sent)); err != nil { - return err - } - - // t.Received (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Received)); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Message)); err != nil { - return err - } - - // t.Vouchers 
([]v0.EncodedVoucher) (slice) - if len(t.Vouchers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Vouchers was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Vouchers))); err != nil { - return err - } - for _, v := range t.Vouchers { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.VoucherResults ([]v0.EncodedVoucherResult) (slice) - if len(t.VoucherResults) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.VoucherResults was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.VoucherResults))); err != nil { - return err - } - for _, v := range t.VoucherResults { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { - *t = ChannelState{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 14 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.TransferID (datatransfer.TransferID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TransferID = datatransfer.TransferID(extra) - - } - // t.Initiator (peer.ID) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Initiator = peer.ID(sval) - } - // t.Responder (peer.ID) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Responder = peer.ID(sval) - } - // t.BaseCid (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.BaseCid: %w", err) - } - - t.BaseCid = c - - } - // t.Selector (typegen.Deferred) (struct) - - { - - t.Selector = new(cbg.Deferred) - - if err := t.Selector.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } - } - // t.Sender (peer.ID) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Sender = peer.ID(sval) - } - // t.Recipient (peer.ID) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Recipient = peer.ID(sval) - } - // t.TotalSize (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TotalSize = uint64(extra) - - } - // t.Status (datatransfer.Status) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Status = datatransfer.Status(extra) - - } - // t.Sent (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Sent = uint64(extra) - - } - // t.Received (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Received = 
uint64(extra) - - } - // t.Message (string) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Message = string(sval) - } - // t.Vouchers ([]v0.EncodedVoucher) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Vouchers: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Vouchers = make([]EncodedVoucher, extra) - } - - for i := 0; i < int(extra); i++ { - - var v EncodedVoucher - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Vouchers[i] = v - } - - // t.VoucherResults ([]v0.EncodedVoucherResult) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.VoucherResults: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.VoucherResults = make([]EncodedVoucherResult, extra) - } - - for i := 0; i < int(extra); i++ { - - var v EncodedVoucherResult - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.VoucherResults[i] = v - } - - return nil -} - -var lengthBufEncodedVoucher = []byte{130} - -func (t *EncodedVoucher) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufEncodedVoucher); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Type (datatransfer.TypeIdentifier) (string) - if len(t.Type) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Type was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Type))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Type)); err != nil { - return err - } - - // t.Voucher (typegen.Deferred) (struct) - if err := t.Voucher.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *EncodedVoucher) UnmarshalCBOR(r io.Reader) error { - *t = EncodedVoucher{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Type (datatransfer.TypeIdentifier) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Type = datatransfer.TypeIdentifier(sval) - } - // t.Voucher (typegen.Deferred) (struct) - - { - - t.Voucher = new(cbg.Deferred) - - if err := t.Voucher.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } - } - return nil -} - -var lengthBufEncodedVoucherResult = []byte{130} - -func (t *EncodedVoucherResult) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufEncodedVoucherResult); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Type (datatransfer.TypeIdentifier) (string) - if len(t.Type) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Type was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Type))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Type)); err != nil { - return 
err - } - - // t.VoucherResult (typegen.Deferred) (struct) - if err := t.VoucherResult.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *EncodedVoucherResult) UnmarshalCBOR(r io.Reader) error { - *t = EncodedVoucherResult{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Type (datatransfer.TypeIdentifier) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Type = datatransfer.TypeIdentifier(sval) - } - // t.VoucherResult (typegen.Deferred) (struct) - - { - - t.VoucherResult = new(cbg.Deferred) - - if err := t.VoucherResult.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } - } - return nil -} diff --git a/channels/internal/migrations/v1/v1.go b/channels/internal/migrations/v1/v1.go deleted file mode 100644 index dad15a1c..00000000 --- a/channels/internal/migrations/v1/v1.go +++ /dev/null @@ -1,49 +0,0 @@ -package v1 - -import ( - "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/channels/internal" -) - -//go:generate cbor-gen-for --map-encoding ChannelState - -// ChannelState is the internal representation on disk for the channel fsm -type ChannelState struct { - // PeerId of the manager peer - SelfPeer peer.ID - // an identifier for this channel shared by request and responder, set by requester through protocol - TransferID datatransfer.TransferID - // Initiator is the person who intiated this datatransfer request - Initiator peer.ID - // Responder is the person who is responding to this datatransfer request - Responder peer.ID - // base CID for the piece being transferred - BaseCid cid.Cid - // portion of Piece to return, specified by an IPLD selector - Selector *cbg.Deferred - // the party that is sending the data (not who initiated the request) - Sender peer.ID - // the party that is receiving the data (not who initiated the request) - Recipient peer.ID - // expected amount of data to be transferred - TotalSize uint64 - // current status of this deal - Status datatransfer.Status - // total bytes read from this node and queued for sending (0 if receiver) - Queued uint64 - // total bytes sent from this node (0 if receiver) - Sent uint64 - // total bytes received by this node (0 if sender) - Received uint64 - // more informative status on a channel - Message string - Vouchers []internal.EncodedVoucher - VoucherResults []internal.EncodedVoucherResult - - // ReceivedCids is all the cids the initiator has received so far - ReceivedCids []cid.Cid -} diff --git a/channels/internal/migrations/v1/v1_cbor_gen.go b/channels/internal/migrations/v1/v1_cbor_gen.go deleted file mode 100644 index 77372526..00000000 --- a/channels/internal/migrations/v1/v1_cbor_gen.go +++ /dev/null @@ -1,685 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package v1 - -import ( - "fmt" - "io" - "sort" - - datatransfer "github.com/filecoin-project/go-data-transfer" - internal "github.com/filecoin-project/go-data-transfer/channels/internal" - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = sort.Sort - -func (t *ChannelState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{177}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SelfPeer (peer.ID) (string) - if len("SelfPeer") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"SelfPeer\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SelfPeer"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("SelfPeer")); err != nil { - return err - } - - if len(t.SelfPeer) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.SelfPeer was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.SelfPeer))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.SelfPeer)); err != nil { - return err - } - - // t.TransferID (datatransfer.TransferID) (uint64) - if len("TransferID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"TransferID\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TransferID"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("TransferID")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TransferID)); err != nil { - return err - } - - // t.Initiator (peer.ID) (string) - if len("Initiator") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Initiator\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Initiator"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Initiator")); err != nil { - return err - } - - if len(t.Initiator) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Initiator was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Initiator))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Initiator)); err != nil { - return err - } - - // t.Responder (peer.ID) (string) - if len("Responder") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Responder\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Responder"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Responder")); err != nil { - return err - } - - if len(t.Responder) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Responder was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Responder))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Responder)); err != nil { - return err - } - - // t.BaseCid (cid.Cid) (struct) - if len("BaseCid") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"BaseCid\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("BaseCid"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("BaseCid")); err != 
nil { - return err - } - - if err := cbg.WriteCidBuf(scratch, w, t.BaseCid); err != nil { - return xerrors.Errorf("failed to write cid field t.BaseCid: %w", err) - } - - // t.Selector (typegen.Deferred) (struct) - if len("Selector") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Selector\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Selector"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Selector")); err != nil { - return err - } - - if err := t.Selector.MarshalCBOR(w); err != nil { - return err - } - - // t.Sender (peer.ID) (string) - if len("Sender") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Sender\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sender"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Sender")); err != nil { - return err - } - - if len(t.Sender) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Sender was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Sender))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Sender)); err != nil { - return err - } - - // t.Recipient (peer.ID) (string) - if len("Recipient") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Recipient\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Recipient"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Recipient")); err != nil { - return err - } - - if len(t.Recipient) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Recipient was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Recipient))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Recipient)); err != nil { - return err - } - - // t.TotalSize (uint64) (uint64) - if len("TotalSize") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"TotalSize\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TotalSize"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("TotalSize")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSize)); err != nil { - return err - } - - // t.Status (datatransfer.Status) (uint64) - if len("Status") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Status\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Status")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { - return err - } - - // t.Queued (uint64) (uint64) - if len("Queued") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Queued\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Queued"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Queued")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Queued)); err != nil { - return err - } - - // t.Sent (uint64) (uint64) - if len("Sent") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Sent\" was too long") - } - 
- if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sent"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Sent")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Sent)); err != nil { - return err - } - - // t.Received (uint64) (uint64) - if len("Received") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Received\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Received"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Received")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Received)); err != nil { - return err - } - - // t.Message (string) (string) - if len("Message") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Message\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Message")); err != nil { - return err - } - - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Message)); err != nil { - return err - } - - // t.Vouchers ([]internal.EncodedVoucher) (slice) - if len("Vouchers") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Vouchers\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Vouchers"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Vouchers")); err != nil { - return err - } - - if len(t.Vouchers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Vouchers was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Vouchers))); err != nil { - return err - } - for _, v := range t.Vouchers { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.VoucherResults ([]internal.EncodedVoucherResult) (slice) - if len("VoucherResults") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"VoucherResults\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VoucherResults"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("VoucherResults")); err != nil { - return err - } - - if len(t.VoucherResults) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.VoucherResults was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.VoucherResults))); err != nil { - return err - } - for _, v := range t.VoucherResults { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.ReceivedCids ([]cid.Cid) (slice) - if len("ReceivedCids") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"ReceivedCids\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReceivedCids"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("ReceivedCids")); err != nil { - return err - } - - if len(t.ReceivedCids) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ReceivedCids was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, 
uint64(len(t.ReceivedCids))); err != nil { - return err - } - for _, v := range t.ReceivedCids { - if err := cbg.WriteCidBuf(scratch, w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.ReceivedCids: %w", err) - } - } - return nil -} - -func (t *ChannelState) UnmarshalCBOR(r io.Reader) error { - *t = ChannelState{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("ChannelState: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.SelfPeer (peer.ID) (string) - case "SelfPeer": - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.SelfPeer = peer.ID(sval) - } - // t.TransferID (datatransfer.TransferID) (uint64) - case "TransferID": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TransferID = datatransfer.TransferID(extra) - - } - // t.Initiator (peer.ID) (string) - case "Initiator": - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Initiator = peer.ID(sval) - } - // t.Responder (peer.ID) (string) - case "Responder": - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Responder = peer.ID(sval) - } - // t.BaseCid (cid.Cid) (struct) - case "BaseCid": - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.BaseCid: %w", err) - } - - t.BaseCid = c - - } - // t.Selector (typegen.Deferred) (struct) - case "Selector": - - { - - t.Selector = new(cbg.Deferred) - - if err := t.Selector.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } - } - // t.Sender (peer.ID) (string) - case "Sender": - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Sender = peer.ID(sval) - } - // t.Recipient (peer.ID) (string) - case "Recipient": - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Recipient = peer.ID(sval) - } - // t.TotalSize (uint64) (uint64) - case "TotalSize": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TotalSize = uint64(extra) - - } - // t.Status (datatransfer.Status) (uint64) - case "Status": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Status = datatransfer.Status(extra) - - } - // t.Queued (uint64) (uint64) - case "Queued": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Queued = uint64(extra) - - } - // t.Sent (uint64) (uint64) - case "Sent": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return 
fmt.Errorf("wrong type for uint64 field") - } - t.Sent = uint64(extra) - - } - // t.Received (uint64) (uint64) - case "Received": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Received = uint64(extra) - - } - // t.Message (string) (string) - case "Message": - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Message = string(sval) - } - // t.Vouchers ([]internal.EncodedVoucher) (slice) - case "Vouchers": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Vouchers: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Vouchers = make([]internal.EncodedVoucher, extra) - } - - for i := 0; i < int(extra); i++ { - - var v internal.EncodedVoucher - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Vouchers[i] = v - } - - // t.VoucherResults ([]internal.EncodedVoucherResult) (slice) - case "VoucherResults": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.VoucherResults: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.VoucherResults = make([]internal.EncodedVoucherResult, extra) - } - - for i := 0; i < int(extra); i++ { - - var v internal.EncodedVoucherResult - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.VoucherResults[i] = v - } - - // t.ReceivedCids ([]cid.Cid) (slice) - case "ReceivedCids": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ReceivedCids: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ReceivedCids = make([]cid.Cid, extra) - } - - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("reading cid field t.ReceivedCids failed: %w", err) - } - t.ReceivedCids[i] = c - } - - default: - // Field doesn't exist on this type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} diff --git a/cidlists/cidlists.go b/cidlists/cidlists.go deleted file mode 100644 index 8ca30826..00000000 --- a/cidlists/cidlists.go +++ /dev/null @@ -1,112 +0,0 @@ -package cidlists - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - - datatransfer "github.com/filecoin-project/go-data-transfer" -) - -// Deprecated: CIDLists have now been replaced by CID sets (see cidsets directory). 
-// CIDLists maintains files that contain a list of CIDs received for different data transfers -type CIDLists interface { - CreateList(chid datatransfer.ChannelID, initalCids []cid.Cid) error - AppendList(chid datatransfer.ChannelID, c cid.Cid) error - ReadList(chid datatransfer.ChannelID) ([]cid.Cid, error) - DeleteList(chid datatransfer.ChannelID) error -} - -type cidLists struct { - baseDir string -} - -// NewCIDLists initializes a new set of cid lists in a given directory -func NewCIDLists(baseDir string) (CIDLists, error) { - base := filepath.Clean(string(baseDir)) - info, err := os.Stat(string(base)) - if err != nil { - return nil, fmt.Errorf("error getting %s info: %s", base, err.Error()) - } - if !info.IsDir() { - return nil, fmt.Errorf("%s is not a directory", base) - } - return &cidLists{ - baseDir: base, - }, nil -} - -// CreateList initializes a new CID list with the given initial cids (or can be empty) for a data transfer channel -func (cl *cidLists) CreateList(chid datatransfer.ChannelID, initialCids []cid.Cid) (err error) { - f, err := os.Create(transferFilename(cl.baseDir, chid)) - if err != nil { - return err - } - defer func() { - closeErr := f.Close() - if err == nil { - err = closeErr - } - }() - for _, c := range initialCids { - err := cbg.WriteCid(f, c) - if err != nil { - return err - } - } - return nil -} - -// AppendList appends a single CID to the list for a given data transfer channel -func (cl *cidLists) AppendList(chid datatransfer.ChannelID, c cid.Cid) (err error) { - f, err := os.OpenFile(transferFilename(cl.baseDir, chid), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return err - } - defer func() { - closeErr := f.Close() - if err == nil { - err = closeErr - } - }() - return cbg.WriteCid(f, c) -} - -// ReadList reads an on disk list of cids for the given data transfer channel -func (cl *cidLists) ReadList(chid datatransfer.ChannelID) (cids []cid.Cid, err error) { - f, err := os.Open(transferFilename(cl.baseDir, chid)) - if err != nil { - return nil, err - } - defer func() { - closeErr := f.Close() - if err == nil { - err = closeErr - } - }() - var receivedCids []cid.Cid - for { - c, err := cbg.ReadCid(f) - if err != nil { - if err == io.EOF { - return receivedCids, nil - } - return nil, err - } - receivedCids = append(receivedCids, c) - } -} - -// DeleteList deletes the list for the given data transfer channel -func (cl *cidLists) DeleteList(chid datatransfer.ChannelID) error { - return os.Remove(transferFilename(cl.baseDir, chid)) -} - -func transferFilename(baseDir string, chid datatransfer.ChannelID) string { - filename := fmt.Sprintf("%d-%s-%s", chid.ID, chid.Initiator, chid.Responder) - return filepath.Join(baseDir, filename) -} diff --git a/cidlists/cidlists_test.go b/cidlists/cidlists_test.go deleted file mode 100644 index d9fe57b5..00000000 --- a/cidlists/cidlists_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package cidlists_test - -import ( - "fmt" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "testing" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/cidlists" - "github.com/filecoin-project/go-data-transfer/testutil" -) - -func TestCIDLists(t *testing.T) { - - baseDir, err := ioutil.TempDir("", "cidlisttest") - require.NoError(t, err) - - chid1 := datatransfer.ChannelID{ID: datatransfer.TransferID(rand.Uint64()), Initiator: testutil.GeneratePeers(1)[0], Responder: testutil.GeneratePeers(1)[0]} - 
chid2 := datatransfer.ChannelID{ID: datatransfer.TransferID(rand.Uint64()), Initiator: testutil.GeneratePeers(1)[0], Responder: testutil.GeneratePeers(1)[0]} - initialCids1 := testutil.GenerateCids(100) - - cidLists, err := cidlists.NewCIDLists(baseDir) - require.NoError(t, err) - - t.Run("creating lists", func(t *testing.T) { - require.NoError(t, cidLists.CreateList(chid1, initialCids1)) - - filename := fmt.Sprintf("%d-%s-%s", chid1.ID, chid1.Initiator, chid1.Responder) - f, err := os.Open(filepath.Join(baseDir, filename)) - require.NoError(t, err) - f.Close() - - require.NoError(t, cidLists.CreateList(chid2, nil)) - - filename = fmt.Sprintf("%d-%s-%s", chid2.ID, chid2.Initiator, chid2.Responder) - f, err = os.Open(filepath.Join(baseDir, filename)) - require.NoError(t, err) - f.Close() - }) - - t.Run("reading lists", func(t *testing.T) { - savedCids1, err := cidLists.ReadList(chid1) - require.NoError(t, err) - require.Equal(t, initialCids1, savedCids1) - - savedCids2, err := cidLists.ReadList(chid2) - require.NoError(t, err) - require.Nil(t, savedCids2) - }) - - t.Run("appending lists", func(t *testing.T) { - newCid1 := testutil.GenerateCids(1)[0] - require.NoError(t, cidLists.AppendList(chid1, newCid1)) - savedCids1, err := cidLists.ReadList(chid1) - require.NoError(t, err) - require.Equal(t, append(initialCids1, newCid1), savedCids1) - - newCid2 := testutil.GenerateCids(1)[0] - require.NoError(t, cidLists.AppendList(chid2, newCid2)) - savedCids2, err := cidLists.ReadList(chid2) - require.NoError(t, err) - require.Equal(t, []cid.Cid{newCid2}, savedCids2) - }) - - t.Run("deleting lists", func(t *testing.T) { - require.NoError(t, cidLists.DeleteList(chid1)) - - filename := fmt.Sprintf("%d-%s-%s", chid1.ID, chid1.Initiator, chid1.Responder) - _, err := os.Open(filepath.Join(baseDir, filename)) - require.Error(t, err) - - require.NoError(t, cidLists.DeleteList(chid2)) - - filename = fmt.Sprintf("%d-%s-%s", chid2.ID, chid2.Initiator, chid2.Responder) - _, err = os.Open(filepath.Join(baseDir, filename)) - require.Error(t, err) - }) -} diff --git a/cidsets/cidsets.go b/cidsets/cidsets.go deleted file mode 100644 index 2f5c7b87..00000000 --- a/cidsets/cidsets.go +++ /dev/null @@ -1,224 +0,0 @@ -package cidsets - -import ( - "context" - "sync" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - "github.com/ipfs/go-datastore/query" -) - -// SetID is a unique ID for a CID set -type SetID string - -// CIDSetManager keeps track of several CID sets, by SetID -type CIDSetManager struct { - ds datastore.Datastore - lk sync.Mutex - sets map[SetID]*cidSet -} - -func NewCIDSetManager(ds datastore.Datastore) *CIDSetManager { - return &CIDSetManager{ds: ds, sets: make(map[SetID]*cidSet)} -} - -// InsertSetCID inserts a CID into a CID set. -// Returns true if the set already contained the CID. 
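Because the seen-CID tracking in this package (like the cidlists directory above) goes away entirely, embedders no longer hand the library a directory for CID storage; the constructor change itself appears in the impl/impl.go hunk further down. Below is a hedged sketch of the new call shape, with ds, dtNet and transport standing in for whatever the embedding application already constructs.

package example // hypothetical wiring code, not part of this patch

import (
	"github.com/ipfs/go-datastore"

	datatransfer "github.com/filecoin-project/go-data-transfer"
	"github.com/filecoin-project/go-data-transfer/impl"
	"github.com/filecoin-project/go-data-transfer/network"
)

// newManager shows the post-patch constructor: the cidListsDir argument
// (os.TempDir() in most callers) is simply dropped. The graphsync transport
// passed in is likewise built without a network argument after this patch,
// i.e. NewTransport(peerID, gs) rather than NewTransport(peerID, gs, dtNet).
func newManager(ds datastore.Batching, dtNet network.DataTransferNetwork, transport datatransfer.Transport) (datatransfer.Manager, error) {
	return impl.NewDataTransfer(ds, dtNet, transport)
}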
-func (mgr *CIDSetManager) InsertSetCID(sid SetID, c cid.Cid) (exists bool, err error) { - return mgr.getSet(sid).Insert(c) -} - -// SetToArray gets the set as an array of CIDs -func (mgr *CIDSetManager) SetToArray(sid SetID) ([]cid.Cid, error) { - return mgr.getSet(sid).ToArray() -} - -// SetLen gets the number of CIDs in the set -func (mgr *CIDSetManager) SetLen(sid SetID) (int, error) { - return mgr.getSet(sid).Len() -} - -// DeleteSet deletes a CID set -func (mgr *CIDSetManager) DeleteSet(sid SetID) error { - return mgr.getSet(sid).Truncate() -} - -// getSet gets the cidSet for the given SetID -func (mgr *CIDSetManager) getSet(sid SetID) *cidSet { - mgr.lk.Lock() - defer mgr.lk.Unlock() - - s, ok := mgr.sets[sid] - if !ok { - s = NewCIDSet(mgr.getSetDS(sid)) - mgr.sets[sid] = s - } - return s -} - -// getSetDS gets the wrapped datastore for the given SetID -func (mgr *CIDSetManager) getSetDS(sid SetID) datastore.Batching { - setDSKey := datastore.NewKey(string(sid) + "/cids") - return namespace.Wrap(mgr.ds, setDSKey) -} - -// cidSet persists a set of CIDs -type cidSet struct { - lk sync.Mutex - ds datastore.Batching - len int // cached length of set, starts at -1 -} - -func NewCIDSet(ds datastore.Batching) *cidSet { - return &cidSet{ds: ds, len: -1} -} - -// Insert a CID into the set. -// Returns true if the the CID was already in the set. -func (s *cidSet) Insert(c cid.Cid) (exists bool, err error) { - ctx := context.TODO() - s.lk.Lock() - defer s.lk.Unlock() - - // Check if the key is in the set already - k := datastore.NewKey(c.String()) - has, err := s.ds.Has(ctx, k) - if err != nil { - return false, err - } - if has { - // Already in the set, just return true - return true, nil - } - - // Get the length of the set - len, err := s.unlockedLen() - if err != nil { - return false, err - } - - // Add the new CID to the set - err = s.ds.Put(ctx, k, nil) - if err != nil { - return false, err - } - - // Increment the cached length of the set - s.len = len + 1 - - return false, nil -} - -// Returns the number of CIDs in the set -func (s *cidSet) Len() (int, error) { - s.lk.Lock() - defer s.lk.Unlock() - - return s.unlockedLen() -} - -func (s *cidSet) unlockedLen() (int, error) { - ctx := context.TODO() - - // If the length is already cached, return it - if s.len >= 0 { - return s.len, nil - } - - // Query the datastore for all keys - res, err := s.ds.Query(ctx, query.Query{KeysOnly: true}) - if err != nil { - return 0, err - } - - entries, err := res.Rest() - if err != nil { - return 0, err - } - - // Cache the length of the set - s.len = len(entries) - - return s.len, nil -} - -// Get all cids in the set as an array -func (s *cidSet) ToArray() ([]cid.Cid, error) { - ctx := context.TODO() - - s.lk.Lock() - defer s.lk.Unlock() - - res, err := s.ds.Query(ctx, query.Query{KeysOnly: true}) - if err != nil { - return nil, err - } - - entries, err := res.Rest() - if err != nil { - return nil, err - } - - cids := make([]cid.Cid, 0, len(entries)) - for _, entry := range entries { - // When we create a datastore Key, a "/" is automatically pre-pended, - // so here we need to remove the preceding "/" before parsing as a CID - k := entry.Key - if string(k[0]) == "/" { - k = k[1:] - } - - c, err := cid.Parse(k) - if err != nil { - return nil, err - } - cids = append(cids, c) - } - return cids, nil -} - -// Truncate removes all CIDs in the set -func (s *cidSet) Truncate() error { - ctx := context.TODO() - - s.lk.Lock() - defer s.lk.Unlock() - - // Get all keys in the datastore - res, err := 
s.ds.Query(ctx, query.Query{KeysOnly: true}) - if err != nil { - return err - } - - entries, err := res.Rest() - if err != nil { - return err - } - - // Create a batch to perform all deletes as one operation - batched, err := s.ds.Batch(ctx) - if err != nil { - return err - } - - // Add delete operations for each key to the batch - for _, entry := range entries { - err := batched.Delete(ctx, datastore.NewKey(entry.Key)) - if err != nil { - return err - } - } - - // Commit the batch - err = batched.Commit(ctx) - if err != nil { - return err - } - - // Set the cached length of the set to zero - s.len = 0 - - return nil -} diff --git a/cidsets/cidsets_test.go b/cidsets/cidsets_test.go deleted file mode 100644 index ae75a719..00000000 --- a/cidsets/cidsets_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package cidsets - -import ( - "testing" - - ds "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-data-transfer/testutil" -) - -func TestCIDSetManager(t *testing.T) { - cid1 := testutil.GenerateCids(1)[0] - - dstore := ds_sync.MutexWrap(ds.NewMapDatastore()) - mgr := NewCIDSetManager(dstore) - setID1 := SetID("set1") - setID2 := SetID("set2") - - // set1: +cid1 - exists, err := mgr.InsertSetCID(setID1, cid1) - require.NoError(t, err) - require.False(t, exists) - - // set1: +cid1 (again) - exists, err = mgr.InsertSetCID(setID1, cid1) - require.NoError(t, err) - require.True(t, exists) - - // set2: +cid1 - exists, err = mgr.InsertSetCID(setID2, cid1) - require.NoError(t, err) - require.False(t, exists) - - // set2: +cid2 (again) - exists, err = mgr.InsertSetCID(setID2, cid1) - require.NoError(t, err) - require.True(t, exists) - - // delete set1 - err = mgr.DeleteSet(setID1) - require.NoError(t, err) - - // set1: +cid1 - exists, err = mgr.InsertSetCID(setID1, cid1) - require.NoError(t, err) - require.False(t, exists) - - // set1: +cid1 (again) - exists, err = mgr.InsertSetCID(setID2, cid1) - require.NoError(t, err) - require.True(t, exists) -} - -func TestCIDSetToArray(t *testing.T) { - cids := testutil.GenerateCids(2) - cid1 := cids[0] - cid2 := cids[1] - - dstore := ds_sync.MutexWrap(ds.NewMapDatastore()) - mgr := NewCIDSetManager(dstore) - setID1 := SetID("set1") - - // Expect no items in set - len, err := mgr.SetLen(setID1) - require.NoError(t, err) - require.Equal(t, 0, len) - - arr, err := mgr.SetToArray(setID1) - require.NoError(t, err) - require.Len(t, arr, 0) - - // set1: +cid1 - exists, err := mgr.InsertSetCID(setID1, cid1) - require.NoError(t, err) - require.False(t, exists) - - // Expect 1 cid in set - len, err = mgr.SetLen(setID1) - require.NoError(t, err) - require.Equal(t, 1, len) - - arr, err = mgr.SetToArray(setID1) - require.NoError(t, err) - require.Len(t, arr, 1) - require.Equal(t, arr[0], cid1) - - // set1: +cid1 (again) - exists, err = mgr.InsertSetCID(setID1, cid1) - require.NoError(t, err) - require.True(t, exists) - - // Expect 1 cid in set - len, err = mgr.SetLen(setID1) - require.NoError(t, err) - require.Equal(t, 1, len) - - arr, err = mgr.SetToArray(setID1) - require.NoError(t, err) - require.Len(t, arr, 1) - require.Equal(t, arr[0], cid1) - - // set1: +cid2 - exists, err = mgr.InsertSetCID(setID1, cid2) - require.NoError(t, err) - require.False(t, exists) - - // Expect 2 cids in set - len, err = mgr.SetLen(setID1) - require.NoError(t, err) - require.Equal(t, 2, len) - - arr, err = mgr.SetToArray(setID1) - require.NoError(t, err) - require.Len(t, arr, 2) - require.Contains(t, 
arr, cid1) - require.Contains(t, arr, cid2) - - // Delete set1 - err = mgr.DeleteSet(setID1) - require.NoError(t, err) - - // Expect no items in set - len, err = mgr.SetLen(setID1) - require.NoError(t, err) - require.Equal(t, 0, len) - - arr, err = mgr.SetToArray(setID1) - require.NoError(t, err) - require.Len(t, arr, 0) -} - -// Add items to set then get the length (to make sure that internal caching -// is working correctly) -func TestCIDSetLenAfterInsert(t *testing.T) { - cids := testutil.GenerateCids(2) - cid1 := cids[0] - cid2 := cids[1] - - dstore := ds_sync.MutexWrap(ds.NewMapDatastore()) - mgr := NewCIDSetManager(dstore) - setID1 := SetID("set1") - - // set1: +cid1 - exists, err := mgr.InsertSetCID(setID1, cid1) - require.NoError(t, err) - require.False(t, exists) - - // set1: +cid2 - exists, err = mgr.InsertSetCID(setID1, cid2) - require.NoError(t, err) - require.False(t, exists) - - // Expect 2 cids in set - len, err := mgr.SetLen(setID1) - require.NoError(t, err) - require.Equal(t, 2, len) -} - -func TestCIDSetRestart(t *testing.T) { - cids := testutil.GenerateCids(3) - cid1 := cids[0] - cid2 := cids[1] - cid3 := cids[2] - - dstore := ds_sync.MutexWrap(ds.NewMapDatastore()) - mgr := NewCIDSetManager(dstore) - setID1 := SetID("set1") - - // set1: +cid1 - exists, err := mgr.InsertSetCID(setID1, cid1) - require.NoError(t, err) - require.False(t, exists) - - // set1: +cid2 - exists, err = mgr.InsertSetCID(setID1, cid2) - require.NoError(t, err) - require.False(t, exists) - - // Expect 2 cids in set - arr, err := mgr.SetToArray(setID1) - require.NoError(t, err) - require.Len(t, arr, 2) - require.Contains(t, arr, cid1) - require.Contains(t, arr, cid2) - - // Simulate a restart by creating a new CIDSetManager from the same - // datastore - mgr = NewCIDSetManager(dstore) - - // Expect 2 cids in set - arr, err = mgr.SetToArray(setID1) - require.NoError(t, err) - require.Len(t, arr, 2) - require.Contains(t, arr, cid1) - require.Contains(t, arr, cid2) - - // set1: +cid3 - exists, err = mgr.InsertSetCID(setID1, cid3) - require.NoError(t, err) - require.False(t, exists) - - // Expect 3 cids in set - arr, err = mgr.SetToArray(setID1) - require.NoError(t, err) - require.Len(t, arr, 3) - require.Contains(t, arr, cid1) - require.Contains(t, arr, cid2) - require.Contains(t, arr, cid3) -} diff --git a/impl/impl.go b/impl/impl.go index 393fbd98..c3f511d4 100644 --- a/impl/impl.go +++ b/impl/impl.go @@ -22,7 +22,6 @@ import ( datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-data-transfer/channelmonitor" "github.com/filecoin-project/go-data-transfer/channels" - "github.com/filecoin-project/go-data-transfer/cidlists" "github.com/filecoin-project/go-data-transfer/encoding" "github.com/filecoin-project/go-data-transfer/message" "github.com/filecoin-project/go-data-transfer/network" @@ -44,7 +43,6 @@ type manager struct { channels *channels.Channels peerID peer.ID transport datatransfer.Transport - cidLists cidlists.CIDLists channelMonitor *channelmonitor.Monitor channelMonitorCfg *channelmonitor.Config transferIDGen *timeCounter @@ -94,7 +92,7 @@ func ChannelRestartConfig(cfg channelmonitor.Config) DataTransferOption { } // NewDataTransfer initializes a new instance of a data transfer manager -func NewDataTransfer(ds datastore.Batching, cidListsDir string, dataTransferNetwork network.DataTransferNetwork, transport datatransfer.Transport, options ...DataTransferOption) (datatransfer.Manager, error) { +func NewDataTransfer(ds datastore.Batching, 
dataTransferNetwork network.DataTransferNetwork, transport datatransfer.Transport, options ...DataTransferOption) (datatransfer.Manager, error) { m := &manager{ dataTransferNetwork: dataTransferNetwork, validatedTypes: registry.NewRegistry(), @@ -109,12 +107,7 @@ func NewDataTransfer(ds datastore.Batching, cidListsDir string, dataTransferNetw spansIndex: tracing.NewSpansIndex(), } - cidLists, err := cidlists.NewCIDLists(cidListsDir) - if err != nil { - return nil, err - } - m.cidLists = cidLists - channels, err := channels.New(ds, cidLists, m.notifier, m.voucherDecoder, m.resultTypes.Decoder, &channelEnvironment{m}, dataTransferNetwork.ID()) + channels, err := channels.New(ds, m.notifier, m.voucherDecoder, m.resultTypes.Decoder, &channelEnvironment{m}, dataTransferNetwork.ID()) if err != nil { return nil, err } diff --git a/impl/initiating_test.go b/impl/initiating_test.go index c5c25827..50401014 100644 --- a/impl/initiating_test.go +++ b/impl/initiating_test.go @@ -3,7 +3,6 @@ package impl_test import ( "context" "math/rand" - "os" "testing" "time" @@ -335,7 +334,7 @@ func TestDataTransferInitiating(t *testing.T) { h.network = testutil.NewFakeNetwork(h.peers[0]) h.transport = testutil.NewFakeTransport() h.ds = dss.MutexWrap(datastore.NewMapDatastore()) - dt, err := NewDataTransfer(h.ds, os.TempDir(), h.network, h.transport, verify.options...) + dt, err := NewDataTransfer(h.ds, h.network, h.transport, verify.options...) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt) h.dt = dt @@ -392,8 +391,6 @@ func TestDataTransferRestartInitiating(t *testing.T) { require.Equal(t, openChannel.Root, cidlink.Link{Cid: h.baseCid}) require.Equal(t, openChannel.Selector, h.stor) require.True(t, openChannel.Message.IsRequest()) - // received cids should be a part of the channel req - require.ElementsMatch(t, []cid.Cid{testCids[0], testCids[1]}, openChannel.Channel.ReceivedCids()) receivedRequest, ok := openChannel.Message.(datatransfer.Request) require.True(t, ok) @@ -582,7 +579,7 @@ func TestDataTransferRestartInitiating(t *testing.T) { h.voucherValidator = testutil.NewStubbedValidator() // setup data transfer`` - dt, err := NewDataTransfer(h.ds, os.TempDir(), h.network, h.transport) + dt, err := NewDataTransfer(h.ds, h.network, h.transport) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt) h.dt = dt diff --git a/impl/integration_test.go b/impl/integration_test.go index e7dcb60f..f470155f 100644 --- a/impl/integration_test.go +++ b/impl/integration_test.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "math/rand" - "os" "testing" "time" @@ -61,10 +60,6 @@ var protocolsForTest = map[string]struct { host2Protocols []protocol.ID }{ "(v1.2 -> v1.2)": {nil, nil}, - "(v1.0 -> v1.2)": {[]protocol.ID{datatransfer.ProtocolDataTransfer1_0}, nil}, - "(v1.2 -> v1.0)": {nil, []protocol.ID{datatransfer.ProtocolDataTransfer1_0}}, - "(v1.1 -> v1.2)": {[]protocol.ID{datatransfer.ProtocolDataTransfer1_1}, nil}, - "(v1.2 -> v1.1)": {nil, []protocol.ID{datatransfer.ProtocolDataTransfer1_1}}, } // tests data transfer for the protocol combinations that support restart messages @@ -73,8 +68,6 @@ var protocolsForRestartTest = map[string]struct { host2Protocols []protocol.ID }{ "(v1.2 -> v1.2)": {nil, nil}, - "(v1.1 -> v1.2)": {[]protocol.ID{datatransfer.ProtocolDataTransfer1_1}, nil}, - "(v1.2 -> v1.1)": {nil, []protocol.ID{datatransfer.ProtocolDataTransfer1_1}}, } func TestRoundTrip(t *testing.T) { @@ -150,10 +143,10 @@ func TestRoundTrip(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := 
gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) @@ -303,10 +296,10 @@ func TestRoundTripMissingBlocks(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) @@ -425,10 +418,10 @@ func TestMultipleRoundTripMultipleStores(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) @@ -544,7 +537,7 @@ func TestManyReceiversAtOnce(t *testing.T) { host1 := gsData.Host1 // initiator, data sender tp1 := gsData.SetupGSTransportHost1() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) @@ -566,11 +559,11 @@ func TestManyReceiversAtOnce(t *testing.T) { destDagService := merkledag.NewDAGService(blockservice.New(altBs, offline.Exchange(altBs))) gs := gsimpl.New(gsData.Ctx, gsnet, lsys) - gsTransport := tp.NewTransport(host.ID(), gs, dtnet) + gsTransport := tp.NewTransport(host.ID(), gs) dtDs := namespace.Wrap(ds, datastore.NewKey("datatransfer")) - receiver, err := NewDataTransfer(dtDs, os.TempDir(), dtnet, gsTransport) + receiver, err := NewDataTransfer(dtDs, dtnet, gsTransport) require.NoError(t, err) err = receiver.Start(gsData.Ctx) require.NoError(t, err) @@ -894,12 +887,12 @@ func TestAutoRestart(t *testing.T) { MaxConsecutiveRestarts: 10, CompleteTimeout: 100 * time.Millisecond, }) - initiator, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, initiatorGSTspt, restartConf) + initiator, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, initiatorGSTspt, restartConf) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, initiator) defer initiator.Stop(ctx) - responder, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, responderGSTspt) + responder, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, responderGSTspt) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, responder) defer responder.Stop(ctx) @@ -1070,12 +1063,12 @@ func TestAutoRestartAfterBouncingInitiator(t *testing.T) { MaxConsecutiveRestarts: 10, CompleteTimeout: 100 * time.Millisecond, }) - initiator, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, initiatorGSTspt, restartConf) + initiator, err := 
NewDataTransfer(gsData.DtDs1, gsData.DtNet1, initiatorGSTspt, restartConf) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, initiator) defer initiator.Stop(ctx) - responder, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, responderGSTspt) + responder, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, responderGSTspt) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, responder) defer responder.Stop(ctx) @@ -1170,7 +1163,7 @@ func TestAutoRestartAfterBouncingInitiator(t *testing.T) { // 2. Create a new initiator initiator2GSTspt := gsData.SetupGSTransportHost1() - initiator2, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, initiator2GSTspt, restartConf) + initiator2, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, initiator2GSTspt, restartConf) require.NoError(t, err) require.NoError(t, initiator2.RegisterVoucherType(&testutil.FakeDTType{}, sv)) initiator2.SubscribeToEvents(completeSubscriber) @@ -1280,10 +1273,10 @@ func TestRoundTripCancelledRequest(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) @@ -1474,10 +1467,10 @@ func TestSimulatedRetrievalFlow(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) var chid datatransfer.ChannelID @@ -1592,10 +1585,10 @@ func TestPauseAndResume(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) finished := make(chan struct{}, 2) @@ -1731,10 +1724,10 @@ func TestUnrecognizedVoucherRoundTrip(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) @@ -1801,14 +1794,14 @@ func TestDataTransferSubscribing(t *testing.T) { sv := testutil.NewStubbedValidator() sv.StubErrorPull() sv.StubErrorPush() - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, 
gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) require.NoError(t, dt2.RegisterVoucherType(&testutil.FakeDTType{}, sv)) voucher := testutil.FakeDTType{Data: "applesauce"} baseCid := testutil.GenerateCids(1)[0] - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) subscribe1Calls := make(chan struct{}, 1) @@ -1940,7 +1933,7 @@ func TestRespondingToPushGraphsyncRequests(t *testing.T) { gsData.GsNet2.SetDelegate(gsr) tp1 := gsData.SetupGSTransportHost1() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) voucherResult := testutil.NewFakeDTType() @@ -2027,8 +2020,8 @@ func TestResponseHookWhenExtensionNotFound(t *testing.T) { gsData.GsNet2.SetDelegate(gsr) gs1 := gsData.SetupGraphsyncHost1() - tp1 := tp.NewTransport(host1.ID(), gs1, gsData.DtNet1) - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + tp1 := tp.NewTransport(host1.ID(), gs1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) t.Run("when it's not our extension, does not error and does not validate", func(t *testing.T) { @@ -2071,7 +2064,7 @@ func TestRespondingToPullGraphsyncRequests(t *testing.T) { sv := testutil.NewStubbedValidator() sv.ExpectSuccessPull() - dt1, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt1, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) require.NoError(t, dt1.RegisterVoucherType(&testutil.FakeDTType{}, sv)) @@ -2103,7 +2096,7 @@ func TestRespondingToPullGraphsyncRequests(t *testing.T) { test: func(t *testing.T, gsData *testutil.GraphsyncTestingData, tp2 datatransfer.Transport, link ipld.Link, id datatransfer.TransferID, gsr *fakeGraphSyncReceiver) { sv := testutil.NewStubbedValidator() sv.ExpectErrorPull() - dt1, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt1, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) require.NoError(t, dt1.RegisterVoucherType(&testutil.FakeDTType{}, sv)) @@ -2174,11 +2167,11 @@ func TestMultipleMessagesInExtension(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) @@ -2327,11 +2320,11 @@ func TestMultipleParallelTransfers(t *testing.T) { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt1) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, 
gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt2) diff --git a/impl/responding_test.go b/impl/responding_test.go index 90ef4b51..2566886f 100644 --- a/impl/responding_test.go +++ b/impl/responding_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "math/rand" - "os" "testing" "time" @@ -559,7 +558,7 @@ func TestDataTransferResponding(t *testing.T) { h.network = testutil.NewFakeNetwork(h.peers[0]) h.transport = testutil.NewFakeTransport() h.ds = dss.MutexWrap(datastore.NewMapDatastore()) - dt, err := NewDataTransfer(h.ds, os.TempDir(), h.network, h.transport) + dt, err := NewDataTransfer(h.ds, h.network, h.transport) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt) h.dt = dt @@ -664,8 +663,6 @@ func TestDataTransferRestartResponding(t *testing.T) { require.Equal(t, openChannel.Root, cidlink.Link{Cid: h.baseCid}) require.Equal(t, openChannel.Selector, h.stor) // assert do not send cids are sent - require.ElementsMatch(t, []cid.Cid{testCids[0], testCids[1]}, openChannel.Channel.ReceivedCids()) - require.EqualValues(t, 2, openChannel.Channel.ReceivedCidsTotal()) require.False(t, openChannel.Message.IsRequest()) response, ok := openChannel.Message.(datatransfer.Response) require.True(t, ok) @@ -875,8 +872,6 @@ func TestDataTransferRestartResponding(t *testing.T) { require.Equal(t, openChannel.Root, cidlink.Link{Cid: h.baseCid}) require.Equal(t, openChannel.Selector, h.stor) require.True(t, openChannel.Message.IsRequest()) - // received cids should be a part of the channel req - require.ElementsMatch(t, openChannel.Channel.ReceivedCids(), testCids) require.EqualValues(t, len(testCids), openChannel.Channel.ReceivedCidsTotal()) // assert a restart request is in the channel @@ -983,7 +978,7 @@ func TestDataTransferRestartResponding(t *testing.T) { h.network = testutil.NewFakeNetwork(h.peers[0]) h.transport = testutil.NewFakeTransport() h.ds = dss.MutexWrap(datastore.NewMapDatastore()) - dt, err := NewDataTransfer(h.ds, os.TempDir(), h.network, h.transport) + dt, err := NewDataTransfer(h.ds, h.network, h.transport) require.NoError(t, err) testutil.StartAndWaitForReady(ctx, t, dt) h.dt = dt diff --git a/impl/restart_integration_test.go b/impl/restart_integration_test.go index 785af679..c3f4e389 100644 --- a/impl/restart_integration_test.go +++ b/impl/restart_integration_test.go @@ -51,7 +51,7 @@ func TestRestartPush(t *testing.T) { require.NoError(t, rh.dt1.Stop(rh.testCtx)) time.Sleep(100 * time.Millisecond) tp1 := rh.gsData.SetupGSTransportHost1() - rh.dt1, err = NewDataTransfer(rh.gsData.DtDs1, rh.gsData.TempDir1, rh.gsData.DtNet1, tp1) + rh.dt1, err = NewDataTransfer(rh.gsData.DtDs1, rh.gsData.DtNet1, tp1) require.NoError(rh.t, err) require.NoError(rh.t, rh.dt1.RegisterVoucherType(&testutil.FakeDTType{}, rh.sv)) testutil.StartAndWaitForReady(rh.testCtx, t, rh.dt1) @@ -93,7 +93,7 @@ func TestRestartPush(t *testing.T) { require.NoError(t, rh.dt2.Stop(rh.testCtx)) time.Sleep(100 * time.Millisecond) tp2 := rh.gsData.SetupGSTransportHost2() - rh.dt2, err = NewDataTransfer(rh.gsData.DtDs2, rh.gsData.TempDir2, rh.gsData.DtNet2, tp2) + rh.dt2, err = NewDataTransfer(rh.gsData.DtDs2, rh.gsData.DtNet2, tp2) require.NoError(rh.t, err) require.NoError(rh.t, rh.dt2.RegisterVoucherType(&testutil.FakeDTType{}, rh.sv)) testutil.StartAndWaitForReady(rh.testCtx, t, rh.dt2) @@ -300,7 +300,7 @@ func TestRestartPull(t *testing.T) { require.NoError(t, rh.dt2.Stop(rh.testCtx)) time.Sleep(100 * 
time.Millisecond) tp2 := rh.gsData.SetupGSTransportHost2() - rh.dt2, err = NewDataTransfer(rh.gsData.DtDs2, rh.gsData.TempDir2, rh.gsData.DtNet2, tp2) + rh.dt2, err = NewDataTransfer(rh.gsData.DtDs2, rh.gsData.DtNet2, tp2) require.NoError(rh.t, err) require.NoError(rh.t, rh.dt2.RegisterVoucherType(&testutil.FakeDTType{}, rh.sv)) testutil.StartAndWaitForReady(rh.testCtx, t, rh.dt2) @@ -339,7 +339,7 @@ func TestRestartPull(t *testing.T) { require.NoError(t, rh.dt1.Stop(rh.testCtx)) time.Sleep(100 * time.Millisecond) tp1 := rh.gsData.SetupGSTransportHost1() - rh.dt1, err = NewDataTransfer(rh.gsData.DtDs1, rh.gsData.TempDir1, rh.gsData.DtNet1, tp1) + rh.dt1, err = NewDataTransfer(rh.gsData.DtDs1, rh.gsData.DtNet1, tp1) require.NoError(rh.t, err) require.NoError(rh.t, rh.dt1.RegisterVoucherType(&testutil.FakeDTType{}, rh.sv)) testutil.StartAndWaitForReady(rh.testCtx, t, rh.dt1) @@ -387,7 +387,6 @@ func TestRestartPull(t *testing.T) { sent := make(chan uint64, totalIncrements) received := make(chan uint64, totalIncrements) receivedTillNow := atomic.NewInt32(0) - var receivedCids []cid.Cid // counters we will check at the end for correctness opens := atomic.NewInt32(0) @@ -420,15 +419,6 @@ func TestRestartPull(t *testing.T) { finishedPeersLk.Lock() { finishedPeers = append(finishedPeers, channelState.SelfPeer()) - - // When the receiving peer completes, record received CIDs - // before they get cleaned up - if channelState.SelfPeer() == rh.peer2 { - chs, err := rh.dt2.InProgressChannels(rh.testCtx) - require.NoError(t, err) - require.Len(t, chs, 1) - receivedCids = chs[chid].ReceivedCids() - } } finishedPeersLk.Unlock() finished <- channelState.SelfPeer() @@ -502,9 +492,6 @@ func TestRestartPull(t *testing.T) { _, _, err = waitF(10*time.Second, 2) require.NoError(t, err) - // verify all cids are present on the receiver - require.Equal(t, totalIncrements, len(receivedCids)) - testutil.VerifyHasFile(rh.testCtx, t, rh.destDagService, rh.root, rh.origBytes) rh.sv.VerifyExpectations(t) @@ -568,10 +555,10 @@ func newRestartHarness(t *testing.T) *restartHarness { tp1 := gsData.SetupGSTransportHost1() tp2 := gsData.SetupGSTransportHost2() - dt1, err := NewDataTransfer(gsData.DtDs1, gsData.TempDir1, gsData.DtNet1, tp1) + dt1, err := NewDataTransfer(gsData.DtDs1, gsData.DtNet1, tp1) require.NoError(t, err) - dt2, err := NewDataTransfer(gsData.DtDs2, gsData.TempDir2, gsData.DtNet2, tp2) + dt2, err := NewDataTransfer(gsData.DtDs2, gsData.DtNet2, tp2) require.NoError(t, err) sv := testutil.NewStubbedValidator() diff --git a/message.go b/message.go index ac61e611..ad61db54 100644 --- a/message.go +++ b/message.go @@ -15,15 +15,6 @@ var ( // ProtocolDataTransfer1_2 is the protocol identifier for the latest // version of data-transfer (supports do-not-send-first-blocks extension) ProtocolDataTransfer1_2 protocol.ID = "/fil/datatransfer/1.2.0" - - // ProtocolDataTransfer1_1 is the protocol identifier for the version - // of data-transfer that supports the do-not-send-cids extension - // (but not the do-not-send-first-blocks extension) - ProtocolDataTransfer1_1 protocol.ID = "/fil/datatransfer/1.1.0" - - // ProtocolDataTransfer1_0 is the protocol identifier for legacy data-transfer messages. - // This protocol does NOT support the `Restart` functionality for data transfer channels. 
- ProtocolDataTransfer1_0 protocol.ID = "/fil/datatransfer/1.0.0" ) // Message is a message for the data transfer protocol diff --git a/message/message1_0/message.go b/message/message1_0/message.go deleted file mode 100644 index 23856886..00000000 --- a/message/message1_0/message.go +++ /dev/null @@ -1,57 +0,0 @@ -package message1_0 - -import ( - "io" - - "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" - - datatransfer "github.com/filecoin-project/go-data-transfer" -) - -// NewTransferRequest creates a transfer request for the 1_0 Data Transfer Protocol. -func NewTransferRequest(bcid *cid.Cid, typ uint64, paus, part, pull bool, stor, vouch *cbg.Deferred, - vtyp datatransfer.TypeIdentifier, xferId uint64) datatransfer.Request { - return &transferRequest{ - BCid: bcid, - Type: typ, - Paus: paus, - Part: part, - Pull: pull, - Stor: stor, - Vouch: vouch, - VTyp: vtyp, - XferID: xferId, - } -} - -// NewTransferRequest creates a transfer response for the 1_0 Data Transfer Protocol. -func NewTransferResponse(typ uint64, acpt bool, paus bool, xferId uint64, vRes *cbg.Deferred, vtyp datatransfer.TypeIdentifier) datatransfer.Response { - return &transferResponse{ - Type: typ, - Acpt: acpt, - Paus: paus, - XferID: xferId, - VRes: vRes, - VTyp: vtyp, - } -} - -// FromNet can read a network stream to deserialize a GraphSyncMessage -func FromNet(r io.Reader) (datatransfer.Message, error) { - tresp := transferMessage{} - err := tresp.UnmarshalCBOR(r) - if err != nil { - return nil, err - } - - if (tresp.IsRequest() && tresp.Request == nil) || (!tresp.IsRequest() && tresp.Response == nil) { - return nil, xerrors.Errorf("invalid/malformed message") - } - - if tresp.IsRequest() { - return tresp.Request, nil - } - return tresp.Response, nil -} diff --git a/message/message1_0/transfer_message.go b/message/message1_0/transfer_message.go deleted file mode 100644 index 4da04b1d..00000000 --- a/message/message1_0/transfer_message.go +++ /dev/null @@ -1,36 +0,0 @@ -package message1_0 - -import ( - "io" - - datatransfer "github.com/filecoin-project/go-data-transfer" -) - -//go:generate cbor-gen-for transferMessage -type transferMessage struct { - IsRq bool - - Request *transferRequest - Response *transferResponse -} - -// ========= datatransfer.Message interface - -// IsRequest returns true if this message is a data request -func (tm *transferMessage) IsRequest() bool { - return tm.IsRq -} - -// TransferID returns the TransferID of this message -func (tm *transferMessage) TransferID() datatransfer.TransferID { - if tm.IsRequest() { - return tm.Request.TransferID() - } - return tm.Response.TransferID() -} - -// ToNet serializes a transfer message type. It is simply a wrapper for MarshalCBOR, to provide -// symmetry with FromNet -func (tm *transferMessage) ToNet(w io.Writer) error { - return tm.MarshalCBOR(w) -} diff --git a/message/message1_0/transfer_message_cbor_gen.go b/message/message1_0/transfer_message_cbor_gen.go deleted file mode 100644 index aa4e4719..00000000 --- a/message/message1_0/transfer_message_cbor_gen.go +++ /dev/null @@ -1,121 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
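The entire message1_0 package is deleted along with the two legacy protocol IDs, so after this patch the library only negotiates /fil/datatransfer/1.2.0; peers that still speak 1.0 or 1.1 will fail to match a protocol. Below is a hypothetical pre-flight check an application could run against a remote peer; it is an illustration under that assumption, not code from this repository.

package example // hypothetical; illustrates the single remaining protocol

import (
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"

	datatransfer "github.com/filecoin-project/go-data-transfer"
)

// speaksDataTransfer12 reports whether the peerstore has seen the remote peer
// advertise the only data-transfer protocol left after this patch.
func speaksDataTransfer12(h host.Host, p peer.ID) (bool, error) {
	protos, err := h.Peerstore().SupportsProtocols(p, string(datatransfer.ProtocolDataTransfer1_2))
	if err != nil {
		return false, err
	}
	return len(protos) > 0, nil
}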
- -package message1_0 - -import ( - "fmt" - "io" - "sort" - - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = sort.Sort - -var lengthBuftransferMessage = []byte{131} - -func (t *transferMessage) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBuftransferMessage); err != nil { - return err - } - - // t.IsRq (bool) (bool) - if err := cbg.WriteBool(w, t.IsRq); err != nil { - return err - } - - // t.Request (message1_0.transferRequest) (struct) - if err := t.Request.MarshalCBOR(w); err != nil { - return err - } - - // t.Response (message1_0.transferResponse) (struct) - if err := t.Response.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *transferMessage) UnmarshalCBOR(r io.Reader) error { - *t = transferMessage{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.IsRq (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.IsRq = false - case 21: - t.IsRq = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Request (message1_0.transferRequest) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.Request = new(transferRequest) - if err := t.Request.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Request pointer: %w", err) - } - } - - } - // t.Response (message1_0.transferResponse) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.Response = new(transferResponse) - if err := t.Response.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Response pointer: %w", err) - } - } - - } - return nil -} diff --git a/message/message1_0/transfer_request.go b/message/message1_0/transfer_request.go deleted file mode 100644 index 19d7ba7a..00000000 --- a/message/message1_0/transfer_request.go +++ /dev/null @@ -1,146 +0,0 @@ -package message1_0 - -import ( - "bytes" - "io" - - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/codec/dagcbor" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/libp2p/go-libp2p-core/protocol" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/encoding" - "github.com/filecoin-project/go-data-transfer/message/types" -) - -//go:generate cbor-gen-for transferRequest - -// transferRequest is a struct that fulfills the datatransfer.Request interface. 
-// its members are exported to be used by cbor-gen -type transferRequest struct { - BCid *cid.Cid - Type uint64 - Paus bool - Part bool - Pull bool - Stor *cbg.Deferred - Vouch *cbg.Deferred - VTyp datatransfer.TypeIdentifier - XferID uint64 -} - -// IsRequest always returns true in this case because this is a transfer request -func (trq *transferRequest) IsRequest() bool { - return true -} - -func (trq *transferRequest) MessageForProtocol(targetProtocol protocol.ID) (datatransfer.Message, error) { - switch targetProtocol { - case datatransfer.ProtocolDataTransfer1_0: - return trq, nil - default: - return nil, xerrors.Errorf("protocol not supported") - } -} - -func (trq *transferRequest) IsNew() bool { - return trq.Type == uint64(types.NewMessage) -} - -func (trq *transferRequest) IsUpdate() bool { - return trq.Type == uint64(types.UpdateMessage) -} - -func (trq *transferRequest) IsVoucher() bool { - return trq.Type == uint64(types.VoucherMessage) || trq.Type == uint64(types.NewMessage) -} - -func (trq *transferRequest) IsPaused() bool { - return trq.Paus -} - -func (trq *transferRequest) TransferID() datatransfer.TransferID { - return datatransfer.TransferID(trq.XferID) -} - -// ========= datatransfer.Request interface -// IsPull returns true if this is a data pull request -func (trq *transferRequest) IsPull() bool { - return trq.Pull -} - -// VoucherType returns the Voucher ID -func (trq *transferRequest) VoucherType() datatransfer.TypeIdentifier { - return trq.VTyp -} - -// Voucher returns the Voucher bytes -func (trq *transferRequest) Voucher(decoder encoding.Decoder) (encoding.Encodable, error) { - if trq.Vouch == nil { - return nil, xerrors.New("No voucher present to read") - } - return decoder.DecodeFromCbor(trq.Vouch.Raw) -} - -func (trq *transferRequest) EmptyVoucher() bool { - return trq.VTyp == datatransfer.EmptyTypeIdentifier -} - -// BaseCid returns the Base CID -func (trq *transferRequest) BaseCid() cid.Cid { - if trq.BCid == nil { - return cid.Undef - } - return *trq.BCid -} - -// Selector returns the message Selector bytes -func (trq *transferRequest) Selector() (ipld.Node, error) { - if trq.Stor == nil { - return nil, xerrors.New("No selector present to read") - } - builder := basicnode.Prototype.Any.NewBuilder() - reader := bytes.NewReader(trq.Stor.Raw) - err := dagcbor.Decode(builder, reader) - if err != nil { - return nil, xerrors.Errorf("Error decoding selector: %w", err) - } - return builder.Build(), nil -} - -// IsCancel returns true if this is a cancel request -func (trq *transferRequest) IsCancel() bool { - return trq.Type == uint64(types.CancelMessage) -} - -// IsPartial returns true if this is a partial request -func (trq *transferRequest) IsPartial() bool { - return trq.Part -} - -// ToNet serializes a transfer request. 
It's a wrapper for MarshalCBOR to provide -// symmetry with FromNet -func (trq *transferRequest) ToNet(w io.Writer) error { - msg := transferMessage{ - IsRq: true, - Request: trq, - Response: nil, - } - return msg.MarshalCBOR(w) -} - -func (trq *transferRequest) IsRestart() bool { - return false -} - -func (trq *transferRequest) IsRestartExistingChannelRequest() bool { - return false -} - -func (trq *transferRequest) RestartChannelId() (datatransfer.ChannelID, error) { - return datatransfer.ChannelID{}, xerrors.New("not supported") -} diff --git a/message/message1_0/transfer_request_cbor_gen.go b/message/message1_0/transfer_request_cbor_gen.go deleted file mode 100644 index 678cdc38..00000000 --- a/message/message1_0/transfer_request_cbor_gen.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package message1_0 - -import ( - "fmt" - "io" - "sort" - - datatransfer "github.com/filecoin-project/go-data-transfer" - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = sort.Sort - -var lengthBuftransferRequest = []byte{137} - -func (t *transferRequest) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBuftransferRequest); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.BCid (cid.Cid) (struct) - - if t.BCid == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.BCid); err != nil { - return xerrors.Errorf("failed to write cid field t.BCid: %w", err) - } - } - - // t.Type (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Type)); err != nil { - return err - } - - // t.Paus (bool) (bool) - if err := cbg.WriteBool(w, t.Paus); err != nil { - return err - } - - // t.Part (bool) (bool) - if err := cbg.WriteBool(w, t.Part); err != nil { - return err - } - - // t.Pull (bool) (bool) - if err := cbg.WriteBool(w, t.Pull); err != nil { - return err - } - - // t.Stor (typegen.Deferred) (struct) - if err := t.Stor.MarshalCBOR(w); err != nil { - return err - } - - // t.Vouch (typegen.Deferred) (struct) - if err := t.Vouch.MarshalCBOR(w); err != nil { - return err - } - - // t.VTyp (datatransfer.TypeIdentifier) (string) - if len(t.VTyp) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.VTyp was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.VTyp))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.VTyp)); err != nil { - return err - } - - // t.XferID (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.XferID)); err != nil { - return err - } - - return nil -} - -func (t *transferRequest) UnmarshalCBOR(r io.Reader) error { - *t = transferRequest{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 9 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BCid (cid.Cid) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { 
- return xerrors.Errorf("failed to read cid field t.BCid: %w", err) - } - - t.BCid = &c - } - - } - // t.Type (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Type = uint64(extra) - - } - // t.Paus (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Paus = false - case 21: - t.Paus = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Part (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Part = false - case 21: - t.Part = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Pull (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Pull = false - case 21: - t.Pull = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Stor (typegen.Deferred) (struct) - - { - - t.Stor = new(cbg.Deferred) - - if err := t.Stor.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } - } - // t.Vouch (typegen.Deferred) (struct) - - { - - t.Vouch = new(cbg.Deferred) - - if err := t.Vouch.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } - } - // t.VTyp (datatransfer.TypeIdentifier) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.VTyp = datatransfer.TypeIdentifier(sval) - } - // t.XferID (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.XferID = uint64(extra) - - } - return nil -} diff --git a/message/message1_0/transfer_response.go b/message/message1_0/transfer_response.go deleted file mode 100644 index 7de7a381..00000000 --- a/message/message1_0/transfer_response.go +++ /dev/null @@ -1,108 +0,0 @@ -package message1_0 - -import ( - "io" - - "github.com/libp2p/go-libp2p-core/protocol" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/encoding" - "github.com/filecoin-project/go-data-transfer/message/types" -) - -//go:generate cbor-gen-for transferResponse - -// transferResponse is a private struct that satisfies the datatransfer.Response interface -type transferResponse struct { - Type uint64 - Acpt bool - Paus bool - XferID uint64 - VRes *cbg.Deferred - VTyp datatransfer.TypeIdentifier -} - -func (trsp *transferResponse) TransferID() datatransfer.TransferID { - return datatransfer.TransferID(trsp.XferID) -} - -func (trsp *transferResponse) IsRestart() bool { - return false -} - -func (trsp *transferResponse) MessageForProtocol(targetProtocol protocol.ID) (datatransfer.Message, error) { - switch targetProtocol { - case 
datatransfer.ProtocolDataTransfer1_0: - return trsp, nil - default: - return nil, xerrors.Errorf("protocol not supported") - } -} - -// IsRequest always returns false in this case because this is a transfer response -func (trsp *transferResponse) IsRequest() bool { - return false -} - -// IsNew returns true if this is the first response sent -func (trsp *transferResponse) IsNew() bool { - return trsp.Type == uint64(types.NewMessage) -} - -// IsUpdate returns true if this response is an update -func (trsp *transferResponse) IsUpdate() bool { - return trsp.Type == uint64(types.UpdateMessage) -} - -// IsPaused returns true if the responder is paused -func (trsp *transferResponse) IsPaused() bool { - return trsp.Paus -} - -// IsCancel returns true if the responder has cancelled this response -func (trsp *transferResponse) IsCancel() bool { - return trsp.Type == uint64(types.CancelMessage) -} - -// IsComplete returns true if the responder has completed this response -func (trsp *transferResponse) IsComplete() bool { - return trsp.Type == uint64(types.CompleteMessage) -} - -func (trsp *transferResponse) IsVoucherResult() bool { - return trsp.Type == uint64(types.VoucherResultMessage) || trsp.Type == uint64(types.NewMessage) || - trsp.Type == uint64(types.CompleteMessage) -} - -// Accepted returns true if the request is accepted in the response -func (trsp *transferResponse) Accepted() bool { - return trsp.Acpt -} - -func (trsp *transferResponse) VoucherResultType() datatransfer.TypeIdentifier { - return trsp.VTyp -} - -func (trsp *transferResponse) VoucherResult(decoder encoding.Decoder) (encoding.Encodable, error) { - if trsp.VRes == nil { - return nil, xerrors.New("No voucher present to read") - } - return decoder.DecodeFromCbor(trsp.VRes.Raw) -} - -func (trsp *transferResponse) EmptyVoucherResult() bool { - return trsp.VTyp == datatransfer.EmptyTypeIdentifier -} - -// ToNet serializes a transfer response. It's a wrapper for MarshalCBOR to provide -// symmetry with FromNet -func (trsp *transferResponse) ToNet(w io.Writer) error { - msg := transferMessage{ - IsRq: false, - Request: nil, - Response: trsp, - } - return msg.MarshalCBOR(w) -} diff --git a/message/message1_0/transfer_response_cbor_gen.go b/message/message1_0/transfer_response_cbor_gen.go deleted file mode 100644 index 8fc1e720..00000000 --- a/message/message1_0/transfer_response_cbor_gen.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package message1_0 - -import ( - "fmt" - "io" - "sort" - - datatransfer "github.com/filecoin-project/go-data-transfer" - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = sort.Sort - -var lengthBuftransferResponse = []byte{134} - -func (t *transferResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBuftransferResponse); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Type (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Type)); err != nil { - return err - } - - // t.Acpt (bool) (bool) - if err := cbg.WriteBool(w, t.Acpt); err != nil { - return err - } - - // t.Paus (bool) (bool) - if err := cbg.WriteBool(w, t.Paus); err != nil { - return err - } - - // t.XferID (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.XferID)); err != nil { - return err - } - - // t.VRes (typegen.Deferred) (struct) - if err := t.VRes.MarshalCBOR(w); err != nil { - return err - } - - // t.VTyp (datatransfer.TypeIdentifier) (string) - if len(t.VTyp) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.VTyp was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.VTyp))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.VTyp)); err != nil { - return err - } - return nil -} - -func (t *transferResponse) UnmarshalCBOR(r io.Reader) error { - *t = transferResponse{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 6 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Type (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Type = uint64(extra) - - } - // t.Acpt (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Acpt = false - case 21: - t.Acpt = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Paus (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Paus = false - case 21: - t.Paus = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.XferID (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.XferID = uint64(extra) - - } - // t.VRes (typegen.Deferred) (struct) - - { - - t.VRes = new(cbg.Deferred) - - if err := t.VRes.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } - } - // t.VTyp (datatransfer.TypeIdentifier) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != 
nil { - return err - } - - t.VTyp = datatransfer.TypeIdentifier(sval) - } - return nil -} diff --git a/message/message1_1/transfer_request.go b/message/message1_1/transfer_request.go index 7ce3f937..0b3020d2 100644 --- a/message/message1_1/transfer_request.go +++ b/message/message1_1/transfer_request.go @@ -14,7 +14,6 @@ import ( datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-data-transfer/encoding" - "github.com/filecoin-project/go-data-transfer/message/message1_0" "github.com/filecoin-project/go-data-transfer/message/types" ) @@ -38,26 +37,8 @@ type transferRequest1_1 struct { func (trq *transferRequest1_1) MessageForProtocol(targetProtocol protocol.ID) (datatransfer.Message, error) { switch targetProtocol { - case datatransfer.ProtocolDataTransfer1_2, datatransfer.ProtocolDataTransfer1_1: + case datatransfer.ProtocolDataTransfer1_2: return trq, nil - case datatransfer.ProtocolDataTransfer1_0: - if trq.IsRestart() || trq.IsRestartExistingChannelRequest() { - return nil, xerrors.New("restart not supported on 1.0") - } - - lreq := message1_0.NewTransferRequest( - trq.BCid, - trq.Type, - trq.Paus, - trq.Part, - trq.Pull, - trq.Stor, - trq.Vouch, - trq.VTyp, - trq.XferID, - ) - return lreq, nil - default: return nil, xerrors.Errorf("protocol not supported") } diff --git a/message/message1_1/transfer_request_test.go b/message/message1_1/transfer_request_test.go index 29763579..da0bccac 100644 --- a/message/message1_1/transfer_request_test.go +++ b/message/message1_1/transfer_request_test.go @@ -28,14 +28,7 @@ func TestRequestMessageForProtocol(t *testing.T) { require.NoError(t, err) require.Equal(t, request, out12) - out11, err := request.MessageForProtocol(datatransfer.ProtocolDataTransfer1_1) - require.NoError(t, err) - require.Equal(t, request, out11) - - // for the old protocol - out, err := request.MessageForProtocol(datatransfer.ProtocolDataTransfer1_0) - require.NoError(t, err) - req, ok := out.(datatransfer.Request) + req, ok := out12.(datatransfer.Request) require.True(t, ok) require.False(t, req.IsRestart()) require.False(t, req.IsRestartExistingChannelRequest()) @@ -45,29 +38,4 @@ func TestRequestMessageForProtocol(t *testing.T) { require.NoError(t, err) require.Equal(t, selector, n) require.Equal(t, voucher.Type(), req.VoucherType()) - - // random protocol - out, err = request.MessageForProtocol("RAND") - require.Error(t, err) - require.Nil(t, out) -} - -func TestRequestMessageForProtocolRestartDowngradeFails(t *testing.T) { - baseCid := testutil.GenerateCids(1)[0] - selector := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any).Matcher().Node() - isPull := true - id := datatransfer.TransferID(rand.Int31()) - voucher := testutil.NewFakeDTType() - - request, err := message1_1.NewRequest(id, true, isPull, voucher.Type(), voucher, baseCid, selector) - require.NoError(t, err) - - out, err := request.MessageForProtocol(datatransfer.ProtocolDataTransfer1_0) - require.Nil(t, out) - require.EqualError(t, err, "restart not supported on 1.0") - - req2 := message1_1.RestartExistingChannelRequest(datatransfer.ChannelID{}) - out, err = req2.MessageForProtocol(datatransfer.ProtocolDataTransfer1_0) - require.Nil(t, out) - require.EqualError(t, err, "restart not supported on 1.0") } diff --git a/message/message1_1/transfer_response.go b/message/message1_1/transfer_response.go index 9c3e2a68..b578ad37 100644 --- a/message/message1_1/transfer_response.go +++ b/message/message1_1/transfer_response.go @@ -9,7 +9,6 @@ import ( datatransfer 
"github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-data-transfer/encoding" - "github.com/filecoin-project/go-data-transfer/message/message1_0" "github.com/filecoin-project/go-data-transfer/message/types" ) @@ -91,24 +90,8 @@ func (trsp *transferResponse1_1) EmptyVoucherResult() bool { func (trsp *transferResponse1_1) MessageForProtocol(targetProtocol protocol.ID) (datatransfer.Message, error) { switch targetProtocol { - case datatransfer.ProtocolDataTransfer1_2, datatransfer.ProtocolDataTransfer1_1: + case datatransfer.ProtocolDataTransfer1_2: return trsp, nil - case datatransfer.ProtocolDataTransfer1_0: - // this should never happen but dosen't hurt to have this here for sanity - if trsp.IsRestart() { - return nil, xerrors.New("restart not supported for 1.0 protocol") - } - - lresp := message1_0.NewTransferResponse( - trsp.Type, - trsp.Acpt, - trsp.Paus, - trsp.XferID, - trsp.VRes, - trsp.VTyp, - ) - - return lresp, nil default: return nil, xerrors.Errorf("protocol %s not supported", targetProtocol) } diff --git a/message/message1_1/transfer_response_test.go b/message/message1_1/transfer_response_test.go index d624b0d4..8e98e668 100644 --- a/message/message1_1/transfer_response_test.go +++ b/message/message1_1/transfer_response_test.go @@ -22,14 +22,6 @@ func TestResponseMessageForProtocol(t *testing.T) { require.NoError(t, err) require.Equal(t, response, out) - // v1.1 protocol - out, err = response.MessageForProtocol(datatransfer.ProtocolDataTransfer1_1) - require.NoError(t, err) - require.Equal(t, response, out) - - // old protocol - out, err = response.MessageForProtocol(datatransfer.ProtocolDataTransfer1_0) - require.NoError(t, err) resp, ok := (out).(datatransfer.Response) require.True(t, ok) require.True(t, resp.IsPaused()) @@ -41,14 +33,3 @@ func TestResponseMessageForProtocol(t *testing.T) { require.Error(t, err) require.Nil(t, out) } - -func TestResponseMessageForProtocolFail(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - voucherResult := testutil.NewFakeDTType() - response, err := message1_1.RestartResponse(id, false, true, voucherResult.Type(), voucherResult) // not accepted - require.NoError(t, err) - - out, err := response.MessageForProtocol(datatransfer.ProtocolDataTransfer1_0) - require.Nil(t, out) - require.EqualError(t, err, "restart not supported for 1.0 protocol") -} diff --git a/network/libp2p_impl.go b/network/libp2p_impl.go index f5e635a4..706cae8d 100644 --- a/network/libp2p_impl.go +++ b/network/libp2p_impl.go @@ -20,7 +20,6 @@ import ( datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-data-transfer/message" - "github.com/filecoin-project/go-data-transfer/message/message1_0" ) var log = logging.Logger("data_transfer_network") @@ -45,8 +44,6 @@ const defaultBackoffFactor = 5 var defaultDataTransferProtocols = []protocol.ID{ datatransfer.ProtocolDataTransfer1_2, - datatransfer.ProtocolDataTransfer1_1, - datatransfer.ProtocolDataTransfer1_0, } // Option is an option for configuring the libp2p storage market network @@ -247,10 +244,8 @@ func (dtnet *libp2pDataTransferNetwork) handleNewStream(s network.Stream) { var received datatransfer.Message var err error switch s.Protocol() { - case datatransfer.ProtocolDataTransfer1_2, datatransfer.ProtocolDataTransfer1_1: + case datatransfer.ProtocolDataTransfer1_2: received, err = message.FromNet(s) - default: - received, err = message1_0.FromNet(s) } if err != nil { @@ -315,8 +310,6 @@ func (dtnet *libp2pDataTransferNetwork) msgToStream(ctx 
context.Context, s netwo switch s.Protocol() { case datatransfer.ProtocolDataTransfer1_2: - case datatransfer.ProtocolDataTransfer1_1: - case datatransfer.ProtocolDataTransfer1_0: default: return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) } diff --git a/testutil/gstestdata.go b/testutil/gstestdata.go index 7b47eead..40868d42 100644 --- a/testutil/gstestdata.go +++ b/testutil/gstestdata.go @@ -58,8 +58,7 @@ const unixfsChunkSize uint64 = 1 << 10 const unixfsLinksPerLevel = 1024 var extsForProtocol = map[protocol.ID]graphsync.ExtensionName{ - datatransfer.ProtocolDataTransfer1_1: extension.ExtensionDataTransfer1_1, - datatransfer.ProtocolDataTransfer1_0: extension.ExtensionDataTransfer1_0, + datatransfer.ProtocolDataTransfer1_2: extension.ExtensionDataTransfer1_1, } // GraphsyncTestingData is a test harness for testing data transfer on top of @@ -181,7 +180,7 @@ func (gsData *GraphsyncTestingData) SetupGSTransportHost1(opts ...gstransport.Op opts = append(opts, gstransport.SupportedExtensions(supportedExtensions)) } - return gstransport.NewTransport(gsData.Host1.ID(), gs, gsData.DtNet1, opts...) + return gstransport.NewTransport(gsData.Host1.ID(), gs, opts...) } // SetupGraphsyncHost2 sets up a new, real graphsync instance on top of the second host @@ -206,7 +205,7 @@ func (gsData *GraphsyncTestingData) SetupGSTransportHost2(opts ...gstransport.Op } opts = append(opts, gstransport.SupportedExtensions(supportedExtensions)) } - return gstransport.NewTransport(gsData.Host2.ID(), gs, gsData.DtNet2, opts...) + return gstransport.NewTransport(gsData.Host2.ID(), gs, opts...) } // LoadUnixFSFile loads a fixtures file we can test dag transfer with diff --git a/testutil/mockpeerprotocol.go b/testutil/mockpeerprotocol.go deleted file mode 100644 index 67481cdf..00000000 --- a/testutil/mockpeerprotocol.go +++ /dev/null @@ -1,43 +0,0 @@ -package testutil - -import ( - "context" - "sync" - - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/protocol" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-data-transfer/transport/graphsync" -) - -type MockPeerProtocol struct { - lk sync.Mutex - pp map[peer.ID]protocol.ID -} - -var _ graphsync.PeerProtocol = (*MockPeerProtocol)(nil) - -func NewMockPeerProtocol() *MockPeerProtocol { - return &MockPeerProtocol{ - pp: make(map[peer.ID]protocol.ID), - } -} - -func (m *MockPeerProtocol) Protocol(ctx context.Context, id peer.ID) (protocol.ID, error) { - m.lk.Lock() - defer m.lk.Unlock() - - proto, ok := m.pp[id] - if !ok { - return "", xerrors.Errorf("no protocol set for peer %s", id) - } - return proto, nil -} - -func (m *MockPeerProtocol) SetProtocol(id peer.ID, proto protocol.ID) { - m.lk.Lock() - defer m.lk.Unlock() - - m.pp[id] = proto -} diff --git a/transport/graphsync/extension/gsextension.go b/transport/graphsync/extension/gsextension.go index 48d07031..a6f0424a 100644 --- a/transport/graphsync/extension/gsextension.go +++ b/transport/graphsync/extension/gsextension.go @@ -10,7 +10,6 @@ import ( datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-data-transfer/message" - "github.com/filecoin-project/go-data-transfer/message/message1_0" ) const ( @@ -20,16 +19,13 @@ const ( ExtensionOutgoingBlock1_1 = graphsync.ExtensionName("fil/data-transfer/outgoing-block/1.1") // ExtensionDataTransfer1_1 is the identifier for the v1.1 data transfer extension to graphsync ExtensionDataTransfer1_1 = graphsync.ExtensionName("fil/data-transfer/1.1") - // ExtensionDataTransfer1_0 is 
the identifier for the v1.0 data transfer extension to graphsync - ExtensionDataTransfer1_0 = graphsync.ExtensionName("fil/data-transfer") ) // ProtocolMap maps graphsync extensions to their libp2p protocols var ProtocolMap = map[graphsync.ExtensionName]protocol.ID{ - ExtensionIncomingRequest1_1: datatransfer.ProtocolDataTransfer1_1, - ExtensionOutgoingBlock1_1: datatransfer.ProtocolDataTransfer1_1, - ExtensionDataTransfer1_1: datatransfer.ProtocolDataTransfer1_1, - ExtensionDataTransfer1_0: datatransfer.ProtocolDataTransfer1_0, + ExtensionIncomingRequest1_1: datatransfer.ProtocolDataTransfer1_2, + ExtensionOutgoingBlock1_1: datatransfer.ProtocolDataTransfer1_2, + ExtensionDataTransfer1_1: datatransfer.ProtocolDataTransfer1_2, } // ToExtensionData converts a message to a graphsync extension @@ -87,5 +83,4 @@ var decoders = map[graphsync.ExtensionName]decoder{ ExtensionIncomingRequest1_1: message.FromNet, ExtensionOutgoingBlock1_1: message.FromNet, ExtensionDataTransfer1_1: message.FromNet, - ExtensionDataTransfer1_0: message1_0.FromNet, } diff --git a/transport/graphsync/graphsync.go b/transport/graphsync/graphsync.go index 8ffaa03b..3a17d1d5 100644 --- a/transport/graphsync/graphsync.go +++ b/transport/graphsync/graphsync.go @@ -7,14 +7,11 @@ import ( "sync" "time" - "github.com/ipfs/go-cid" "github.com/ipfs/go-graphsync" - "github.com/ipfs/go-graphsync/cidset" "github.com/ipfs/go-graphsync/donotsendfirstblocks" logging "github.com/ipfs/go-log/v2" ipld "github.com/ipld/go-ipld-prime" peer "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/protocol" "golang.org/x/sync/errgroup" "golang.org/x/xerrors" @@ -37,19 +34,16 @@ type graphsyncKey struct { var defaultSupportedExtensions = []graphsync.ExtensionName{ extension.ExtensionDataTransfer1_1, - extension.ExtensionDataTransfer1_0, } var incomingReqExtensions = []graphsync.ExtensionName{ extension.ExtensionIncomingRequest1_1, extension.ExtensionDataTransfer1_1, - extension.ExtensionDataTransfer1_0, } var outgoingBlkExtensions = []graphsync.ExtensionName{ extension.ExtensionOutgoingBlock1_1, extension.ExtensionDataTransfer1_1, - extension.ExtensionDataTransfer1_0, } // Option is an option for setting up the graphsync transport @@ -76,19 +70,12 @@ func RegisterCompletedResponseListener(l func(channelID datatransfer.ChannelID)) } } -type PeerProtocol interface { - // Protocol returns the protocol version of the peer, connecting to - // the peer if necessary - Protocol(context.Context, peer.ID) (protocol.ID, error) -} - // Transport manages graphsync hooks for data transfer, translating from // graphsync hooks to semantic data transfer events type Transport struct { - events datatransfer.EventsHandler - gs graphsync.GraphExchange - peerProtocol PeerProtocol - peerID peer.ID + events datatransfer.EventsHandler + gs graphsync.GraphExchange + peerID peer.ID supportedExtensions []graphsync.ExtensionName unregisterFuncs []graphsync.UnregisterHookFunc @@ -105,10 +92,9 @@ type Transport struct { } // NewTransport makes a new hooks manager with the given hook events interface -func NewTransport(peerID peer.ID, gs graphsync.GraphExchange, pp PeerProtocol, options ...Option) *Transport { +func NewTransport(peerID peer.ID, gs graphsync.GraphExchange, options ...Option) *Transport { t := &Transport{ gs: gs, - peerProtocol: pp, peerID: peerID, supportedExtensions: defaultSupportedExtensions, dtChannels: make(map[datatransfer.ChannelID]*dtChannel), @@ -172,48 +158,7 @@ func (t *Transport) getRestartExtension(ctx context.Context, p 
peer.ID, channel if channel == nil { return nil, nil } - - // Get the peer's protocol version - protocol, err := t.peerProtocol.Protocol(ctx, p) - if err != nil { - return nil, err - } - - switch protocol { - case datatransfer.ProtocolDataTransfer1_0: - // Doesn't support restart extensions - return nil, nil - case datatransfer.ProtocolDataTransfer1_1: - // Supports do-not-send-cids extension - return getDoNotSendCidsExtension(channel) - default: // Versions higher than 1.1 - // Supports do-not-send-first-blocks extension - return getDoNotSendFirstBlocksExtension(channel) - } -} - -// Send a list of CIDs that have already been received, so that the peer -// doesn't send those blocks again -func getDoNotSendCidsExtension(channel datatransfer.ChannelState) ([]graphsync.ExtensionData, error) { - doNotSendCids := channel.ReceivedCids() - if len(doNotSendCids) == 0 { - return nil, nil - } - - // Make sure the CIDs are unique - set := cid.NewSet() - for _, c := range doNotSendCids { - set.Add(c) - } - bz, err := cidset.EncodeCidSet(set) - if err != nil { - return nil, xerrors.Errorf("failed to encode cid set: %w", err) - } - doNotSendExt := graphsync.ExtensionData{ - Name: graphsync.ExtensionDoNotSendCIDs, - Data: bz, - } - return []graphsync.ExtensionData{doNotSendExt}, nil + return getDoNotSendFirstBlocksExtension(channel) } // Skip the first N blocks because they were already received @@ -1065,7 +1010,7 @@ func (c *dtChannel) open( // Open a new graphsync request msg := fmt.Sprintf("Opening graphsync request to %s for root %s", dataSender, root) if channel != nil { - msg += fmt.Sprintf(" with %d CIDs already received", channel.ReceivedCidsLen()) + msg += fmt.Sprintf(" with %d Blocks already received", channel.ReceivedCidsTotal()) } log.Info(msg) responseChan, errChan := c.gs.Request(ctx, dataSender, root, stor, exts...) 
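With the PeerProtocol lookup removed, getRestartExtension no longer branches on the remote peer's protocol version: every restart is expressed through graphsync's do-not-send-first-blocks extension, sized from ReceivedCidsTotal(). The sketch below shows a plausible shape for that extension-building step; the helper name buildDoNotSendFirstBlocksExtension is illustrative, and the EncodeDoNotSendFirstBlocks signature is assumed from the go-graphsync donotsendfirstblocks package imported above rather than copied from this repository's getDoNotSendFirstBlocksExtension.

```go
package example

import (
	"github.com/ipfs/go-graphsync"
	"github.com/ipfs/go-graphsync/donotsendfirstblocks"
	"golang.org/x/xerrors"

	datatransfer "github.com/filecoin-project/go-data-transfer"
)

// buildDoNotSendFirstBlocksExtension (illustrative) asks the responder to skip
// the first N blocks of a restarted request, where N is the running count of
// blocks already received on the channel. Unlike the removed do-not-send-cids
// path, nothing here depends on tracking individual CIDs.
func buildDoNotSendFirstBlocksExtension(channel datatransfer.ChannelState) ([]graphsync.ExtensionData, error) {
	skipBlockCount := channel.ReceivedCidsTotal()
	data, err := donotsendfirstblocks.EncodeDoNotSendFirstBlocks(skipBlockCount)
	if err != nil {
		return nil, xerrors.Errorf("encoding do-not-send-first-blocks extension: %w", err)
	}
	return []graphsync.ExtensionData{{
		Name: graphsync.ExtensionsDoNotSendFirstBlocks,
		Data: data,
	}}, nil
}
```

The design trade-off is visible in the log-message change above: a restart now reports (and skips) a count of non-unique blocks rather than a deduplicated CID set, which is cheaper to track and sufficient for resuming a linear traversal.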
diff --git a/transport/graphsync/graphsync_test.go b/transport/graphsync/graphsync_test.go index 16dbb2df..a907875b 100644 --- a/transport/graphsync/graphsync_test.go +++ b/transport/graphsync/graphsync_test.go @@ -11,7 +11,6 @@ import ( "github.com/ipfs/go-cid" "github.com/ipfs/go-graphsync" - "github.com/ipfs/go-graphsync/cidset" "github.com/ipfs/go-graphsync/donotsendfirstblocks" "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" @@ -710,38 +709,6 @@ func TestManager(t *testing.T) { require.True(t, events.OnReceiveDataErrorCalled) }, }, - "open channel adds cids to the DoNotSendCids extension for v1.1 protocol": { - protocol: datatransfer.ProtocolDataTransfer1_1, - action: func(gsData *harness) { - cids := testutil.GenerateCids(2) - channel := &mockChannelState{receivedCids: cids} - stor, _ := gsData.outgoing.Selector() - - go gsData.outgoingRequestHook() - _ = gsData.transport.OpenChannel( - gsData.ctx, - gsData.other, - datatransfer.ChannelID{ID: gsData.transferID, Responder: gsData.other, Initiator: gsData.self}, - cidlink.Link{Cid: gsData.outgoing.BaseCid()}, - stor, - channel, - gsData.outgoing) - }, - check: func(t *testing.T, events *fakeEvents, gsData *harness) { - requestReceived := gsData.fgs.AssertRequestReceived(gsData.ctx, t) - - ext := requestReceived.Extensions - require.Len(t, ext, 3) - doNotSend := ext[2] - - name := doNotSend.Name - require.Equal(t, graphsync.ExtensionDoNotSendCIDs, name) - data := doNotSend.Data - cs, err := cidset.DecodeCidSet(data) - require.NoError(t, err) - require.Equal(t, cs.Len(), 2) - }, - }, "open channel sends missing Cids": { action: func(gsData *harness) { stor, _ := gsData.outgoing.Selector() @@ -796,8 +763,8 @@ func TestManager(t *testing.T) { requestReceived := gsData.fgs.AssertRequestReceived(gsData.ctx, t) ext := requestReceived.Extensions - require.Len(t, ext, 3) - doNotSend := ext[2] + require.Len(t, ext, 2) + doNotSend := ext[1] name := doNotSend.Name require.Equal(t, graphsync.ExtensionsDoNotSendFirstBlocks, name) @@ -1124,13 +1091,7 @@ func TestManager(t *testing.T) { fgs := testutil.NewFakeGraphSync() outgoing := testutil.NewDTRequest(t, transferID) incoming := testutil.NewDTResponse(t, transferID) - pp := testutil.NewMockPeerProtocol() - proto := datatransfer.ProtocolDataTransfer1_2 - if data.protocol != "" { - proto = data.protocol - } - pp.SetProtocol(peers[1], proto) - transport := NewTransport(peers[0], fgs, pp) + transport := NewTransport(peers[0], fgs) gsData := &harness{ ctx: ctx, outgoing: outgoing, diff --git a/types.go b/types.go index 983d8435..c3fab186 100644 --- a/types.go +++ b/types.go @@ -129,12 +129,6 @@ type ChannelState interface { // LastVoucherResult returns the last voucher result sent on the channel LastVoucherResult() VoucherResult - // ReceivedCids returns the cids received so far on the channel - ReceivedCids() []cid.Cid - - // ReceivedCidsLen returns the number of unique cids received so far on the channel - ReceivedCidsLen() int - // ReceivedCidsTotal returns the number of (non-unique) cids received so far // on the channel - note that a block can exist in more than one place in the DAG ReceivedCidsTotal() int64
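With ReceivedCids and ReceivedCidsLen dropped from ChannelState, callers that previously reported progress from the per-CID list are left with only the aggregate block count. A minimal sketch of consuming the slimmed-down interface, assuming the ChannelID and Received accessors from the rest of the ChannelState interface (not shown in this hunk):

```go
package example

import (
	"fmt"

	datatransfer "github.com/filecoin-project/go-data-transfer"
)

// describeProgress summarizes a channel using only what survives this change.
// ReceivedCidsTotal counts blocks, not unique CIDs, because the same block can
// appear at more than one position in the DAG.
func describeProgress(ch datatransfer.ChannelState) string {
	return fmt.Sprintf("channel %s: %d bytes in %d blocks received",
		ch.ChannelID(), ch.Received(), ch.ReceivedCidsTotal())
}
```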