5 changes: 5 additions & 0 deletions channelmonitor/channelmonitor.go
@@ -52,6 +52,8 @@ type Config struct {
 	// Max time to wait for the responder to send a Complete message once all
 	// data has been sent
 	CompleteTimeout time.Duration
+	// Called when a restart completes successfully
+	OnRestartComplete func(id datatransfer.ChannelID)
 }
 
 func NewMonitor(mgr monitorAPI, cfg *Config) *Monitor {
@@ -382,6 +384,9 @@ func (mc *monitoredChannel) restartChannel() {
 
 	if !restartAgain {
 		// No restart queued, we're done
+		if mc.cfg.OnRestartComplete != nil {
+			mc.cfg.OnRestartComplete(mc.chid)
+		}
 		return
 	}
 
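For context, a minimal sketch of how a consumer might wire up the new `OnRestartComplete` hook when building a monitor config. The surrounding timeout values and the logging are illustrative assumptions, not part of this change:

```go
package example

import (
	"fmt"
	"time"

	datatransfer "github.com/filecoin-project/go-data-transfer"
	"github.com/filecoin-project/go-data-transfer/channelmonitor"
)

// exampleConfig sketches a channelmonitor.Config that uses the new callback.
// Every value besides OnRestartComplete is illustrative.
func exampleConfig() *channelmonitor.Config {
	return &channelmonitor.Config{
		AcceptTimeout:          30 * time.Second,
		CompleteTimeout:        5 * time.Minute,
		MaxConsecutiveRestarts: 5,
		// New in this PR: fires once a restart has finished and no
		// further restart is queued for the channel.
		OnRestartComplete: func(id datatransfer.ChannelID) {
			fmt.Printf("restart of channel %s complete\n", id)
		},
	}
}
```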
15 changes: 9 additions & 6 deletions impl/events.go
@@ -322,7 +322,7 @@ func (m *manager) restartRequest(chid datatransfer.ChannelID,
 		return nil, err
 	}
 
-	voucher, result, err := m.validateVoucher(initiator, incoming, incoming.IsPull(), incoming.BaseCid(), stor)
+	voucher, result, err := m.validateVoucher(true, initiator, incoming, incoming.IsPull(), incoming.BaseCid(), stor)
 	if err != nil && err != datatransfer.ErrPause {
 		return result, xerrors.Errorf("failed to validate voucher: %w", err)
 	}
@@ -361,7 +361,7 @@ func (m *manager) acceptRequest(
 		return nil, err
 	}
 
-	voucher, result, err := m.validateVoucher(initiator, incoming, incoming.IsPull(), incoming.BaseCid(), stor)
+	voucher, result, err := m.validateVoucher(false, initiator, incoming, incoming.IsPull(), incoming.BaseCid(), stor)
 	if err != nil && err != datatransfer.ErrPause {
 		return result, err
 	}
@@ -410,16 +410,19 @@ func (m *manager) acceptRequest(
 // * reading voucher fails
 // * deserialization of selector fails
 // * validation fails
-func (m *manager) validateVoucher(sender peer.ID,
+func (m *manager) validateVoucher(
+	isRestart bool,
+	sender peer.ID,
 	incoming datatransfer.Request,
 	isPull bool,
 	baseCid cid.Cid,
-	stor ipld.Node) (datatransfer.Voucher, datatransfer.VoucherResult, error) {
+	stor ipld.Node,
+) (datatransfer.Voucher, datatransfer.VoucherResult, error) {
 	vouch, err := m.decodeVoucher(incoming, m.validatedTypes)
 	if err != nil {
 		return nil, nil, err
 	}
-	var validatorFunc func(peer.ID, datatransfer.Voucher, cid.Cid, ipld.Node) (datatransfer.VoucherResult, error)
+	var validatorFunc func(bool, peer.ID, datatransfer.Voucher, cid.Cid, ipld.Node) (datatransfer.VoucherResult, error)
 	processor, _ := m.validatedTypes.Processor(vouch.Type())
 	validator := processor.(datatransfer.RequestValidator)
 	if isPull {
@@ -428,7 +431,7 @@ func (m *manager) validateVoucher(sender peer.ID,
 		validatorFunc = validator.ValidatePush
 	}
 
-	result, err := validatorFunc(sender, vouch, baseCid, stor)
+	result, err := validatorFunc(isRestart, sender, vouch, baseCid, stor)
 	return vouch, result, err
 }
 
1 change: 1 addition & 0 deletions impl/integration_test.go
@@ -725,6 +725,7 @@ func TestAutoRestart(t *testing.T) {
 	// Set up
 	restartConf := ChannelRestartConfig(channelmonitor.Config{
 		AcceptTimeout:          100 * time.Millisecond,
+		RestartDebounce:        500 * time.Millisecond,
 		RestartBackoff:         500 * time.Millisecond,
 		MaxConsecutiveRestarts: 5,
 		RestartAckTimeout:      100 * time.Millisecond,
2 changes: 1 addition & 1 deletion impl/restart.go
@@ -73,7 +73,7 @@ func (m *manager) validateRestartVoucher(channel datatransfer.ChannelState, isPu
 	}
 
 	// revalidate the voucher by reconstructing the request that would have led to the creation of this channel
-	if _, _, err := m.validateVoucher(channel.OtherPeer(), req, isPull, channel.BaseCID(), channel.Selector()); err != nil {
+	if _, _, err := m.validateVoucher(true, channel.OtherPeer(), req, isPull, channel.BaseCID(), channel.Selector()); err != nil {
 		return err
 	}
 
2 changes: 2 additions & 0 deletions manager.go
@@ -13,12 +13,14 @@ import (
 type RequestValidator interface {
 	// ValidatePush validates a push request received from the peer that will send data
 	ValidatePush(
+		isRestart bool,
 		sender peer.ID,
 		voucher Voucher,
 		baseCid cid.Cid,
 		selector ipld.Node) (VoucherResult, error)
 	// ValidatePull validates a pull request received from the peer that will receive data
 	ValidatePull(
+		isRestart bool,
 		receiver peer.ID,
 		voucher Voucher,
 		baseCid cid.Cid,
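For implementers of `RequestValidator`, adapting to the new signature is mechanical; the sketch below shows one way to use the flag. The duplicate-request check is a hypothetical example of validation logic that must be skipped on restart, since the original request was already accepted once:

```go
package example

import (
	"errors"

	"github.com/ipfs/go-cid"
	"github.com/ipld/go-ipld-prime"
	"github.com/libp2p/go-libp2p-core/peer"

	datatransfer "github.com/filecoin-project/go-data-transfer"
)

// exampleValidator is a hypothetical RequestValidator implementation.
type exampleValidator struct {
	accepted map[string]bool // base CIDs this node has already accepted
}

var _ datatransfer.RequestValidator = (*exampleValidator)(nil)

func newExampleValidator() *exampleValidator {
	return &exampleValidator{accepted: make(map[string]bool)}
}

func (v *exampleValidator) ValidatePush(
	isRestart bool,
	sender peer.ID,
	voucher datatransfer.Voucher,
	baseCid cid.Cid,
	selector ipld.Node,
) (datatransfer.VoucherResult, error) {
	// A restarted channel was already validated when it was first opened,
	// so re-running the duplicate check here would wrongly reject it.
	if !isRestart && v.accepted[baseCid.String()] {
		return nil, errors.New("duplicate push request")
	}
	v.accepted[baseCid.String()] = true
	return nil, nil
}

func (v *exampleValidator) ValidatePull(
	isRestart bool,
	receiver peer.ID,
	voucher datatransfer.Voucher,
	baseCid cid.Cid,
	selector ipld.Node,
) (datatransfer.VoucherResult, error) {
	return nil, nil // accept unconditionally in this sketch
}
```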
2 changes: 2 additions & 0 deletions testutil/stubbedvalidator.go
@@ -19,6 +19,7 @@ func NewStubbedValidator() *StubbedValidator {
 
 // ValidatePush returns a stubbed result for a push validation
 func (sv *StubbedValidator) ValidatePush(
+	isRestart bool,
 	sender peer.ID,
 	voucher datatransfer.Voucher,
 	baseCid cid.Cid,
@@ -30,6 +31,7 @@ func (sv *StubbedValidator) ValidatePush(
 
 // ValidatePull returns a stubbed result for a pull validation
 func (sv *StubbedValidator) ValidatePull(
+	isRestart bool,
 	receiver peer.ID,
 	voucher datatransfer.Voucher,
 	baseCid cid.Cid,
36 changes: 32 additions & 4 deletions transport/graphsync/graphsync.go
@@ -78,6 +78,7 @@ type Transport struct {
 	contextCancelMap      map[datatransfer.ChannelID]cancelRequest
 	pending               map[datatransfer.ChannelID]chan struct{}
 	requestorCancelledMap map[datatransfer.ChannelID]struct{}
+	channelXferStarted    map[datatransfer.ChannelID]bool
 	pendingExtensions     map[datatransfer.ChannelID][]graphsync.ExtensionData
 	stores                map[datatransfer.ChannelID]struct{}
 	supportedExtensions   []graphsync.ExtensionName
@@ -98,6 +99,7 @@ func NewTransport(peerID peer.ID, gs graphsync.GraphExchange, options ...Option)
 		pendingExtensions:     make(map[datatransfer.ChannelID][]graphsync.ExtensionData),
 		channelIDMap:          make(map[datatransfer.ChannelID]graphsyncKey),
 		pending:               make(map[datatransfer.ChannelID]chan struct{}),
+		channelXferStarted:    make(map[datatransfer.ChannelID]bool),
 		stores:                make(map[datatransfer.ChannelID]struct{}),
 		supportedExtensions:   defaultSupportedExtensions,
 	}
@@ -149,15 +151,22 @@ func (t *Transport) OpenChannel(ctx context.Context,
 		// Relock now that request has been cancelled
 		t.dataLock.Lock()
 	}
-	// Set up the request listeners
 
+	// Keep track of "pending" channels.
+	// The channel is in the "pending" state when we've made a call to
+	// Graphsync to open a request, but Graphsync hasn't yet called the
+	// outgoing request hook.
 	t.pending[channelID] = make(chan struct{})
+
+	// Create a cancellable context for the channel so that the graphsync
+	// request can be cancelled
 	internalCtx, internalCancel := context.WithCancel(ctx)
 	cancelRQ := cancelRequest{
 		cancel:    internalCancel,
 		completed: make(chan struct{}),
 	}
 	t.contextCancelMap[channelID] = cancelRQ
+
 	t.dataLock.Unlock()
 
 	// If this is a restart request, the client can send a list of CIDs of
@@ -348,10 +357,10 @@ func (t *Transport) ResumeChannel(ctx context.Context,
 	defer t.dataLock.Unlock()
 
 	if _, ok := t.requestorCancelledMap[chid]; ok {
-
 		t.pendingExtensions[chid] = append(t.pendingExtensions[chid], extensions...)
 		return nil
 	}
+	t.channelXferStarted[chid] = true
 	return t.gs.UnpauseResponse(gsKey.p, gsKey.requestID, extensions...)
 }
 
@@ -375,10 +384,11 @@ func (t *Transport) CloseChannel(ctx context.Context, chid datatransfer.ChannelI
 		return nil
 	}
 	t.dataLock.Lock()
-	if _, ok := t.requestorCancelledMap[chid]; ok {
+	_, ok := t.requestorCancelledMap[chid]
+	t.dataLock.Unlock()
+	if ok {
 		return nil
 	}
-	t.dataLock.Unlock()
 	return t.gs.CancelResponse(gsKey.p, gsKey.requestID)
 }
 
@@ -606,11 +616,26 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook
 		return
 	}
 
+	// Check if the callback indicated that the channel should be paused
+	// immediately
+	paused := false
 	if err == datatransfer.ErrPause {
+		paused = true
 		hookActions.PauseResponse()
 	}
 
 	t.dataLock.Lock()
+
+	// If this is a restart request, and the data transfer still hasn't got
+	// out of the paused state (eg because we're still unsealing), start this
+	// graphsync response in the paused state.
+	hasXferStarted, isRestart := t.channelXferStarted[chid]
+	if isRestart && !hasXferStarted && !paused {
+		paused = true
+		hookActions.PauseResponse()
+	}
+	t.channelXferStarted[chid] = !paused
+
 	gsKey := graphsyncKey{request.ID(), p}
 	if _, ok := t.requestorCancelledMap[chid]; ok {
 		delete(t.requestorCancelledMap, chid)
@@ -626,7 +651,9 @@ func (t *Transport) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hook
 	if ok {
 		hookActions.UsePersistenceOption("data-transfer-" + chid.String())
 	}
+
 	t.dataLock.Unlock()
+
 	hookActions.ValidateRequest()
 }
 

Expand Down Expand Up @@ -695,6 +722,7 @@ func (t *Transport) cleanupChannel(chid datatransfer.ChannelID, gsKey graphsyncK
delete(t.graphsyncRequestMap, gsKey)
delete(t.pendingExtensions, chid)
delete(t.requestorCancelledMap, chid)
delete(t.channelXferStarted, chid)
_, ok := t.stores[chid]
if ok {
opt := "data-transfer-" + chid.String()
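Behavioral note on the transport changes: with `channelXferStarted`, a graphsync response for a restarted channel now opens paused and stays paused until the responder resumes it (eg once unsealing finishes), at which point `ResumeChannel` marks the transfer as started and unpauses the response. On the responder side, that resume might look roughly like the sketch below; the function and its call site are assumptions for illustration:

```go
package example

import (
	"context"

	datatransfer "github.com/filecoin-project/go-data-transfer"
)

// resumeWhenReady sketches the responder side: once the data is actually
// ready to send (eg unsealing has finished), unpause the restarted transfer
// so the graphsync response starts flowing blocks again.
func resumeWhenReady(ctx context.Context, mgr datatransfer.Manager, chid datatransfer.ChannelID) error {
	return mgr.ResumeDataTransferChannel(ctx, chid)
}
```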