From 970a61e796c75a856128dcf5b6cc8c967e505507 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Wed, 27 Oct 2021 11:50:08 +0400 Subject: [PATCH 1/5] interfaces and types --- storagemarket/datastore/interface.go | 7 ++ storagemarket/datatransfer/interface.go | 14 +++ storagemarket/fundmanager/interface.go | 6 + storagemarket/lotusnode/node.go | 107 ++++++++++++++++++ storagemarket/storagemanager/interface.go | 8 ++ storagemarket/types/deal_state.go | 46 ++++++++ .../types/dealcheckpoints/checkpoints.go | 12 ++ storagemarket/types/types.go | 59 ++++++++++ 8 files changed, 259 insertions(+) create mode 100644 storagemarket/datastore/interface.go create mode 100644 storagemarket/datatransfer/interface.go create mode 100644 storagemarket/fundmanager/interface.go create mode 100644 storagemarket/lotusnode/node.go create mode 100644 storagemarket/storagemanager/interface.go create mode 100644 storagemarket/types/deal_state.go create mode 100644 storagemarket/types/dealcheckpoints/checkpoints.go create mode 100644 storagemarket/types/types.go diff --git a/storagemarket/datastore/interface.go b/storagemarket/datastore/interface.go new file mode 100644 index 000000000..4a306ed29 --- /dev/null +++ b/storagemarket/datastore/interface.go @@ -0,0 +1,7 @@ +package datastore + +import "github.com/filecoin-project/boost/storagemarket/types" + +type API interface { + CreateOrUpdateDeal(newState *types.ProviderDealState) error +} diff --git a/storagemarket/datatransfer/interface.go b/storagemarket/datatransfer/interface.go new file mode 100644 index 000000000..6fde189dd --- /dev/null +++ b/storagemarket/datatransfer/interface.go @@ -0,0 +1,14 @@ +package datatransfer + +import ( + "context" + "net/url" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/libp2p/go-libp2p-core/event" +) + +type Transport interface { + Execute(ctx context.Context, URL *url.URL, OutputFilePath string, ExpectedDealSize abi.PaddedPieceSize) (event.Subscription, error) +} diff --git a/storagemarket/fundmanager/interface.go b/storagemarket/fundmanager/interface.go new file mode 100644 index 000000000..8ff24c400 --- /dev/null +++ b/storagemarket/fundmanager/interface.go @@ -0,0 +1,6 @@ +package fundmanager + +type Manager interface { + ReserveFunds() error + ReleaseFunds() error +} diff --git a/storagemarket/lotusnode/node.go b/storagemarket/lotusnode/node.go new file mode 100644 index 000000000..00f847ae8 --- /dev/null +++ b/storagemarket/lotusnode/node.go @@ -0,0 +1,107 @@ +package lotusnode + +import ( + "context" + "io" + + "github.com/filecoin-project/boost/storagemarket/types" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + "github.com/ipfs/go-cid" +) + +// DealSectorPreCommittedCallback is a callback that runs when a sector is pre-committed +// sectorNumber: the number of the sector that the deal is in +// isActive: the deal is already active +type DealSectorPreCommittedCallback func(sectorNumber abi.SectorNumber, isActive bool, err error) + +// DealSectorCommittedCallback is a callback that runs when a sector is committed +type DealSectorCommittedCallback func(err error) + +// DealExpiredCallback is a callback that runs when a deal expires +type DealExpiredCallback 
func(err error)
+
+// DealSlashedCallback is a callback that runs when a deal gets slashed
+type DealSlashedCallback func(slashEpoch abi.ChainEpoch, err error)
+
+// PackingResult returns information about how a deal was put into a sector
+type PackingResult struct {
+	SectorNumber abi.SectorNumber
+	Offset       abi.PaddedPieceSize
+	Size         abi.PaddedPieceSize
+}
+
+// PublishDealsWaitResult is the result of a call to wait for publish deals to
+// appear on chain
+type PublishDealsWaitResult struct {
+	DealID   abi.DealID
+	FinalCid cid.Cid
+}
+
+// StorageProviderNode defines the node dependencies for a StorageProvider
+type StorageProviderNode interface {
+	// GetChainHead returns a tipset token for the current chain head
+	GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error)
+
+	// AddFunds adds funds to the StorageMarketActor for a storage participant. Used by both providers and clients.
+	AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error)
+
+	// ReserveFunds reserves the given amount of funds and ensures it is available for the deal
+	ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error)
+
+	// ReleaseFunds releases funds reserved with ReserveFunds
+	ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error
+
+	// VerifySignature verifies a given set of data was signed properly by a given address's private key
+	VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, plaintext []byte, tok shared.TipSetToken) (bool, error)
+
+	// WaitForMessage waits until a message appears on chain. If it is already on chain, the callback is called immediately
+	WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, cid.Cid, error) error) error
+
+	// SignBytes signs the given data with the given address's private key
+	SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error)
+
+	// DealProviderCollateralBounds returns the min and max collateral a storage provider can issue.
+	DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error)
+
+	// OnDealSectorPreCommitted waits for a deal's sector to be pre-committed
+	OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid, cb DealSectorPreCommittedCallback) error
+
+	// OnDealSectorCommitted waits for a deal's sector to be sealed and proved, indicating the deal is active
+	OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid *cid.Cid, cb DealSectorCommittedCallback) error
+
+	// OnDealExpiredOrSlashed registers callbacks to be called when the deal expires or is slashed
+	OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired DealExpiredCallback, onDealSlashed DealSlashedCallback) error
+
+	// PublishDeals publishes a deal on chain, returns the message cid, but does not wait for the message to appear
+	PublishDeals(ctx context.Context, deal types.ProviderDealState) (cid.Cid, error)
+
+	// WaitForPublishDeals waits for a deal publish message to land on chain.
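+	// It returns the on-chain deal ID and the final CID of the publish message (see PublishDealsWaitResult above).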
+ WaitForPublishDeals(ctx context.Context, mcid cid.Cid, proposal market.DealProposal) (*PublishDealsWaitResult, error) + + // OnDealComplete is called when a deal is complete and on chain, and data has been transferred and is ready to be added to a sector + OnDealComplete(ctx context.Context, deal types.ProviderDealState, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) (*PackingResult, error) + + // GetMinerWorkerAddress returns the worker address associated with a miner + GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken) (address.Address, error) + + // GetDataCap gets the current data cap for addr + GetDataCap(ctx context.Context, addr address.Address, tok shared.TipSetToken) (*verifreg.DataCap, error) + + // GetProofType gets the current seal proof type for the given miner. + GetProofType(ctx context.Context, addr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) + + // GetBalance returns locked/unlocked for a storage participant. Used by both providers and clients. + GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (Balance, error) +} + +// Balance represents a current balance of funds in the StorageMarketActor. +type Balance struct { + Locked abi.TokenAmount + Available abi.TokenAmount +} diff --git a/storagemarket/storagemanager/interface.go b/storagemarket/storagemanager/interface.go new file mode 100644 index 000000000..dfe8a8c82 --- /dev/null +++ b/storagemarket/storagemanager/interface.go @@ -0,0 +1,8 @@ +package storagemanager + +import "github.com/google/uuid" + +type Manager interface { + ReserveSpace(dealUuid uuid.UUID, spaceInBytes uint64) (bool, error) + ReleaseSpace(dealUuid uuid.UUID) (bool, error) +} diff --git a/storagemarket/types/deal_state.go b/storagemarket/types/deal_state.go new file mode 100644 index 000000000..58cdbcd12 --- /dev/null +++ b/storagemarket/types/deal_state.go @@ -0,0 +1,46 @@ +package types + +import ( + "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/google/uuid" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" +) + +// ProviderDealState is the local state tracked for a deal by the StorageProvider. +type ProviderDealState struct { + // DealUuid is an unique uuid generated by client for the deal. + DealUuid uuid.UUID + // ClientDealProposal is the deal proposal sent by the client. + ClientDealProposal market.ClientDealProposal + + // SelfPeerID is the Storage Provider's libp2p Peer ID. + SelfPeerID peer.ID + // ClientPeerID is the Clients libp2p Peer ID. + ClientPeerID peer.ID + + // DealDataRoot is the root of the IPLD DAG that the client wants to store. + DealDataRoot cid.Cid + + // data-transfer + // InboundCARPath is the file-path where the storage provider will persist the CAR file sent by the client. + InboundCARPath string + // TransferURL is the URL sent by the client where the Storage Provider can fetch the CAR file from. + TransferURL string + + // Chain Vars + DealID abi.DealID + AddFundsCid cid.Cid + PublishCid cid.Cid + + // sector packing info + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + Length abi.PaddedPieceSize + + // deal checkpoint in DB. 
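+	// It records the last stage the deal completed, so that execution can resume from there after a restart.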
+ Checkpoint dealcheckpoints.Checkpoint +} diff --git a/storagemarket/types/dealcheckpoints/checkpoints.go b/storagemarket/types/dealcheckpoints/checkpoints.go new file mode 100644 index 000000000..92dcd1a7f --- /dev/null +++ b/storagemarket/types/dealcheckpoints/checkpoints.go @@ -0,0 +1,12 @@ +package dealcheckpoints + +type Checkpoint int + +const ( + New Checkpoint = iota + Transferred + FundsReserved + Published + PublishConfirmed + AddedPiece +) diff --git a/storagemarket/types/types.go b/storagemarket/types/types.go new file mode 100644 index 000000000..dec6dcaf0 --- /dev/null +++ b/storagemarket/types/types.go @@ -0,0 +1,59 @@ +package types + +import ( + "time" + + "github.com/google/uuid" + "github.com/libp2p/go-libp2p-core/peer" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin/market" +) + +// StorageAsk defines the parameters by which a miner will choose to accept or +// reject a deal. Note: making a storage deal proposal which matches the miner's +// ask is a precondition, but not sufficient to ensure the deal is accepted (the +// storage provider may run its own decision logic). +type StorageAsk struct { + // Price per GiB / Epoch + Price abi.TokenAmount + VerifiedPrice abi.TokenAmount + + MinPieceSize abi.PaddedPieceSize + MaxPieceSize abi.PaddedPieceSize + Miner address.Address +} + +// ClientDealParams are the deal params sent by the client +type ClientDealParams struct { + DealUuid uuid.UUID + MinerPeerID peer.ID + ClientPeerID peer.ID + ClientDealProposal market.ClientDealProposal + + DealDataRoot cid.Cid + TransferURL string +} + +// ProviderDealRejectionInfo is the information sent by the Storage Provider to the Client when it rejects a valid deal. +type ProviderDealRejectionInfo struct { + Reason string + Backoff time.Duration +} + +type ProviderDealEvent struct { + DealUuid uuid.UUID + // ... + // +} + +type DataTransferEvent struct { + DealUuid uuid.UUID + // ... + // + // TransferEvent (Started, Progress, Finished) ? 
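+	// PercentComplete is a rough indication of how much of the transfer has completed.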
+ PercentComplete int +} From b49a945eafb717306d10f76a97f9dcf8774dbfd6 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Wed, 27 Oct 2021 13:14:14 +0400 Subject: [PATCH 2/5] feat/deal-execution (#12) --- go.mod | 7 + storagemarket/deal_acceptance.go | 165 +++++++++++++++ storagemarket/deal_execution.go | 344 +++++++++++++++++++++++++++++++ storagemarket/deal_handler.go | 36 ++++ storagemarket/provider.go | 302 +++++++++++++++++++++++++++ 5 files changed, 854 insertions(+) create mode 100644 storagemarket/deal_acceptance.go create mode 100644 storagemarket/deal_execution.go create mode 100644 storagemarket/deal_handler.go create mode 100644 storagemarket/provider.go diff --git a/go.mod b/go.mod index ba87ac783..dac16ca10 100644 --- a/go.mod +++ b/go.mod @@ -11,9 +11,14 @@ require ( github.com/filecoin-project/dagstore v0.4.3 github.com/filecoin-project/go-address v0.0.5 github.com/filecoin-project/go-bitfield v0.2.4 + github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 + github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 github.com/filecoin-project/go-data-transfer v1.11.1 + github.com/filecoin-project/go-fil-commcid v0.1.0 + github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 github.com/filecoin-project/go-fil-markets v1.13.1 github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec + github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1 github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 github.com/filecoin-project/go-statemachine v1.0.1 github.com/filecoin-project/go-statestore v0.1.1 @@ -41,8 +46,10 @@ require ( github.com/ipfs/go-log/v2 v2.3.0 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 + github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 github.com/ipld/go-ipld-selector-text-lite v0.0.0 github.com/kelseyhightower/envconfig v1.4.0 + github.com/libp2p/go-eventbus v0.2.1 github.com/libp2p/go-libp2p v0.15.0 github.com/libp2p/go-libp2p-connmgr v0.2.4 github.com/libp2p/go-libp2p-core v0.9.0 diff --git a/storagemarket/deal_acceptance.go b/storagemarket/deal_acceptance.go new file mode 100644 index 000000000..4fd6c11a6 --- /dev/null +++ b/storagemarket/deal_acceptance.go @@ -0,0 +1,165 @@ +package storagemarket + +import ( + "errors" + "fmt" + + "github.com/filecoin-project/boost/storagemarket/types" + + "github.com/filecoin-project/go-fil-markets/shared" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/filecoin-project/specs-actors/actors/builtin/miner" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" +) + +const DealMaxLabelSize = 256 + +// ValidateDealProposal validates a proposed deal against the provider criteria +func (p *provider) validateDealProposal(deal types.ProviderDealState) error { + tok, curEpoch, err := p.lotusNode.GetChainHead(p.ctx) + if err != nil { + return fmt.Errorf("node error getting most recent state id: %w", err) + } + + // verify client signature + if err := p.validateSignature(tok, deal); err != nil { + return fmt.Errorf("validateSignature failed: %w", err) + } + + // validate deal proposal + proposal := deal.ClientDealProposal.Proposal + if proposal.Provider != p.Address { + return fmt.Errorf("incorrect provider for deal") + } + + if len(proposal.Label) > 
DealMaxLabelSize { + return fmt.Errorf("deal label can be at most %d bytes, is %d", DealMaxLabelSize, len(proposal.Label)) + } + + if err := proposal.PieceSize.Validate(); err != nil { + return fmt.Errorf("proposal piece size is invalid: %w", err) + } + + if !proposal.PieceCID.Defined() { + return fmt.Errorf("proposal PieceCID undefined") + } + + if proposal.PieceCID.Prefix() != market.PieceCIDPrefix { + return fmt.Errorf("proposal PieceCID had wrong prefix") + } + + if proposal.EndEpoch <= proposal.StartEpoch { + return fmt.Errorf("proposal end before proposal start") + } + + if curEpoch > proposal.StartEpoch { + return fmt.Errorf("deal start epoch has already elapsed") + } + + // Check that the delta between the start and end epochs (the deal + // duration) is within acceptable bounds + minDuration, maxDuration := market2.DealDurationBounds(proposal.PieceSize) + if proposal.Duration() < minDuration || proposal.Duration() > maxDuration { + return fmt.Errorf("deal duration out of bounds (min, max, provided): %d, %d, %d", minDuration, maxDuration, proposal.Duration()) + } + + // Check that the proposed end epoch isn't too far beyond the current epoch + maxEndEpoch := curEpoch + miner.MaxSectorExpirationExtension + if proposal.EndEpoch > maxEndEpoch { + return fmt.Errorf("invalid deal end epoch %d: cannot be more than %d past current epoch %d", proposal.EndEpoch, miner.MaxSectorExpirationExtension, curEpoch) + } + + pcMin, pcMax, err := p.lotusNode.DealProviderCollateralBounds(p.ctx, proposal.PieceSize, proposal.VerifiedDeal) + if err != nil { + return fmt.Errorf("node error getting collateral bounds: %w", err) + } + + if proposal.ProviderCollateral.LessThan(pcMin) { + return fmt.Errorf("proposed provider collateral below minimum: %s < %s", proposal.ProviderCollateral, pcMin) + } + + if proposal.ProviderCollateral.GreaterThan(pcMax) { + return fmt.Errorf("proposed provider collateral above maximum: %s > %s", proposal.ProviderCollateral, pcMax) + } + + if err := p.validateAsk(deal); err != nil { + return fmt.Errorf("validateAsk failed: %w", err) + } + + // check market funds + clientMarketBalance, err := p.lotusNode.GetBalance(p.ctx, proposal.Client, tok) + if err != nil { + return fmt.Errorf("node error getting client market balance failed: %w", err) + } + + // This doesn't guarantee that the client won't withdraw / lock those funds + // but it's a decent first filter + if clientMarketBalance.Available.LessThan(proposal.ClientBalanceRequirement()) { + return fmt.Errorf("clientMarketBalance.Available too small: %d < %d", clientMarketBalance.Available, proposal.ClientBalanceRequirement()) + } + + // Verified deal checks + if proposal.VerifiedDeal { + dataCap, err := p.lotusNode.GetDataCap(p.ctx, proposal.Client, tok) + if err != nil { + return fmt.Errorf("node error fetching verified data cap: %w", err) + } + + if dataCap == nil { + return errors.New("node error fetching verified data cap: data cap missing -- client not verified") + } + + pieceSize := big.NewIntUnsigned(uint64(proposal.PieceSize)) + if dataCap.LessThan(pieceSize) { + return errors.New("verified deal DataCap too small for proposed piece size") + } + } + + return nil +} + +func (p *provider) validateAsk(deal types.ProviderDealState) error { + ask := p.GetAsk() + askPrice := ask.Price + if deal.ClientDealProposal.Proposal.VerifiedDeal { + askPrice = ask.VerifiedPrice + } + + proposal := deal.ClientDealProposal.Proposal + minPrice := big.Div(big.Mul(askPrice, abi.NewTokenAmount(int64(proposal.PieceSize))), 
abi.NewTokenAmount(1<<30)) + if proposal.StoragePricePerEpoch.LessThan(minPrice) { + return fmt.Errorf("storage price per epoch less than asking price: %s < %s", proposal.StoragePricePerEpoch, minPrice) + } + + if proposal.PieceSize < ask.MinPieceSize { + return fmt.Errorf("piece size less than minimum required size: %d < %d", proposal.PieceSize, ask.MinPieceSize) + } + + if proposal.PieceSize > ask.MaxPieceSize { + return fmt.Errorf("piece size more than maximum allowed size: %d > %d", proposal.PieceSize, ask.MaxPieceSize) + } + + return nil +} + +func (p *provider) validateSignature(tok shared.TipSetToken, deal types.ProviderDealState) error { + b, err := cborutil.Dump(&deal.ClientDealProposal.Proposal) + if err != nil { + return fmt.Errorf("failed to serialize client deal proposal: %w", err) + } + + verified, err := p.lotusNode.VerifySignature(p.ctx, deal.ClientDealProposal.ClientSignature, deal.ClientDealProposal.Proposal.Client, b, tok) + if err != nil { + return fmt.Errorf("error verifying signature: %w", err) + } + if !verified { + return errors.New("could not verify signature") + } + + return nil +} diff --git a/storagemarket/deal_execution.go b/storagemarket/deal_execution.go new file mode 100644 index 000000000..4a00b55ae --- /dev/null +++ b/storagemarket/deal_execution.go @@ -0,0 +1,344 @@ +package storagemarket + +import ( + "context" + "fmt" + "io" + "net/url" + "os" + "time" + + "github.com/filecoin-project/boost/stores" + + "github.com/libp2p/go-libp2p-core/event" + + "github.com/filecoin-project/go-padreader" + + "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" + + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/go-commp-utils/writer" + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/ipfs/go-cid" + carv2 "github.com/ipld/go-car/v2" + + "github.com/filecoin-project/boost/storagemarket/types" +) + +func (p *provider) failDeal(ds *types.ProviderDealState, err error) { + p.cleanupDeal(ds) + + select { + case p.failedDealsChan <- failedDealReq{ds, err}: + case <-p.ctx.Done(): + } +} + +func (p *provider) cleanupDeal(ds *types.ProviderDealState) { + _ = os.Remove(ds.InboundCARPath) + // ... + //cleanup resources here +} + +func dealStateToEvent(ds *types.ProviderDealState) types.ProviderDealEvent { + // TODO flush out this function based on UX needs ! 
+	return types.ProviderDealEvent{}
+}
+
+func transferEventToProviderEvent(ds *types.ProviderDealState, evt types.DataTransferEvent) types.ProviderDealEvent {
+	// TODO flesh out based on UX needs
+	return types.ProviderDealEvent{}
+}
+
+func (p *provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) {
+	// publish an event with the current state of the deal
+	if err := publisher.Emit(dealStateToEvent(ds)); err != nil {
+		// log
+	}
+
+	// Transfer Data
+	if ds.Checkpoint < dealcheckpoints.Transferred {
+		if err := p.transferAndVerify(ds, publisher); err != nil {
+			p.failDeal(ds, fmt.Errorf("failed data transfer: %w", err))
+			return
+		}
+		if err := publisher.Emit(dealStateToEvent(ds)); err != nil {
+			// log
+		}
+	}
+
+	// Reserve funds
+	if ds.Checkpoint < dealcheckpoints.FundsReserved {
+		if err := p.reserveFunds(ds); err != nil {
+			p.failDeal(ds, fmt.Errorf("failed to reserve funds: %w", err))
+			return
+		}
+		if err := publisher.Emit(dealStateToEvent(ds)); err != nil {
+			// log
+		}
+	}
+
+	// Publish
+	if ds.Checkpoint <= dealcheckpoints.Published {
+		if err := p.publishDeal(ds); err != nil {
+			p.failDeal(ds, fmt.Errorf("failed to publish deal: %w", err))
+			return
+		}
+		if err := publisher.Emit(dealStateToEvent(ds)); err != nil {
+			// log
+		}
+	}
+
+	// TODO Release Reserved Funds ?
+
+	// AddPiece
+	if ds.Checkpoint < dealcheckpoints.AddedPiece {
+		if err := p.addPiece(ds); err != nil {
+			p.failDeal(ds, fmt.Errorf("failed to add piece: %w", err))
+			return
+		}
+
+		if err := publisher.Emit(dealStateToEvent(ds)); err != nil {
+			// log
+		}
+	}
+
+	// Lie back, sip your cocktail and watch the deal as it goes through the cycle of life and eventually expires.
+	// The deal will eventually wonder if it was all worth it, only to then realise that if it was able to make life
+	// easy for even one Web2 user, it really was all worth it, for kindness keeps us warm in an otherwise cold and indifferent cosmos.
+	// ...
+	// ...
+	// Watch deal on chain and change state in DB and emit notifications.
+}
+
+func (p *provider) transferAndVerify(ds *types.ProviderDealState, publisher event.Emitter) error {
+	// Transfer Data
+	u, err := url.Parse(ds.TransferURL)
+	if err != nil {
+		return fmt.Errorf("failed to parse transfer URL: %w", err)
+	}
+
+	tctx, cancel := context.WithDeadline(p.ctx, time.Now().Add(p.config.MaxTransferDuration))
+	defer cancel()
+	// TODO Execute SHOULD respect the context here!
+	// async call returns the subscription -> doesn't block
+	// need to ensure that passing the padded piece size here makes sense to the transport layer, which will receive the raw unpadded bytes.
+	transferSub, err := p.transport.Execute(tctx, u, ds.InboundCARPath, ds.ClientDealProposal.Proposal.PieceSize)
+	if err != nil {
+		return fmt.Errorf("failed data transfer: %w", err)
+	}
+	defer transferSub.Close()
+	select {
+	// similar to boost notifications, the transport layer too will first push the current state before resuming the transfer
+	case evt := <-transferSub.Out():
+		dtEvent := evt.(types.DataTransferEvent)
+		if err := publisher.Emit(transferEventToProviderEvent(ds, dtEvent)); err != nil {
+			// log
+		}
+		// if dtEvent.Type == Completed || Cancelled || Error {
+		//    move ahead, fail deal etc.
+ // } + + case <-tctx.Done(): + return fmt.Errorf("data transfer timed out: %w", tctx.Err()) + } + + // Verify CommP matches + pieceCid, err := p.generatePieceCommitment(ds) + if err != nil { + return fmt.Errorf("failed to generate CommP: %w", err) + } + + clientPieceCid := ds.ClientDealProposal.Proposal.PieceCID + if pieceCid != clientPieceCid { + return fmt.Errorf("commP mismatch, expected=%s, actual=%s", clientPieceCid, pieceCid) + } + + // persist transferred checkpoint + ds.Checkpoint = dealcheckpoints.Transferred + if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + return fmt.Errorf("failed to persist deal state: %w", err) + } + + // TODO : Emit a notification here + return nil +} + +// ReserveProviderFunds adds funds, as needed to the StorageMarketActor, so the miner has adequate collateral for the deal +func (p *provider) reserveFunds(ds *types.ProviderDealState) error { + tok, _, err := p.lotusNode.GetChainHead(p.ctx) + if err != nil { + return fmt.Errorf("acquiring chain head: %w", err) + } + + waddr, err := p.lotusNode.GetMinerWorkerAddress(p.ctx, ds.ClientDealProposal.Proposal.Provider, tok) + if err != nil { + return fmt.Errorf("looking up miner worker: %w", err) + } + + mcid, err := p.lotusNode.ReserveFunds(p.ctx, waddr, ds.ClientDealProposal.Proposal.Provider, ds.ClientDealProposal.Proposal.ProviderCollateral) + if err != nil { + return fmt.Errorf("reserving funds: %w", err) + } + + // if no message was sent, and there was no error, funds were already available + if mcid == cid.Undef { + ds.Checkpoint = dealcheckpoints.FundsReserved + if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + return fmt.Errorf("failed to update deal state: %w", err) + } + return nil + } + + if err := p.lotusNode.WaitForMessage(p.ctx, mcid, func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error { + if err != nil { + return fmt.Errorf("AddFunds errored: %w", err) + } + if code != exitcode.Ok { + return fmt.Errorf("AddFunds exit code: %s", code.String()) + } + return nil + }); err != nil { + return fmt.Errorf("failed to reserve funds: %w", err) + } + + ds.AddFundsCid = mcid + ds.Checkpoint = dealcheckpoints.FundsReserved + if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + return fmt.Errorf("failed to update deal state: %w", err) + } + + return nil +} + +// GeneratePieceCommitment generates the pieceCid for the CARv1 deal payload in +// the CARv2 file that already exists at the given path. +func (p *provider) generatePieceCommitment(ds *types.ProviderDealState) (c cid.Cid, finalErr error) { + rd, err := carv2.OpenReader(ds.InboundCARPath) + if err != nil { + return cid.Undef, fmt.Errorf("failed to get CARv2 reader: %w", err) + } + + defer func() { + if err := rd.Close(); err != nil { + + if finalErr == nil { + c = cid.Undef + finalErr = fmt.Errorf("failed to close CARv2 reader: %w", err) + return + } + } + }() + + // dump the CARv1 payload of the CARv2 file to the Commp Writer and get back the CommP. 
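+	// If the computed commP is for a piece smaller than the deal's piece size, it is padded up to the deal size below.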
+ w := &writer.Writer{} + written, err := io.Copy(w, rd.DataReader()) + if err != nil { + return cid.Undef, fmt.Errorf("failed to write to CommP writer: %w", err) + } + + if written != int64(rd.Header.DataSize) { + return cid.Undef, fmt.Errorf("number of bytes written to CommP writer %d not equal to the CARv1 payload size %d", written, rd.Header.DataSize) + } + + cidAndSize, err := w.Sum() + if err != nil { + return cid.Undef, fmt.Errorf("failed to get CommP: %w", err) + } + + dealSize := ds.ClientDealProposal.Proposal.PieceSize + if cidAndSize.PieceSize < dealSize { + // need to pad up! + rawPaddedCommp, err := commp.PadCommP( + // we know how long a pieceCid "hash" is, just blindly extract the trailing 32 bytes + cidAndSize.PieceCID.Hash()[len(cidAndSize.PieceCID.Hash())-32:], + uint64(cidAndSize.PieceSize), + uint64(dealSize), + ) + if err != nil { + return cid.Undef, fmt.Errorf("failed to pad data: %w", err) + } + cidAndSize.PieceCID, _ = commcid.DataCommitmentV1ToCID(rawPaddedCommp) + } + + return cidAndSize.PieceCID, err +} + +func (p *provider) publishDeal(ds *types.ProviderDealState) error { + if ds.Checkpoint < dealcheckpoints.Published { + mcid, err := p.lotusNode.PublishDeals(p.ctx, *ds) + if err != nil { + return fmt.Errorf("failed to publish deal: %w", err) + } + + ds.PublishCid = mcid + ds.Checkpoint = dealcheckpoints.Published + if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + return fmt.Errorf("failed to update deal: %w", err) + } + } + + res, err := p.lotusNode.WaitForPublishDeals(p.ctx, ds.PublishCid, ds.ClientDealProposal.Proposal) + if err != nil { + return fmt.Errorf("wait for publish failed: %w", err) + } + + ds.PublishCid = res.FinalCid + ds.DealID = res.DealID + ds.Checkpoint = dealcheckpoints.PublishConfirmed + if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + return fmt.Errorf("failed to update deal: %w", err) + } + + // TODO Release funds ? How does that work ? + return nil +} + +// HandoffDeal hands off a published deal for sealing and commitment in a sector +func (p *provider) addPiece(ds *types.ProviderDealState) error { + v2r, err := carv2.OpenReader(ds.InboundCARPath) + if err != nil { + return fmt.Errorf("failed to open CARv2 file: %w", err) + } + + // Hand the deal off to the process that adds it to a sector + paddedReader, err := padreader.NewInflator(v2r.DataReader(), v2r.Header.DataSize, ds.ClientDealProposal.Proposal.PieceSize.Unpadded()) + if err != nil { + return fmt.Errorf("failed to create inflator: %w", err) + } + + packingInfo, packingErr := p.lotusNode.OnDealComplete( + p.ctx, + *ds, + ds.ClientDealProposal.Proposal.PieceSize.Unpadded(), + paddedReader, + ) + + // Close the reader as we're done reading from it. + if err := v2r.Close(); err != nil { + return fmt.Errorf("failed to close CARv2 reader: %w", err) + } + + if packingErr != nil { + return fmt.Errorf("packing piece %s: %w", ds.ClientDealProposal.Proposal.PieceCID, packingErr) + } + + ds.SectorID = packingInfo.SectorNumber + ds.Offset = packingInfo.Offset + ds.Length = packingInfo.Size + ds.Checkpoint = dealcheckpoints.AddedPiece + if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + return fmt.Errorf("failed to update deal: %w", err) + } + + // Register the deal data as a "shard" with the DAG store. Later it can be + // fetched from the DAG store during retrieval. 
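+	// A failure to register the shard is only logged and does not fail the deal.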
+ if err := stores.RegisterShardSync(p.ctx, p.dagStore, ds.ClientDealProposal.Proposal.PieceCID, ds.InboundCARPath, true); err != nil { + err = fmt.Errorf("failed to activate shard: %w", err) + log.Error(err) + } + + return nil +} diff --git a/storagemarket/deal_handler.go b/storagemarket/deal_handler.go new file mode 100644 index 000000000..91e5d8cdf --- /dev/null +++ b/storagemarket/deal_handler.go @@ -0,0 +1,36 @@ +package storagemarket + +import ( + "sync" + + "github.com/google/uuid" + + "github.com/libp2p/go-libp2p-core/event" +) + +type DealHandler struct { + cancelSync sync.Once + + DealUuid uuid.UUID + // caller should close this when done with the deal + Subscription event.Subscription +} + +func newDealHandler(dealUuid uuid.UUID, sub event.Subscription) *DealHandler { + dh := &DealHandler{ + DealUuid: dealUuid, + Subscription: sub, + } + return dh +} + +// Shutsdown/Cancels the deal. +func (dh *DealHandler) Close() error { + dh.cancelSync.Do(func() { + _ = dh.Subscription.Close() + + // TODO Pass down the cancel to the deal go-routine in Boost + // wait for the deal to get cancelled + }) + return nil +} diff --git a/storagemarket/provider.go b/storagemarket/provider.go new file mode 100644 index 000000000..9f5e414ef --- /dev/null +++ b/storagemarket/provider.go @@ -0,0 +1,302 @@ +package storagemarket + +import ( + "context" + "fmt" + "net/url" + "os" + "sync" + "time" + + "github.com/filecoin-project/boost/filestore" + "github.com/filecoin-project/boost/stores" + + "github.com/libp2p/go-eventbus" + + "github.com/libp2p/go-libp2p-core/event" + + "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" + + "github.com/filecoin-project/boost/storagemarket/datatransfer" + "github.com/filecoin-project/boost/storagemarket/lotusnode" + + "github.com/filecoin-project/go-address" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/boost/storagemarket/storagemanager" + + "github.com/filecoin-project/boost/storagemarket/fundmanager" + "github.com/filecoin-project/boost/storagemarket/types" + + "github.com/filecoin-project/boost/storagemarket/datastore" +) + +var log = logging.Logger("provider") + +type Config struct { + MaxTransferDuration time.Duration +} + +type provider struct { + config Config + // Address of the provider on chain. + Address address.Address + + ctx context.Context + cancel context.CancelFunc + closeSync sync.Once + wg sync.WaitGroup + + // filestore for manipulating files on disk. 
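+	// It is used to create the temporary CAR files that hold incoming deal data.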
+	fs filestore.FileStore
+
+	// event loop
+	acceptDealsChan  chan acceptDealReq
+	failedDealsChan  chan failedDealReq
+	restartDealsChan chan restartReq
+
+	// Database API
+	dbApi datastore.API
+
+	// interacts with lotus
+	lotusNode lotusnode.StorageProviderNode
+	dagStore  stores.DAGStoreWrapper
+
+	fundManager    fundmanager.Manager
+	storageManager storagemanager.Manager
+	// TODO
+	// dealAcceptanceFilter
+
+	transport datatransfer.Transport
+}
+
+func NewProvider(dbApi datastore.API, lotusNode lotusnode.StorageProviderNode, fundManager fundmanager.Manager) (*provider, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	return &provider{
+		ctx:    ctx,
+		cancel: cancel,
+	}, nil
+}
+
+func (p *provider) GetAsk() *types.StorageAsk {
+	return nil
+}
+
+func (p *provider) ExecuteDeal(dp *types.ClientDealParams) (dh *DealHandler, pi *types.ProviderDealRejectionInfo, err error) {
+	if _, err := url.Parse(dp.TransferURL); err != nil {
+		return nil, nil, fmt.Errorf("transfer url is invalid: %w", err)
+	}
+
+	ds := types.ProviderDealState{
+		DealUuid:           dp.DealUuid,
+		ClientDealProposal: dp.ClientDealProposal,
+		SelfPeerID:         dp.MinerPeerID,
+		ClientPeerID:       dp.ClientPeerID,
+		DealDataRoot:       dp.DealDataRoot,
+		TransferURL:        dp.TransferURL,
+	}
+
+	// validate the deal proposal
+	if err := p.validateDealProposal(ds); err != nil {
+		return nil, &types.ProviderDealRejectionInfo{
+			Reason: fmt.Sprintf("failed validation: %s", err),
+		}, err
+	}
+
+	// create a temp file where we will hold the deal data.
+	tmp, err := p.fs.CreateTemp()
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to create temp file: %w", err)
+	}
+	if err := tmp.Close(); err != nil {
+		_ = os.Remove(string(tmp.OsPath()))
+		return nil, nil, fmt.Errorf("failed to close temp file: %w", err)
+	}
+	ds.InboundCARPath = string(tmp.OsPath())
+
+	// create the pub-sub plumbing for this deal
+	bus := eventbus.NewBus()
+	publisher, sub, err := createPubSub(bus)
+	if err != nil {
+		_ = os.Remove(ds.InboundCARPath)
+		return nil, nil, err
+	}
+
+	defer func() {
+		if pi != nil || err != nil {
+			_ = os.Remove(ds.InboundCARPath)
+			_ = sub.Close()
+		}
+	}()
+
+	// send a message to the event loop to run the deal through the acceptance filter and reserve the required resources,
+	// then wait for a response and return the response to the client.
+	respChan := make(chan acceptDealResp, 1)
+	select {
+	case p.acceptDealsChan <- acceptDealReq{&ds, respChan, publisher}:
+	case <-p.ctx.Done():
+		return nil, nil, p.ctx.Err()
+	}
+
+	var resp acceptDealResp
+	select {
+	case resp = <-respChan:
+	case <-p.ctx.Done():
+		return nil, nil, p.ctx.Err()
+	}
+
+	// if there was an error, return just the error with no rejection reason.
+	if resp.err != nil {
+		return nil, nil, fmt.Errorf("failed to accept deal: %w", resp.err)
+	}
+	// the provider has rejected a valid deal, so return the rejection reason.
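+	// No deal handler is created in this case; the caller only receives the rejection info.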
+ if !resp.accepted { + return nil, resp.ri, nil + } + + dh = newDealHandler(dp.DealUuid, sub) + return dh, nil, nil +} + +func createPubSub(bus event.Bus) (event.Emitter, event.Subscription, error) { + emitter, err := bus.Emitter(&types.ProviderDealEvent{}, eventbus.Stateful) + if err != nil { + return nil, nil, fmt.Errorf("failed to create event emitter: %w", err) + } + sub, err := bus.Subscribe(new(types.ProviderDealEvent), eventbus.BufSize(256)) + if err != nil { + return nil, nil, fmt.Errorf("failed to create subscriber: %w", err) + } + + return emitter, sub, nil +} + +func (p *provider) Start() []*DealHandler { + // restart all existing deals + // execute db query to get all non-terminated deals here + var deals []*types.ProviderDealState + var restartWg sync.WaitGroup + dhs := make([]*DealHandler, 0, len(deals)) + + for i := range deals { + pub, sub, err := createPubSub(eventbus.NewBus()) + if err != nil { + panic(err) + } + + deal := deals[i] + req := restartReq{deal, pub} + + restartWg.Add(1) + go func() { + defer restartWg.Done() + + select { + case p.restartDealsChan <- req: + case <-p.ctx.Done(): + } + }() + + dhs = append(dhs, newDealHandler(deal.DealUuid, sub)) + } + + p.wg.Add(1) + go p.loop() + + // wait for all deals to be restarted before returning so we know new deals will be processed + // after all existing deals have restarted and accounted for their resources. + restartWg.Wait() + + return dhs +} + +func (p *provider) Close() error { + p.closeSync.Do(func() { + p.cancel() + p.wg.Wait() + }) + return nil +} + +type acceptDealReq struct { + st *types.ProviderDealState + rsp chan acceptDealResp + publisher event.Emitter +} + +type acceptDealResp struct { + accepted bool + ri *types.ProviderDealRejectionInfo + err error +} + +type failedDealReq struct { + st *types.ProviderDealState + err error +} + +type restartReq struct { + st *types.ProviderDealState + publisher event.Emitter +} + +// TODO: This is transient -> If it dosen't work out, we will use locks. +// 1:N will move this problem elsewhere. +func (p *provider) loop() { + defer p.wg.Done() + + for { + select { + case restartReq := <-p.restartDealsChan: + // Put ANY RESTART SYNCHRONIZATION LOGIC HERE. + // .... + // + p.wg.Add(1) + go func() { + defer p.wg.Done() + p.doDeal(restartReq.st, restartReq.publisher) + }() + + case dealReq := <-p.acceptDealsChan: + writeDealResp := func(accepted bool, ri *types.ProviderDealRejectionInfo, err error) { + select { + case dealReq.rsp <- acceptDealResp{accepted, ri, err}: + case <-p.ctx.Done(): + return + } + } + + var err error + if err != nil { + go writeDealResp(false, nil, err) + continue + } + + // TODO: Deal filter, storage space manager, fund manager etc . basically synchronization + // send rejection if deal is not accepted by the above filters + var accepted bool + if !accepted { + go writeDealResp(false, &types.ProviderDealRejectionInfo{}, nil) + continue + } + go writeDealResp(true, nil, nil) + + // start executing the deal + dealReq.st.Checkpoint = dealcheckpoints.New + + p.wg.Add(1) + go func() { + defer p.wg.Done() + p.doDeal(dealReq.st, dealReq.publisher) + }() + + case failedDeal := <-p.failedDealsChan: + fmt.Println(failedDeal) + // Release storage space , funds, shared resources etc etc. 
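+			// Note: failDeal has already removed the deal's temporary CAR file via cleanupDeal.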
+ + case <-p.ctx.Done(): + return + } + } +} From 9f262a847ce9219c7dcdf98bb6352dea27122ca5 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Wed, 27 Oct 2021 13:21:47 +0400 Subject: [PATCH 3/5] remove dead code --- storagemarket/fundmanager/interface.go | 6 ------ storagemarket/provider.go | 16 +++++----------- storagemarket/storagemanager/interface.go | 8 -------- 3 files changed, 5 insertions(+), 25 deletions(-) delete mode 100644 storagemarket/fundmanager/interface.go delete mode 100644 storagemarket/storagemanager/interface.go diff --git a/storagemarket/fundmanager/interface.go b/storagemarket/fundmanager/interface.go deleted file mode 100644 index 8ff24c400..000000000 --- a/storagemarket/fundmanager/interface.go +++ /dev/null @@ -1,6 +0,0 @@ -package fundmanager - -type Manager interface { - ReserveFunds() error - ReleaseFunds() error -} diff --git a/storagemarket/provider.go b/storagemarket/provider.go index 9f5e414ef..eeed27a7a 100644 --- a/storagemarket/provider.go +++ b/storagemarket/provider.go @@ -23,9 +23,6 @@ import ( "github.com/filecoin-project/go-address" logging "github.com/ipfs/go-log/v2" - "github.com/filecoin-project/boost/storagemarket/storagemanager" - - "github.com/filecoin-project/boost/storagemarket/fundmanager" "github.com/filecoin-project/boost/storagemarket/types" "github.com/filecoin-project/boost/storagemarket/datastore" @@ -62,20 +59,17 @@ type provider struct { lotusNode lotusnode.StorageProviderNode dagStore stores.DAGStoreWrapper - fundManager fundmanager.Manager - storageManager storagemanager.Manager - // TODO - // dealAcceptanceFilter - transport datatransfer.Transport } -func NewProvider(dbApi datastore.API, lotusNode lotusnode.StorageProviderNode, fundManager fundmanager.Manager) (*provider, error) { +func NewProvider(dbApi datastore.API, lotusNode lotusnode.StorageProviderNode) (*provider, error) { ctx, cancel := context.WithCancel(context.Background()) return &provider{ - ctx: ctx, - cancel: cancel, + ctx: ctx, + cancel: cancel, + lotusNode: lotusNode, + dbApi: dbApi, }, nil } diff --git a/storagemarket/storagemanager/interface.go b/storagemarket/storagemanager/interface.go deleted file mode 100644 index dfe8a8c82..000000000 --- a/storagemarket/storagemanager/interface.go +++ /dev/null @@ -1,8 +0,0 @@ -package storagemanager - -import "github.com/google/uuid" - -type Manager interface { - ReserveSpace(dealUuid uuid.UUID, spaceInBytes uint64) (bool, error) - ReleaseSpace(dealUuid uuid.UUID) (bool, error) -} From de9549a5f1b368393441411ceec3b756a93ef7a8 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Wed, 27 Oct 2021 13:24:01 +0400 Subject: [PATCH 4/5] remove fund reservation --- storagemarket/deal_execution.go | 62 ------------------- .../types/dealcheckpoints/checkpoints.go | 1 - 2 files changed, 63 deletions(-) diff --git a/storagemarket/deal_execution.go b/storagemarket/deal_execution.go index 4a00b55ae..42ed7000c 100644 --- a/storagemarket/deal_execution.go +++ b/storagemarket/deal_execution.go @@ -16,8 +16,6 @@ import ( "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" - "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/go-commp-utils/writer" commcid "github.com/filecoin-project/go-fil-commcid" commp "github.com/filecoin-project/go-fil-commp-hashhash" @@ -69,17 +67,6 @@ func (p *provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) } } - // Reserve funds - if ds.Checkpoint < dealcheckpoints.FundsReserved { - if err := p.reserveFunds(ds); err != nil { - p.failDeal(ds, 
fmt.Errorf("failed to reserve funds: %w", err)) - return - } - if err := publisher.Emit(dealStateToEvent(ds)); err != nil { - // log - } - } - // Publish if ds.Checkpoint <= dealcheckpoints.Published { if err := p.publishDeal(ds); err != nil { @@ -91,8 +78,6 @@ func (p *provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) } } - // TODO Release Reserved Funds ? - // AddPiece if ds.Checkpoint < dealcheckpoints.AddedPiece { if err := p.addPiece(ds); err != nil { @@ -166,53 +151,6 @@ func (p *provider) transferAndVerify(ds *types.ProviderDealState, publisher even return nil } -// ReserveProviderFunds adds funds, as needed to the StorageMarketActor, so the miner has adequate collateral for the deal -func (p *provider) reserveFunds(ds *types.ProviderDealState) error { - tok, _, err := p.lotusNode.GetChainHead(p.ctx) - if err != nil { - return fmt.Errorf("acquiring chain head: %w", err) - } - - waddr, err := p.lotusNode.GetMinerWorkerAddress(p.ctx, ds.ClientDealProposal.Proposal.Provider, tok) - if err != nil { - return fmt.Errorf("looking up miner worker: %w", err) - } - - mcid, err := p.lotusNode.ReserveFunds(p.ctx, waddr, ds.ClientDealProposal.Proposal.Provider, ds.ClientDealProposal.Proposal.ProviderCollateral) - if err != nil { - return fmt.Errorf("reserving funds: %w", err) - } - - // if no message was sent, and there was no error, funds were already available - if mcid == cid.Undef { - ds.Checkpoint = dealcheckpoints.FundsReserved - if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { - return fmt.Errorf("failed to update deal state: %w", err) - } - return nil - } - - if err := p.lotusNode.WaitForMessage(p.ctx, mcid, func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error { - if err != nil { - return fmt.Errorf("AddFunds errored: %w", err) - } - if code != exitcode.Ok { - return fmt.Errorf("AddFunds exit code: %s", code.String()) - } - return nil - }); err != nil { - return fmt.Errorf("failed to reserve funds: %w", err) - } - - ds.AddFundsCid = mcid - ds.Checkpoint = dealcheckpoints.FundsReserved - if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { - return fmt.Errorf("failed to update deal state: %w", err) - } - - return nil -} - // GeneratePieceCommitment generates the pieceCid for the CARv1 deal payload in // the CARv2 file that already exists at the given path. 
func (p *provider) generatePieceCommitment(ds *types.ProviderDealState) (c cid.Cid, finalErr error) { diff --git a/storagemarket/types/dealcheckpoints/checkpoints.go b/storagemarket/types/dealcheckpoints/checkpoints.go index 92dcd1a7f..7cac4593e 100644 --- a/storagemarket/types/dealcheckpoints/checkpoints.go +++ b/storagemarket/types/dealcheckpoints/checkpoints.go @@ -5,7 +5,6 @@ type Checkpoint int const ( New Checkpoint = iota Transferred - FundsReserved Published PublishConfirmed AddedPiece From 3423c6ebc3bd03d36f0c0037bd85336d40ad10cb Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Thu, 28 Oct 2021 14:51:41 +0200 Subject: [PATCH 5/5] 2/ integrate storagemarket.Provider into boost (#14) * integrate storagemarket.Provider into boost * add SectorBlocks; add DealPublisher; add OnDealSectorCommitted; add ProviderNodeAdapter * boost: add api client for cli interaction (#15) --- .circleci/config.yml | 49 --- .gitignore | 3 +- api/client/client.go | 47 +++ build/params.go | 3 + cli/cmd.go | 9 + cli/util/api.go | 250 ++++++++++++++ cli/util/apiinfo.go | 80 +++++ cmd/boost/dummydeal.go | 35 ++ cmd/boost/main.go | 1 + filestore/_test/a/b/c/d/existing.txt | Bin 64 -> 0 bytes go.mod | 229 ++++++++++++- lib/rpcenc/reader.go | 445 ++++++++++++++++++++++++ node/impl/client/car_helpers.go | 91 ----- node/modules/storageminer.go | 326 +----------------- node/repo/fsrepo.go | 4 +- storage/sectorblocks/sectorblocks.go | 176 ++++++++++ storagemarket/adapter.go | 388 +++++++++++++++++++++ storagemarket/deal_acceptance.go | 21 +- storagemarket/deal_execution.go | 133 ++++---- storagemarket/deal_publisher.go | 448 +++++++++++++++++++++++++ storagemarket/lotusnode/node.go | 107 ------ storagemarket/ondealsectorcommitted.go | 352 +++++++++++++++++++ storagemarket/provider.go | 46 ++- 23 files changed, 2568 insertions(+), 675 deletions(-) create mode 100644 api/client/client.go create mode 100644 build/params.go create mode 100644 cli/cmd.go create mode 100644 cli/util/api.go create mode 100644 cli/util/apiinfo.go create mode 100644 cmd/boost/dummydeal.go delete mode 100644 filestore/_test/a/b/c/d/existing.txt create mode 100644 lib/rpcenc/reader.go delete mode 100644 node/impl/client/car_helpers.go create mode 100644 storage/sectorblocks/sectorblocks.go create mode 100644 storagemarket/adapter.go create mode 100644 storagemarket/deal_publisher.go delete mode 100644 storagemarket/lotusnode/node.go create mode 100644 storagemarket/ondealsectorcommitted.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 066cf74ae..65593bb9a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -61,9 +61,6 @@ jobs: - run: sudo apt-get install npm - run: command: make buildall - - run: - name: check tag and version output match - command: ./scripts/version-check.sh ./boost - store_artifacts: path: boost - run: mkdir linux && mv boost linux/ @@ -72,14 +69,6 @@ jobs: paths: - linux - build-debug: - executor: golang - steps: - - install-deps - - prepare - - run: - command: make debug - test: description: | Run tests with gotestsum. @@ -220,41 +209,6 @@ jobs: - run: command: "! go fmt ./... 
2>&1 | read" - gen-check: - executor: golang - steps: - - install-deps - - prepare - - run: make deps - - run: go install golang.org/x/tools/cmd/goimports - - run: go install github.com/hannahhoward/cbor-gen-for - - run: make gen - - run: git --no-pager diff - - run: git --no-pager diff --quiet - - run: make docsgen-cli - - run: git --no-pager diff - - run: git --no-pager diff --quiet - - docs-check: - executor: golang - steps: - - install-deps - - prepare - - run: go install golang.org/x/tools/cmd/goimports - - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full - - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner - - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker - - run: make deps - - run: make docsgen - - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full - - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner - - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker - - run: git --no-pager diff - - run: diff ../pre-openrpc-full ../post-openrpc-full - - run: diff ../pre-openrpc-miner ../post-openrpc-miner - - run: diff ../pre-openrpc-worker ../post-openrpc-worker - - run: git --no-pager diff --quiet - lint: &lint description: | Run golangci-lint. @@ -302,9 +256,6 @@ workflows: concurrency: "16" # expend all docker 2xlarge CPUs. - mod-tidy-check - gofmt - - gen-check - - docs-check - - build-debug - build-all: filters: tags: diff --git a/.gitignore b/.gitignore index df8c87662..6f62a2c34 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,5 @@ build/.* .idea ux/index.js ux/node_modules -ux/package-lock.json \ No newline at end of file +ux/package-lock.json +filestore/_test/a/b/c/d/existing.txt diff --git a/api/client/client.go b/api/client/client.go new file mode 100644 index 000000000..1abbeb3b9 --- /dev/null +++ b/api/client/client.go @@ -0,0 +1,47 @@ +package client + +import ( + "context" + "net/http" + "net/url" + "path" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/boost/api" + "github.com/filecoin-project/boost/lib/rpcenc" +) + +func getPushUrl(addr string) (string, error) { + pushUrl, err := url.Parse(addr) + if err != nil { + return "", err + } + switch pushUrl.Scheme { + case "ws": + pushUrl.Scheme = "http" + case "wss": + pushUrl.Scheme = "https" + } + ///rpc/v0 -> /rpc/streams/v0/push + + pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push") + return pushUrl.String(), nil +} + +// NewBoostRPCV0 creates a new http jsonrpc client for miner +func NewBoostRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Boost, jsonrpc.ClientCloser, error) { + pushUrl, err := getPushUrl(addr) + if err != nil { + return nil, nil, err + } + + var res api.BoostStruct + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", + api.GetInternalStructs(&res), requestHeader, + append([]jsonrpc.Option{ + rpcenc.ReaderParamEncoder(pushUrl), + }, opts...)...) 
+ + return &res, closer, err +} diff --git a/build/params.go b/build/params.go new file mode 100644 index 000000000..5f7180a2a --- /dev/null +++ b/build/params.go @@ -0,0 +1,3 @@ +package build + +var MessageConfidence = uint64(5) diff --git a/cli/cmd.go b/cli/cmd.go new file mode 100644 index 000000000..b2b149233 --- /dev/null +++ b/cli/cmd.go @@ -0,0 +1,9 @@ +package cli + +import ( + cliutil "github.com/filecoin-project/boost/cli/util" +) + +var GetBoostAPI = cliutil.GetBoostAPI + +var DaemonContext = cliutil.DaemonContext diff --git a/cli/util/api.go b/cli/util/api.go new file mode 100644 index 000000000..28943dde4 --- /dev/null +++ b/cli/util/api.go @@ -0,0 +1,250 @@ +package cliutil + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/mitchellh/go-homedir" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-jsonrpc" + + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/boost/api" + "github.com/filecoin-project/boost/api/client" + "github.com/filecoin-project/boost/node/repo" +) + +const ( + metadataTraceContext = "traceContext" +) + +var log = logging.Logger("cli") + +// flagsForAPI returns flags passed on the command line with the listen address +// of the API server (only used by the tests), in the order of precedence they +// should be applied for the requested kind of node. +func flagsForAPI(t repo.RepoType) []string { + switch t { + case repo.Boost: + return []string{"boost-api-url"} + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +func flagsForRepo(t repo.RepoType) []string { + switch t { + case repo.Boost: + return []string{"boost-repo"} + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +// EnvsForAPIInfos returns the environment variables to use in order of precedence +// to determine the API endpoint of the specified node type. +// +// It returns the current variables and deprecated ones separately, so that +// the user can log a warning when deprecated ones are found to be in use. +func EnvsForAPIInfos(t repo.RepoType) (primary string, fallbacks []string, deprecated []string) { + switch t { + case repo.Boost: + return "BOOST_API_INFO", []string{"BOOST_API_INFO"}, nil + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +// GetAPIInfo returns the API endpoint to use for the specified kind of repo. +// +// The order of precedence is as follows: +// +// 1. *-api-url command line flags. +// 2. *_API_INFO environment variables +// 3. deprecated *_API_INFO environment variables +// 4. *-repo command line flags. +func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) { + // Check if there was a flag passed with the listen address of the API + // server (only used by the tests) + apiFlags := flagsForAPI(t) + for _, f := range apiFlags { + if !ctx.IsSet(f) { + continue + } + strma := ctx.String(f) + strma = strings.TrimSpace(strma) + + return APIInfo{Addr: strma}, nil + } + + // + // Note: it is not correct/intuitive to prefer environment variables over + // CLI flags (repo flags below). 
+ // + primaryEnv, fallbacksEnvs, deprecatedEnvs := EnvsForAPIInfos(t) + env, ok := os.LookupEnv(primaryEnv) + if ok { + return ParseApiInfo(env), nil + } + + for _, env := range deprecatedEnvs { + env, ok := os.LookupEnv(env) + if ok { + log.Warnf("Using deprecated env(%s) value, please use env(%s) instead.", env, primaryEnv) + return ParseApiInfo(env), nil + } + } + + repoFlags := flagsForRepo(t) + for _, f := range repoFlags { + // cannot use ctx.IsSet because it ignores default values + path := ctx.String(f) + if path == "" { + continue + } + + p, err := homedir.Expand(path) + if err != nil { + return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", f, err) + } + + r, err := repo.NewFS(p) + if err != nil { + return APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err) + } + + exists, err := r.Exists() + if err != nil { + return APIInfo{}, xerrors.Errorf("repo.Exists returned an error: %w", err) + } + + if !exists { + return APIInfo{}, errors.New("repo directory does not exist. Make sure your configuration is correct") + } + + ma, err := r.APIEndpoint() + if err != nil { + return APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err) + } + + token, err := r.APIToken() + if err != nil { + log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err) + } + + return APIInfo{ + Addr: ma.String(), + Token: token, + }, nil + } + + for _, env := range fallbacksEnvs { + env, ok := os.LookupEnv(env) + if ok { + return ParseApiInfo(env), nil + } + } + + return APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t) +} + +type GetBoostOptions struct { + PreferHttp bool +} + +type GetBoostOption func(*GetBoostOptions) + +func BoostUseHttp(opts *GetBoostOptions) { + opts.PreferHttp = true +} + +func GetBoostAPI(ctx *cli.Context, opts ...GetBoostOption) (api.Boost, jsonrpc.ClientCloser, error) { + var options GetBoostOptions + for _, opt := range opts { + opt(&options) + } + + if tn, ok := ctx.App.Metadata["testnode-boost"]; ok { + return tn.(api.Boost), func() {}, nil + } + + addr, headers, err := GetRawAPI(ctx, repo.Boost, "v0") + if err != nil { + return nil, nil, err + } + + if options.PreferHttp { + u, err := url.Parse(addr) + if err != nil { + return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err) + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + } + + addr = u.String() + } + + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using miner API v0 endpoint:", addr) + } + + return client.NewBoostRPCV0(ctx.Context, addr, headers) +} + +func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) { + ainfo, err := GetAPIInfo(ctx, t) + if err != nil { + return "", nil, xerrors.Errorf("could not get API info for %s: %w", t, err) + } + + addr, err := ainfo.DialArgs(version) + if err != nil { + return "", nil, xerrors.Errorf("could not get DialArgs: %w", err) + } + + if IsVeryVerbose { + _, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, addr) + } + + return addr, ainfo.AuthHeader(), nil +} + +func DaemonContext(cctx *cli.Context) context.Context { + if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok { + return mtCtx.(context.Context) + } + + return context.Background() +} + +// ReqContext returns context for cli execution. Calling it for the first time +// installs SIGTERM handler that will close returned context. +// Not safe for concurrent execution. 
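+// The returned context is cancelled when SIGTERM, SIGINT or SIGHUP is received.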
+func ReqContext(cctx *cli.Context) context.Context { + tCtx := DaemonContext(cctx) + + ctx, done := context.WithCancel(tCtx) + sigChan := make(chan os.Signal, 2) + go func() { + <-sigChan + done() + }() + signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP) + + return ctx +} diff --git a/cli/util/apiinfo.go b/cli/util/apiinfo.go new file mode 100644 index 000000000..a91f37b2b --- /dev/null +++ b/cli/util/apiinfo.go @@ -0,0 +1,80 @@ +package cliutil + +import ( + "net/http" + "net/url" + "regexp" + "strings" + + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" +) + +var ( + infoWithToken = regexp.MustCompile(`^[a-zA-Z0-9\-_]+?\.[a-zA-Z0-9\-_]+?\.([a-zA-Z0-9\-_]+)?:.+$`) +) + +type APIInfo struct { + Addr string + Token []byte +} + +func ParseApiInfo(s string) APIInfo { + var tok []byte + if infoWithToken.Match([]byte(s)) { + sp := strings.SplitN(s, ":", 2) + tok = []byte(sp[0]) + s = sp[1] + } + + return APIInfo{ + Addr: s, + Token: tok, + } +} + +func (a APIInfo) DialArgs(version string) (string, error) { + ma, err := multiaddr.NewMultiaddr(a.Addr) + if err == nil { + _, addr, err := manet.DialArgs(ma) + if err != nil { + return "", err + } + + return "ws://" + addr + "/rpc/" + version, nil + } + + _, err = url.Parse(a.Addr) + if err != nil { + return "", err + } + return a.Addr + "/rpc/" + version, nil +} + +func (a APIInfo) Host() (string, error) { + ma, err := multiaddr.NewMultiaddr(a.Addr) + if err == nil { + _, addr, err := manet.DialArgs(ma) + if err != nil { + return "", err + } + + return addr, nil + } + + spec, err := url.Parse(a.Addr) + if err != nil { + return "", err + } + return spec.Host, nil +} + +func (a APIInfo) AuthHeader() http.Header { + if len(a.Token) != 0 { + headers := http.Header{} + headers.Add("Authorization", "Bearer "+string(a.Token)) + return headers + } + log.Warn("API Token not set and requested, capabilities might be limited.") + return nil +} diff --git a/cmd/boost/dummydeal.go b/cmd/boost/dummydeal.go new file mode 100644 index 000000000..84deda3d1 --- /dev/null +++ b/cmd/boost/dummydeal.go @@ -0,0 +1,35 @@ +package main + +import ( + "github.com/davecgh/go-spew/spew" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + lcli "github.com/filecoin-project/boost/cli" +) + +var dummydealCmd = &cli.Command{ + Name: "dummydeal", + Usage: "Trigger a sample deal", + Before: before, + Action: func(cctx *cli.Context) error { + boostApi, ncloser, err := lcli.GetBoostAPI(cctx) + if err != nil { + return xerrors.Errorf("getting boost api: %w", err) + } + defer ncloser() + + ctx := lcli.DaemonContext(cctx) + + log.Debug("Get boost identity") + + res, err := boostApi.ID(ctx) + if err != nil { + return xerrors.Errorf("couldnt get boost identity: %w", err) + } + + spew.Dump(res) + + return nil + }, +} diff --git a/cmd/boost/main.go b/cmd/boost/main.go index 2f97ad02c..482034f5f 100644 --- a/cmd/boost/main.go +++ b/cmd/boost/main.go @@ -36,6 +36,7 @@ func main() { Commands: []*cli.Command{ runCmd, initCmd, + dummydealCmd, }, } app.Setup() diff --git a/filestore/_test/a/b/c/d/existing.txt b/filestore/_test/a/b/c/d/existing.txt deleted file mode 100644 index 8579fcecf13db2c663f1479dc74d18b9ddc73134..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 64 zcmV-G0Kfm|OE#j|AY1mlX{m40t#_J}Y4*AjSHpoDXCRkx Wy6nab4nhD+(}+=jK{z5}&pOB$cOGs4 diff --git a/go.mod b/go.mod index dac16ca10..b28adb1cd 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,13 @@ module 
github.com/filecoin-project/boost -go 1.16 +go 1.17 replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi require ( contrib.go.opencensus.io/exporter/prometheus v0.4.0 github.com/BurntSushi/toml v0.3.1 + github.com/davecgh/go-spew v1.1.1 github.com/dgraph-io/badger/v2 v2.2007.2 github.com/filecoin-project/dagstore v0.4.3 github.com/filecoin-project/go-address v0.0.5 @@ -25,6 +26,8 @@ require ( github.com/filecoin-project/lotus v1.13.0 github.com/filecoin-project/specs-actors v0.9.14 github.com/filecoin-project/specs-actors/v2 v2.3.5 + github.com/filecoin-project/specs-actors/v5 v5.0.4 + github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 github.com/golang/mock v1.6.0 github.com/google/uuid v1.3.0 @@ -40,12 +43,11 @@ require ( github.com/ipfs/go-graphsync v0.10.1 github.com/ipfs/go-ipfs-blockstore v1.0.4 github.com/ipfs/go-ipfs-blocksutil v0.0.1 + github.com/ipfs/go-ipfs-ds-help v1.0.0 github.com/ipfs/go-ipfs-exchange-interface v0.0.1 github.com/ipfs/go-ipfs-routing v0.1.0 - github.com/ipfs/go-ipld-cbor v0.0.5 github.com/ipfs/go-log/v2 v2.3.0 github.com/ipfs/go-metrics-interface v0.0.1 - github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 github.com/ipld/go-ipld-selector-text-lite v0.0.0 github.com/kelseyhightower/envconfig v1.4.0 @@ -69,7 +71,6 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.0.3 github.com/multiformats/go-multiaddr v0.4.0 - github.com/multiformats/go-varint v0.0.6 github.com/prometheus/client_golang v1.11.0 github.com/raulk/clock v1.1.0 github.com/raulk/go-watchdog v1.0.1 @@ -83,3 +84,223 @@ require ( golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 ) + +require ( + github.com/DataDog/zstd v1.4.1 // indirect + github.com/GeertJohan/go.incremental v1.0.0 // indirect + github.com/GeertJohan/go.rice v1.0.0 // indirect + github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee // indirect + github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa // indirect + github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect + github.com/Stebalien/go-bitfield v0.0.1 // indirect + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect + github.com/akavel/rsrc v0.8.0 // indirect + github.com/benbjohnson/clock v1.1.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bep/debounce v1.2.0 // indirect + github.com/btcsuite/btcd v0.22.0-beta // indirect + github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cheekybits/genny v1.0.0 // indirect + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect + github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 // indirect + github.com/coreos/go-systemd/v22 v22.1.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect + github.com/daaku/go.zipexe v1.0.0 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e // indirect + github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // 
indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/elastic/go-sysinfo v1.3.0 // indirect + github.com/elastic/go-windows v1.0.0 // indirect + github.com/elastic/gosigar v0.12.0 // indirect + github.com/fatih/color v1.9.0 // indirect + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f // indirect + github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect + github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 // indirect + github.com/filecoin-project/go-ds-versioning v0.1.0 // indirect + github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect + github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect + github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-paramfetch v0.0.2 // indirect + github.com/filecoin-project/specs-actors/v3 v3.1.1 // indirect + github.com/filecoin-project/specs-actors/v4 v4.0.1 // indirect + github.com/filecoin-project/specs-actors/v6 v6.0.0 // indirect + github.com/flynn/noise v1.0.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/gdamore/encoding v1.0.0 // indirect + github.com/gdamore/tcell/v2 v2.2.0 // indirect + github.com/go-kit/log v0.1.0 // indirect + github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-ole/go-ole v1.2.4 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/godbus/dbus/v5 v5.0.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/gorilla/websocket v1.4.2 // indirect + github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 // indirect + github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 // indirect + github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/huin/goupnp v1.0.2 // indirect + github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 // indirect + github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/go-cidutil v0.0.2 // indirect + github.com/ipfs/go-filestore v1.0.0 // indirect + github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect + github.com/ipfs/go-ipfs-cmds v0.3.0 // indirect + github.com/ipfs/go-ipfs-exchange-offline v0.0.1 // indirect + github.com/ipfs/go-ipfs-files v0.0.8 // indirect + github.com/ipfs/go-ipfs-http-client v0.0.6 // indirect + github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect + github.com/ipfs/go-ipfs-pq v0.0.2 // indirect + github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipld-cbor v0.0.5 // indirect + github.com/ipfs/go-ipld-format v0.2.0 // indirect + github.com/ipfs/go-ipns v0.1.2 // indirect + github.com/ipfs/go-log v1.0.5 // indirect + github.com/ipfs/go-merkledag v0.3.2 // indirect + github.com/ipfs/go-path v0.0.7 // indirect + github.com/ipfs/go-peertaskqueue v0.2.0 // indirect + github.com/ipfs/go-unixfs v0.2.6 // indirect + github.com/ipfs/go-verifcid v0.0.1 // indirect + github.com/ipfs/interface-go-ipfs-core v0.4.0 
// indirect + github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 // indirect + github.com/ipld/go-codec-dagpb v1.3.0 // indirect + github.com/ipld/go-ipld-prime v0.12.3 // indirect + github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/jessevdk/go-flags v1.4.0 // indirect + github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/klauspost/compress v1.11.7 // indirect + github.com/klauspost/cpuid/v2 v2.0.9 // indirect + github.com/koron/go-ssdp v0.0.2 // indirect + github.com/libp2p/go-addr-util v0.1.0 // indirect + github.com/libp2p/go-buffer-pool v0.0.2 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-conn-security-multistream v0.2.1 // indirect + github.com/libp2p/go-flow-metrics v0.0.3 // indirect + github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 // indirect + github.com/libp2p/go-libp2p-autonat v0.4.2 // indirect + github.com/libp2p/go-libp2p-blankhost v0.2.0 // indirect + github.com/libp2p/go-libp2p-circuit v0.4.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect + github.com/libp2p/go-libp2p-nat v0.0.6 // indirect + github.com/libp2p/go-libp2p-netutil v0.1.0 // indirect + github.com/libp2p/go-libp2p-pnet v0.2.0 // indirect + github.com/libp2p/go-libp2p-testing v0.4.2 // indirect + github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 // indirect + github.com/libp2p/go-mplex v0.3.0 // indirect + github.com/libp2p/go-msgio v0.0.6 // indirect + github.com/libp2p/go-nat v0.0.5 // indirect + github.com/libp2p/go-netroute v0.1.6 // indirect + github.com/libp2p/go-openssl v0.0.7 // indirect + github.com/libp2p/go-reuseport v0.0.2 // indirect + github.com/libp2p/go-reuseport-transport v0.0.5 // indirect + github.com/libp2p/go-sockaddr v0.1.1 // indirect + github.com/libp2p/go-stream-muxer-multistream v0.3.0 // indirect + github.com/libp2p/go-tcp-transport v0.2.8 // indirect + github.com/libp2p/go-ws-transport v0.5.0 // indirect + github.com/libp2p/go-yamux/v2 v2.2.0 // indirect + github.com/lucas-clemente/quic-go v0.21.2 // indirect + github.com/lucasb-eyer/go-colorful v1.0.3 // indirect + github.com/marten-seemann/qtls-go1-15 v0.1.5 // indirect + github.com/marten-seemann/qtls-go1-16 v0.1.4 // indirect + github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-colorable v0.1.8 // indirect + github.com/mattn/go-isatty v0.0.13 // indirect + github.com/mattn/go-runewidth v0.0.10 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/miekg/dns v1.1.43 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base36 v0.1.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.0.3 // indirect 
+ github.com/multiformats/go-multicodec v0.3.0 // indirect + github.com/multiformats/go-multihash v0.0.15 // indirect + github.com/multiformats/go-multistream v0.2.2 // indirect + github.com/multiformats/go-varint v0.0.6 // indirect + github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo v1.16.4 // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.30.0 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/statsd_exporter v0.21.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect + github.com/rivo/uniseg v0.1.0 // indirect + github.com/rs/cors v1.7.0 // indirect + github.com/russross/blackfriday/v2 v2.0.1 // indirect + github.com/shirou/gopsutil v2.18.12+incompatible // indirect + github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/tj/go-spin v1.1.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.0.1 // indirect + github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba // indirect + github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8 // indirect + github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 // indirect + github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 // indirect + github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect + github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb // indirect + github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 // indirect + github.com/zondax/hid v0.9.0 // indirect + github.com/zondax/ledger-go v0.12.1 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/dig v1.10.0 // indirect + go.uber.org/zap v1.19.0 // indirect + go4.org v0.0.0-20200411211856-f5505b9728dd // indirect + golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e // indirect + golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 // indirect + golang.org/x/mod v0.4.2 // indirect + golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect + golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 // indirect + golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/tools v0.1.5 // indirect + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect + google.golang.org/grpc v1.40.0 // indirect + google.golang.org/protobuf v1.27.1 // indirect + gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b // indirect + howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect + modernc.org/cc v1.0.0 // indirect + modernc.org/golex v1.0.1 // indirect + modernc.org/mathutil v1.1.1 // indirect + modernc.org/strutil v1.1.0 // indirect + modernc.org/xc v1.0.0 // indirect +) diff --git a/lib/rpcenc/reader.go b/lib/rpcenc/reader.go new file mode 100644 index 000000000..6693dc83d --- /dev/null +++ b/lib/rpcenc/reader.go @@ -0,0 +1,445 @@ +package rpcenc + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "sync" + "time" + + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-state-types/abi" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" +) + +var log = logging.Logger("rpcenc") + +var Timeout = 30 * time.Second + +type StreamType string + +const ( + Null StreamType = "null" + PushStream StreamType = "push" + // TODO: Data transfer handoff to workers? +) + +type ReaderStream struct { + Type StreamType + Info string +} + +var client = func() *http.Client { + c := *http.DefaultClient + c.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + return &c +}() + +/* + + Example rpc function: + Push(context.Context, io.Reader) error + + Request flow: + 1. Client invokes a method with an io.Reader param + 2. go-jsonrpc invokes `ReaderParamEncoder` for the client-provided io.Reader + 3. `ReaderParamEncoder` transforms the reader into a `ReaderStream` which can + be serialized as JSON, and sent as jsonrpc request parameter + 3.1. If the reader is of type `*sealing.NullReader`, the resulting object + is `ReaderStream{ Type: "null", Info: "[base 10 number of bytes]" }` + 3.2. If the reader is of type `*RpcReader`, and it wasn't read from, we + notify that RpcReader to go a different push endpoint, and return + a `ReaderStream` object like in 3.4. + 3.3. In remaining cases we start a goroutine which: + 3.3.1. Makes a HEAD request to the server push endpoint + 3.3.2. If the HEAD request is redirected, it follows the redirect + 3.3.3. If the request succeeds, it starts a POST request to the + endpoint to which the last HEAD request was sent with the + reader set as request body. + 3.4. We return a `ReaderStream` indicating the uuid of push request, ex: + `ReaderStream{ Type: "push", Info: "[UUID string]" }` + 4. If the reader wasn't a NullReader, the server will receive a HEAD (or + POST in case of older clients) request to the push endpoint. + 4.1. The server gets or registers an `*RpcReader` in the `readers` map. + 4.2. It waits for a request to a matching push endpoint to be opened + 4.3. After the request is opened, it returns the `*RpcReader` to + go-jsonrpc, which will pass it as the io.Reader parameter to the + rpc method implementation + 4.4. If the first request made to the push endpoint was a POST, the + returned `*RpcReader` acts as a simple reader reading the POST + request body + 4.5. If the first request made to the push endpoint was a HEAD + 4.5.1. On the first call to Read or Close the server responds with + a 200 OK header, the client starts a POST request to the same + push URL, and the reader starts passing through the POST request + body + 4.5.2. 
If the reader is passed to another (now client) RPC method as a + reader parameter, the server for the first request responds to the + HEAD request with http 302 Found, instructing the first client to + go to the push endpoint of the second RPC server + 5. If the reader was a NullReader (ReaderStream.Type=="null"), we instantiate + it, and provide to the method implementation + +*/ + +func ReaderParamEncoder(addr string) jsonrpc.Option { + // Client side parameter encoder. Runs on the rpc client side. io.Reader -> ReaderStream{} + return jsonrpc.WithParamEncoder(new(io.Reader), func(value reflect.Value) (reflect.Value, error) { + r := value.Interface().(io.Reader) + + if r, ok := r.(*sealing.NullReader); ok { + return reflect.ValueOf(ReaderStream{Type: Null, Info: fmt.Sprint(r.N)}), nil + } + + reqID := uuid.New() + u, err := url.Parse(addr) + if err != nil { + return reflect.Value{}, xerrors.Errorf("parsing push address: %w", err) + } + u.Path = path.Join(u.Path, reqID.String()) + + rpcReader, redir := r.(*RpcReader) + if redir { + // if we have an rpc stream, redirect instead of proxying all the data + redir = rpcReader.redirect(u.String()) + } + + if !redir { + go func() { + // TODO: figure out errors here + for { + req, err := http.NewRequest("HEAD", u.String(), nil) + if err != nil { + log.Errorf("sending HEAD request for the reder param: %+v", err) + return + } + req.Header.Set("Content-Type", "application/octet-stream") + resp, err := client.Do(req) + if err != nil { + log.Errorf("sending reader param: %+v", err) + return + } + // todo do we need to close the body for a head request? + + if resp.StatusCode == http.StatusFound { + nextStr := resp.Header.Get("Location") + u, err = url.Parse(nextStr) + if err != nil { + log.Errorf("sending HEAD request for the reder param, parsing next url (%s): %+v", nextStr, err) + return + } + + continue + } + + if resp.StatusCode == http.StatusNoContent { // reader closed before reading anything + // todo just return?? + return + } + + if resp.StatusCode != http.StatusOK { + b, _ := ioutil.ReadAll(resp.Body) + log.Errorf("sending reader param (%s): non-200 status: %s, msg: '%s'", u.String(), resp.Status, string(b)) + return + } + + break + } + + // now actually send the data + req, err := http.NewRequest("POST", u.String(), r) + if err != nil { + log.Errorf("sending reader param: %+v", err) + return + } + req.Header.Set("Content-Type", "application/octet-stream") + resp, err := client.Do(req) + if err != nil { + log.Errorf("sending reader param: %+v", err) + return + } + + defer resp.Body.Close() //nolint + + if resp.StatusCode != http.StatusOK { + b, _ := ioutil.ReadAll(resp.Body) + log.Errorf("sending reader param (%s): non-200 status: %s, msg: '%s'", u.String(), resp.Status, string(b)) + return + } + }() + } + + return reflect.ValueOf(ReaderStream{Type: PushStream, Info: reqID.String()}), nil + }) +} + +type resType int + +const ( + resStart resType = iota // send on first read after HEAD + resRedirect // send on redirect before first read after HEAD + resError + // done/closed = close res channel +) + +type readRes struct { + rt resType + meta string +} + +// RpcReader watches the ReadCloser and closes the res channel when +// either: (1) the ReaderCloser fails on Read (including with a benign error +// like EOF), or (2) when Close is called. +// +// Use it be notified of terminal states, in situations where a Read failure (or +// EOF) is considered a terminal state too (besides Close). 
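+//
+// Server side, an RpcReader is created per push request (see
+// ReaderParamDecoder below): for a HEAD request postBody stays nil; the first
+// Read sends resStart (the server answers the HEAD with 200), the client then
+// POSTs the data to the same push URL, and the reader picks up that request's
+// body via the next channel. For a direct POST it simply wraps the request
+// body.
+//
+// Rough wiring sketch (illustrative only; the mux path and RPC namespace are
+// assumptions):
+//
+//	readerHandler, readerServerOpt := ReaderParamDecoder()
+//	rpcServer := jsonrpc.NewServer(readerServerOpt)
+//	mux.Handle("/rpc/streams/v0/push/", readerHandler)
+//
+//	// and on the client side:
+//	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", outs, headers,
+//		ReaderParamEncoder(pushAddr))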
+type RpcReader struct { + postBody io.ReadCloser // nil on initial head request + next chan *RpcReader // on head will get us the postBody after sending resStart + mustRedirect bool + + res chan readRes + beginOnce *sync.Once + closeOnce sync.Once +} + +var ErrHasBody = errors.New("RPCReader has body, either already read from or from a client with no redirect support") +var ErrMustRedirect = errors.New("reader can't be read directly; marked as MustRedirect") + +// MustRedirect marks the reader as required to be redirected. Will make local +// calls Read fail. MUST be called before this reader is used in any goroutine. +// If the reader can't be redirected will return ErrHasBody +func (w *RpcReader) MustRedirect() error { + if w.postBody != nil { + w.closeOnce.Do(func() { + w.res <- readRes{ + rt: resError, + } + close(w.res) + }) + + return ErrHasBody + } + + w.mustRedirect = true + return nil +} + +func (w *RpcReader) beginPost() { + if w.mustRedirect { + w.res <- readRes{ + rt: resError, + } + close(w.res) + return + } + + if w.postBody == nil { + w.res <- readRes{ + rt: resStart, + } + + nr := <-w.next + + w.postBody = nr.postBody + w.res = nr.res + w.beginOnce = nr.beginOnce + } +} + +func (w *RpcReader) Read(p []byte) (int, error) { + w.beginOnce.Do(func() { + w.beginPost() + }) + + if w.mustRedirect { + return 0, ErrMustRedirect + } + + if w.postBody == nil { + return 0, xerrors.Errorf("reader already closed or redirected") + } + + n, err := w.postBody.Read(p) + if err != nil { + w.closeOnce.Do(func() { + close(w.res) + }) + } + return n, err +} + +func (w *RpcReader) Close() error { + w.beginOnce.Do(func() {}) + w.closeOnce.Do(func() { + close(w.res) + }) + if w.postBody == nil { + return nil + } + return w.postBody.Close() +} + +func (w *RpcReader) redirect(to string) bool { + if w.postBody != nil { + return false + } + + done := false + + w.beginOnce.Do(func() { + w.closeOnce.Do(func() { + w.res <- readRes{ + rt: resRedirect, + meta: to, + } + + done = true + close(w.res) + }) + }) + + return done +} + +func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { + var readersLk sync.Mutex + readers := map[uuid.UUID]chan *RpcReader{} + + // runs on the rpc server side, called by the client before making the jsonrpc request + hnd := func(resp http.ResponseWriter, req *http.Request) { + strId := path.Base(req.URL.Path) + u, err := uuid.Parse(strId) + if err != nil { + http.Error(resp, fmt.Sprintf("parsing reader uuid: %s", err), 400) + return + } + + readersLk.Lock() + ch, found := readers[u] + if !found { + ch = make(chan *RpcReader) + readers[u] = ch + } + readersLk.Unlock() + + wr := &RpcReader{ + res: make(chan readRes), + next: ch, + beginOnce: &sync.Once{}, + } + + switch req.Method { + case http.MethodHead: + // leave body nil + case http.MethodPost: + wr.postBody = req.Body + default: + http.Error(resp, "unsupported method", http.StatusMethodNotAllowed) + } + + tctx, cancel := context.WithTimeout(req.Context(), Timeout) + defer cancel() + + select { + case ch <- wr: + case <-tctx.Done(): + close(ch) + log.Errorf("context error in reader stream handler (1): %v", tctx.Err()) + resp.WriteHeader(500) + return + } + + select { + case res, ok := <-wr.res: + if !ok { + if req.Method == http.MethodHead { + resp.WriteHeader(http.StatusNoContent) + } else { + resp.WriteHeader(http.StatusOK) + } + return + } + // TODO should we check if we failed the Read, and if so + // return an HTTP 500? i.e. turn res into a chan error? 
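+			// Getting here means res delivered a value: res.rt selects how to answer
+			// this HEAD/POST request (redirect, start the POST, or report an error).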
+ + switch res.rt { + case resRedirect: + http.Redirect(resp, req, res.meta, http.StatusFound) + case resStart: // responding to HEAD, request POST with reader data + resp.WriteHeader(http.StatusOK) + case resError: + resp.WriteHeader(500) + default: + log.Errorf("unknown res.rt") + resp.WriteHeader(500) + } + + return + case <-req.Context().Done(): + log.Errorf("context error in reader stream handler (2): %v", req.Context().Err()) + resp.WriteHeader(500) + return + } + } + + // Server side reader decoder. runs on the rpc server side, invoked when decoding client request parameters. json(ReaderStream{}) -> io.Reader + dec := jsonrpc.WithParamDecoder(new(io.Reader), func(ctx context.Context, b []byte) (reflect.Value, error) { + var rs ReaderStream + if err := json.Unmarshal(b, &rs); err != nil { + return reflect.Value{}, xerrors.Errorf("unmarshaling reader id: %w", err) + } + + if rs.Type == Null { + n, err := strconv.ParseInt(rs.Info, 10, 64) + if err != nil { + return reflect.Value{}, xerrors.Errorf("parsing null byte count: %w", err) + } + + return reflect.ValueOf(sealing.NewNullReader(abi.UnpaddedPieceSize(n))), nil + } + + u, err := uuid.Parse(rs.Info) + if err != nil { + return reflect.Value{}, xerrors.Errorf("parsing reader UUDD: %w", err) + } + + readersLk.Lock() + ch, found := readers[u] + if !found { + ch = make(chan *RpcReader) + readers[u] = ch + } + readersLk.Unlock() + + ctx, cancel := context.WithTimeout(ctx, Timeout) + defer cancel() + + select { + case wr, ok := <-ch: + if !ok { + return reflect.Value{}, xerrors.Errorf("handler timed out") + } + + return reflect.ValueOf(wr), nil + case <-ctx.Done(): + return reflect.Value{}, ctx.Err() + } + }) + + return hnd, dec +} diff --git a/node/impl/client/car_helpers.go b/node/impl/client/car_helpers.go deleted file mode 100644 index c638b4bef..000000000 --- a/node/impl/client/car_helpers.go +++ /dev/null @@ -1,91 +0,0 @@ -package client - -import ( - "fmt" - "io" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/ipld/go-car/util" - "github.com/multiformats/go-varint" -) - -// ————————————————————————————————————————————————————————— -// -// This code is temporary, and should be deleted when -// https://github.com/ipld/go-car/issues/196 is resolved. -// -// ————————————————————————————————————————————————————————— - -func init() { - cbor.RegisterCborType(CarHeader{}) -} - -type CarHeader struct { - Roots []cid.Cid - Version uint64 -} - -func readHeader(r io.Reader) (*CarHeader, error) { - hb, err := ldRead(r, false) - if err != nil { - return nil, err - } - - var ch CarHeader - if err := cbor.DecodeInto(hb, &ch); err != nil { - return nil, fmt.Errorf("invalid header: %v", err) - } - - return &ch, nil -} - -func writeHeader(h *CarHeader, w io.Writer) error { - hb, err := cbor.DumpObject(h) - if err != nil { - return err - } - - return util.LdWrite(w, hb) -} - -func ldRead(r io.Reader, zeroLenAsEOF bool) ([]byte, error) { - l, err := varint.ReadUvarint(toByteReader(r)) - if err != nil { - // If the length of bytes read is non-zero when the error is EOF then signal an unclean EOF. 
- if l > 0 && err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - return nil, err - } else if l == 0 && zeroLenAsEOF { - return nil, io.EOF - } - - buf := make([]byte, l) - if _, err := io.ReadFull(r, buf); err != nil { - return nil, err - } - - return buf, nil -} - -type readerPlusByte struct { - io.Reader -} - -func (rb readerPlusByte) ReadByte() (byte, error) { - return readByte(rb) -} - -func readByte(r io.Reader) (byte, error) { - var p [1]byte - _, err := io.ReadFull(r, p[:]) - return p[0], err -} - -func toByteReader(r io.Reader) io.ByteReader { - if br, ok := r.(io.ByteReader); ok { - return br - } - return &readerPlusByte{r} -} diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index db2660cfd..8082238a7 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "net/http" "os" "path/filepath" @@ -15,20 +14,15 @@ import ( "go.uber.org/multierr" "golang.org/x/xerrors" + "github.com/filecoin-project/boost/storagemarket" "github.com/filecoin-project/go-address" dtimpl "github.com/filecoin-project/go-data-transfer/impl" dtnet "github.com/filecoin-project/go-data-transfer/network" dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" - piecefilestore "github.com/filecoin-project/go-fil-markets/filestore" piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl" "github.com/filecoin-project/go-fil-markets/retrievalmarket" retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" @@ -49,14 +43,10 @@ import ( "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/gen/slashfilter" - "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/markets" "github.com/filecoin-project/lotus/markets/dagstore" marketevents "github.com/filecoin-project/lotus/markets/loggers" - lotusminer "github.com/filecoin-project/lotus/miner" ) var ( @@ -101,24 +91,6 @@ func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.Retrieva }) } -func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h storagemarket.StorageProvider, j journal.Journal) { - ctx := helpers.LifecycleCtx(mctx, lc) - h.OnReady(marketevents.ReadyLogger("storage provider")) - lc.Append(fx.Hook{ - OnStart: func(context.Context) error { - h.SubscribeToEvents(marketevents.StorageProviderLogger) - - evtType := j.RegisterEventType("markets/storage/provider", "state_change") - h.SubscribeToEvents(markets.StorageProviderJournaler(j, evtType)) - - return h.Start(ctx) - }, - OnStop: func(context.Context) error { - return h.Stop() - }, - }) -} - func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node lapi.FullNode, minerAddress 
dtypes.MinerAddress) { lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { @@ -216,231 +188,6 @@ func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRe return blockstore.FromDatastore(stagingds), nil } -// StagingGraphsync creates a graphsync instance which reads and writes blocks -// to the StagingBlockstore -//func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { -//return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { -//graphsyncNetwork := gsnet.NewFromLibp2pHost(h) -//lsys := storeutil.LinkSystemForBlockstore(ibs) -//gs := graphsync.New(helpers.LifecycleCtx(mctx, lc), -//graphsyncNetwork, -//lsys, -//graphsync.RejectAllRequestsByDefault(), -//graphsync.MaxInProgressIncomingRequests(parallelTransfersForRetrieval), -//graphsync.MaxInProgressOutgoingRequests(parallelTransfersForStorage), -//graphsyncimpl.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), -//graphsyncimpl.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) - -//return gs -//} -//} - -func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api v1api.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*lotusminer.Miner, error) { - minerAddr, err := minerAddrFromDS(ds) - if err != nil { - return nil, err - } - - m := lotusminer.NewMiner(api, epp, minerAddr, sf, j) - - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - if err := m.Start(ctx); err != nil { - return err - } - return nil - }, - OnStop: func(ctx context.Context) error { - return m.Stop(ctx) - }, - }) - - return m, nil -} - -func NewStorageAsk(ctx helpers.MetricsCtx, fapi v1api.FullNode, ds dtypes.MetadataDS, minerAddress dtypes.MinerAddress, spn storagemarket.StorageProviderNode) (*storedask.StoredAsk, error) { - - mi, err := fapi.StateMinerInfo(ctx, address.Address(minerAddress), types.EmptyTSK) - if err != nil { - return nil, err - } - - providerDs := namespace.Wrap(ds, datastore.NewKey("/deals/provider")) - // legacy this was mistake where this key was place -- so we move the legacy key if need be - err = shared.MoveKey(providerDs, "/latest-ask", "/storage-ask/latest") - if err != nil { - return nil, err - } - return storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress), - storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize))) -} - -func BasicDealFilter(cfg config.DealmakingConfig, user dtypes.StorageDealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, - offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, - verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc, - unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc, - blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, - expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc, - startDelay dtypes.GetMaxDealStartDelayFunc, - spn storagemarket.StorageProviderNode, - r repo.LockedRepo, -) dtypes.StorageDealFilter { - return func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, - offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, - verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc, - unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc, - blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, - expectedSealTimeFunc 
dtypes.GetExpectedSealDurationFunc, - startDelay dtypes.GetMaxDealStartDelayFunc, - spn storagemarket.StorageProviderNode, - r repo.LockedRepo, - ) dtypes.StorageDealFilter { - - return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) { - b, err := onlineOk() - if err != nil { - return false, "miner error", err - } - - if deal.Ref != nil && deal.Ref.TransferType != storagemarket.TTManual && !b { - log.Warnf("online storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) - return false, "miner is not considering online storage deals", nil - } - - b, err = offlineOk() - if err != nil { - return false, "miner error", err - } - - if deal.Ref != nil && deal.Ref.TransferType == storagemarket.TTManual && !b { - log.Warnf("offline storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) - return false, "miner is not accepting offline storage deals", nil - } - - b, err = verifiedOk() - if err != nil { - return false, "miner error", err - } - - if deal.Proposal.VerifiedDeal && !b { - log.Warnf("verified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) - return false, "miner is not accepting verified storage deals", nil - } - - b, err = unverifiedOk() - if err != nil { - return false, "miner error", err - } - - if !deal.Proposal.VerifiedDeal && !b { - log.Warnf("unverified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) - return false, "miner is not accepting unverified storage deals", nil - } - - blocklist, err := blocklistFunc() - if err != nil { - return false, "miner error", err - } - - for idx := range blocklist { - if deal.Proposal.PieceCID.Equals(blocklist[idx]) { - log.Warnf("piece CID in proposal %s is blocklisted; rejecting storage deal proposal from client: %s", deal.Proposal.PieceCID, deal.Client.String()) - return false, fmt.Sprintf("miner has blocklisted piece CID %s", deal.Proposal.PieceCID), nil - } - } - - //sealDuration, err := expectedSealTimeFunc() - //if err != nil { - //return false, "miner error", err - //} - - //sealEpochs := sealDuration / (time.Duration(build.BlockDelaySecs) * time.Second) - //_, ht, err := spn.GetChainHead(ctx) - //if err != nil { - //return false, "failed to get chain head", err - //} - //earliest := abi.ChainEpoch(sealEpochs) + ht - //if deal.Proposal.StartEpoch < earliest { - //log.Warnw("proposed deal would start before sealing can be completed; rejecting storage deal proposal from client", "piece_cid", deal.Proposal.PieceCID, "client", deal.Client.String(), "seal_duration", sealDuration, "earliest", earliest, "curepoch", ht) - //return false, fmt.Sprintf("cannot seal a sector before %s", deal.Proposal.StartEpoch), nil - //} - - sd, err := startDelay() - if err != nil { - return false, "miner error", err - } - - dir := filepath.Join(r.Path(), StagingAreaDirName) - diskUsageBytes, err := r.DiskUsage(dir) - if err != nil { - return false, "miner error", err - } - - if cfg.MaxStagingDealsBytes != 0 && diskUsageBytes >= cfg.MaxStagingDealsBytes { - log.Errorw("proposed deal rejected because there are too many deals in the staging area at the moment", "MaxStagingDealsBytes", cfg.MaxStagingDealsBytes, "DiskUsageBytes", diskUsageBytes) - return false, "cannot accept deal as miner is overloaded at the moment - there are too many staging deals being processed", nil - } - - _ = sd - - // Reject if it's more than 
7 days in the future - // TODO: read from cfg - //maxStartEpoch := earliest + abi.ChainEpoch(uint64(sd.Seconds())/build.BlockDelaySecs) - //if deal.Proposal.StartEpoch > maxStartEpoch { - //return false, fmt.Sprintf("deal start epoch is too far in the future: %s > %s", deal.Proposal.StartEpoch, maxStartEpoch), nil - //} - - //if user != nil { - //return user(ctx, deal) - //} - - return true, "", nil - } - } -} - -func StorageProvider(minerAddress dtypes.MinerAddress, - storedAsk *storedask.StoredAsk, - h host.Host, ds dtypes.MetadataDS, - r repo.LockedRepo, - pieceStore dtypes.ProviderPieceStore, - dataTransfer dtypes.ProviderDataTransfer, - spn storagemarket.StorageProviderNode, - df dtypes.StorageDealFilter, - dsw *dagstore.Wrapper, -) (storagemarket.StorageProvider, error) { - net := smnet.NewFromLibp2pHost(h) - - dir := filepath.Join(r.Path(), StagingAreaDirName) - - // migrate temporary files that were created directly under the repo, by - // moving them to the new directory and symlinking them. - oldDir := r.Path() - if err := migrateDealStaging(oldDir, dir); err != nil { - return nil, xerrors.Errorf("failed to make deal staging directory %w", err) - } - - store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(dir)) - if err != nil { - return nil, err - } - - opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df)) - - return storageimpl.NewProvider( - net, - namespace.Wrap(ds, datastore.NewKey("/deals/provider")), - store, - dsw, - pieceStore, - dataTransfer, - spn, - address.Address(minerAddress), - storedAsk, - opt, - ) -} - func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter { return func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, @@ -478,20 +225,6 @@ func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork { return rmnet.NewFromLibp2pHost(h) } -// RetrievalPricingFunc configures the pricing function to use for retrieval deals. -//func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, -//_ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { - -//return func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, -//_ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { -//if cfg.RetrievalPricing.Strategy == config.RetrievalPricingExternalMode { -//return pricing.ExternalRetrievalPricingFunc(cfg.RetrievalPricing.External.Path) -//} - -//return retrievalimpl.DefaultPricingFunc(cfg.RetrievalPricing.Default.VerifiedDealsFreeTransfer) -//} -//} - // RetrievalProvider creates a new retrieval provider attached to the provider blockstore func RetrievalProvider( maddr dtypes.MinerAddress, @@ -767,59 +500,6 @@ func mutateCfg(r repo.LockedRepo, mutator func(*config.Boost)) error { return multierr.Combine(typeErr, setConfigErr) } -func migrateDealStaging(oldPath, newPath string) error { - dirInfo, err := os.Stat(newPath) - if err == nil { - if !dirInfo.IsDir() { - return xerrors.Errorf("%s is not a directory", newPath) - } - // The newPath exists already, below migration has already occurred. - return nil - } - - // if the directory doesn't exist, create it - if os.IsNotExist(err) { - if err := os.MkdirAll(newPath, 0755); err != nil { - return xerrors.Errorf("failed to mk directory %s for deal staging: %w", newPath, err) - } - } else { // if we failed for other reasons, abort. 
- return err - } - - // if this is the first time we created the directory, symlink all staged deals into it. "Migration" - // get a list of files in the miner repo - dirEntries, err := os.ReadDir(oldPath) - if err != nil { - return xerrors.Errorf("failed to list directory %s for deal staging: %w", oldPath, err) - } - - for _, entry := range dirEntries { - // ignore directories, they are not the deals. - if entry.IsDir() { - continue - } - // the FileStore from fil-storage-market creates temporary staged deal files with the pattern "fstmp" - // https://github.com/filecoin-project/go-fil-markets/blob/00ff81e477d846ac0cb58a0c7d1c2e9afb5ee1db/filestore/filestore.go#L69 - name := entry.Name() - if strings.Contains(name, "fstmp") { - // from the miner repo - oldPath := filepath.Join(oldPath, name) - // to its subdir "deal-staging" - newPath := filepath.Join(newPath, name) - // create a symbolic link in the new deal staging directory to preserve existing staged deals. - // all future staged deals will be created here. - if err := os.Rename(oldPath, newPath); err != nil { - return xerrors.Errorf("failed to move %s to %s: %w", oldPath, newPath, err) - } - if err := os.Symlink(newPath, oldPath); err != nil { - return xerrors.Errorf("failed to symlink %s to %s: %w", oldPath, newPath, err) - } - log.Infow("symlinked staged deal", "from", oldPath, "to", newPath) - } - } - return nil -} - func StorageNetworkName(ctx helpers.MetricsCtx, a v1api.FullNode) (dtypes.NetworkName, error) { n, err := a.StateNetworkName(ctx) if err != nil { @@ -827,3 +507,7 @@ func StorageNetworkName(ctx helpers.MetricsCtx, a v1api.FullNode) (dtypes.Networ } return dtypes.NetworkName(n), nil } + +func NewStorageMarketProvider(ctx helpers.MetricsCtx, a v1api.FullNode) (*storagemarket.Provider, error) { + return storagemarket.NewProvider(a) +} diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go index 150898f1a..d870ef4d3 100644 --- a/node/repo/fsrepo.go +++ b/node/repo/fsrepo.go @@ -42,8 +42,8 @@ const ( type RepoType int const ( - //_ = iota // Default is invalid - Boost RepoType = iota + 6 + _ = iota // Default is invalid + Boost RepoType = iota ) func (t RepoType) String() string { diff --git a/storage/sectorblocks/sectorblocks.go b/storage/sectorblocks/sectorblocks.go new file mode 100644 index 000000000..ad4ffc0db --- /dev/null +++ b/storage/sectorblocks/sectorblocks.go @@ -0,0 +1,176 @@ +package sectorblocks + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "io" + "sync" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/ipfs/go-datastore/query" + dshelp "github.com/ipfs/go-ipfs-ds-help" + "golang.org/x/xerrors" + + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +type SealSerialization uint8 + +const ( + SerializationUnixfs0 SealSerialization = 'u' +) + +var dsPrefix = datastore.NewKey("/sealedblocks") + +var ErrNotFound = errors.New("not found") + +func DealIDToDsKey(dealID abi.DealID) datastore.Key { + buf := make([]byte, binary.MaxVarintLen64) + size := binary.PutUvarint(buf, uint64(dealID)) + return dshelp.NewKeyFromBinary(buf[:size]) +} + +func DsKeyToDealID(key datastore.Key) (uint64, error) { + buf, err := dshelp.BinaryFromDsKey(key) + if err != nil { + return 0, err + } + dealID, _ := binary.Uvarint(buf) + return dealID, nil +} + +type 
SectorBuilder interface { + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) + SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) +} + +type SectorBlocks struct { + SectorBuilder + + keys datastore.Batching + keyLk sync.Mutex +} + +func NewSectorBlocks(sb SectorBuilder, ds dtypes.MetadataDS) *SectorBlocks { + sbc := &SectorBlocks{ + SectorBuilder: sb, + keys: namespace.Wrap(ds, dsPrefix), + } + + return sbc +} + +func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, offset abi.PaddedPieceSize, size abi.UnpaddedPieceSize) error { + st.keyLk.Lock() // TODO: make this multithreaded + defer st.keyLk.Unlock() + + v, err := st.keys.Get(DealIDToDsKey(dealID)) + if err == datastore.ErrNotFound { + err = nil + } + if err != nil { + return xerrors.Errorf("getting existing refs: %w", err) + } + + var refs api.SealedRefs + if len(v) > 0 { + if err := cborutil.ReadCborRPC(bytes.NewReader(v), &refs); err != nil { + return xerrors.Errorf("decoding existing refs: %w", err) + } + } + + refs.Refs = append(refs.Refs, api.SealedRef{ + SectorID: sectorID, + Offset: offset, + Size: size, + }) + + newRef, err := cborutil.Dump(&refs) + if err != nil { + return xerrors.Errorf("serializing refs: %w", err) + } + return st.keys.Put(DealIDToDsKey(dealID), newRef) // TODO: batch somehow +} + +func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { + so, err := st.SectorBuilder.SectorAddPieceToAny(ctx, size, r, d) + if err != nil { + return 0, 0, err + } + + // TODO: DealID has very low finality here + err = st.writeRef(d.DealID, so.Sector, so.Offset, size) + if err != nil { + return 0, 0, xerrors.Errorf("writeRef: %w", err) + } + + return so.Sector, so.Offset, nil +} + +func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { + res, err := st.keys.Query(query.Query{}) + if err != nil { + return nil, err + } + + ents, err := res.Rest() + if err != nil { + return nil, err + } + + out := map[uint64][]api.SealedRef{} + for _, ent := range ents { + dealID, err := DsKeyToDealID(datastore.RawKey(ent.Key)) + if err != nil { + return nil, err + } + + var refs api.SealedRefs + if err := cborutil.ReadCborRPC(bytes.NewReader(ent.Value), &refs); err != nil { + return nil, err + } + + out[dealID] = refs.Refs + } + + return out, nil +} + +func (st *SectorBlocks) GetRefs(dealID abi.DealID) ([]api.SealedRef, error) { // TODO: track local sectors + ent, err := st.keys.Get(DealIDToDsKey(dealID)) + if err == datastore.ErrNotFound { + err = ErrNotFound + } + if err != nil { + return nil, err + } + + var refs api.SealedRefs + if err := cborutil.ReadCborRPC(bytes.NewReader(ent), &refs); err != nil { + return nil, err + } + + return refs.Refs, nil +} + +func (st *SectorBlocks) GetSize(dealID abi.DealID) (uint64, error) { + refs, err := st.GetRefs(dealID) + if err != nil { + return 0, err + } + + return uint64(refs[0].Size), nil +} + +func (st *SectorBlocks) Has(dealID abi.DealID) (bool, error) { + // TODO: ensure sector is still there + return st.keys.Has(DealIDToDsKey(dealID)) +} diff --git a/storagemarket/adapter.go b/storagemarket/adapter.go new file mode 100644 index 000000000..3d393ede2 --- /dev/null +++ b/storagemarket/adapter.go @@ -0,0 +1,388 @@ +package storagemarket + +// this file implements storagemarket.StorageProviderNode + +// TODO: we should remove 
this, and incorporate in the actual Provider + +import ( + "context" + "io" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + + "github.com/filecoin-project/boost/build" + "github.com/filecoin-project/boost/storage/sectorblocks" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/lib/sigs" + "github.com/filecoin-project/lotus/markets/utils" +) + +var addPieceRetryWait = 5 * time.Minute +var addPieceRetryTimeout = 6 * time.Hour +var log = logging.Logger("storageadapter") + +type Adapter struct { + v1api.FullNode + + secb *sectorblocks.SectorBlocks + //ev *events.Events + + dealPublisher *DealPublisher + + addBalanceSpec *api.MessageSendSpec + maxDealCollateralMultiplier uint64 + //dsMatcher *dealStateMatcher + scMgr *SectorCommittedManager +} + +func (n *Adapter) PublishDeals(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) { + return n.dealPublisher.Publish(ctx, deal.ClientDealProposal) +} + +func (n *Adapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData io.Reader) (*storagemarket.PackingResult, error) { + if deal.PublishCid == nil { + return nil, xerrors.Errorf("deal.PublishCid can't be nil") + } + + sdInfo := api.PieceDealInfo{ + DealID: deal.DealID, + DealProposal: &deal.Proposal, + PublishCid: deal.PublishCid, + DealSchedule: api.DealSchedule{ + StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, + EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, + }, + KeepUnsealed: deal.FastRetrieval, + } + + p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) + curTime := build.Clock.Now() + for build.Clock.Since(curTime) < addPieceRetryTimeout { + if !xerrors.Is(err, sealing.ErrTooManySectorsSealing) { + if err != nil { + log.Errorf("failed to addPiece for deal %d, err: %v", deal.DealID, err) + } + break + } + select { + case <-build.Clock.After(addPieceRetryWait): + p, offset, err = n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) + case <-ctx.Done(): + return nil, xerrors.New("context expired while waiting to retry AddPiece") + } + } + + if err != nil { + return nil, xerrors.Errorf("AddPiece failed: %s", err) + } + log.Warnf("New Deal: deal %d", deal.DealID) + + return &storagemarket.PackingResult{ + SectorNumber: p, + Offset: offset, + Size: pieceSize.Padded(), + }, nil +} + +func (n *Adapter) VerifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte, encodedTs shared.TipSetToken) (bool, error) { + addr, err := n.StateAccountKey(ctx, addr, types.EmptyTSK) + if err != nil { + return false, err + } + + err = sigs.Verify(&sig, addr, input) + return err == nil, err +} + +func (n *Adapter) GetMinerWorkerAddress(ctx context.Context, maddr address.Address, tok shared.TipSetToken) 
(address.Address, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return address.Undef, err + } + + mi, err := n.StateMinerInfo(ctx, maddr, tsk) + if err != nil { + return address.Address{}, err + } + return mi.Worker, nil +} + +func (n *Adapter) GetProofType(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return 0, err + } + + mi, err := n.StateMinerInfo(ctx, maddr, tsk) + if err != nil { + return 0, err + } + + nver, err := n.StateNetworkVersion(ctx, tsk) + if err != nil { + return 0, err + } + + return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType) +} + +func (n *Adapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { + signer, err := n.StateAccountKey(ctx, signer, types.EmptyTSK) + if err != nil { + return nil, err + } + + localSignature, err := n.WalletSign(ctx, signer, b) + if err != nil { + return nil, err + } + return localSignature, nil +} + +func (n *Adapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return n.MarketReserveFunds(ctx, wallet, addr, amt) +} + +func (n *Adapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { + return n.MarketReleaseFunds(ctx, addr, amt) +} + +// Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. +func (n *Adapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { + // (Provider Node API) + smsg, err := n.MpoolPushMessage(ctx, &types.Message{ + To: market.Address, + From: addr, + Value: amount, + Method: market.Methods.AddBalance, + }, n.addBalanceSpec) + if err != nil { + return cid.Undef, err + } + + return smsg.Cid(), nil +} + +func (n *Adapter) GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) { + tsk, err := types.TipSetKeyFromBytes(encodedTs) + if err != nil { + return storagemarket.Balance{}, err + } + + bal, err := n.StateMarketBalance(ctx, addr, tsk) + if err != nil { + return storagemarket.Balance{}, err + } + + return utils.ToSharedBalance(bal), nil +} + +// TODO: why doesnt this method take in a sector ID? +func (n *Adapter) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, encodedTs shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) { + refs, err := n.secb.GetRefs(dealID) + if err != nil { + return 0, 0, 0, err + } + if len(refs) == 0 { + return 0, 0, 0, xerrors.New("no sector information for deal ID") + } + + // TODO: better strategy (e.g. 
look for already unsealed) + var best api.SealedRef + var bestSi api.SectorInfo + for _, r := range refs { + si, err := n.secb.SectorBuilder.SectorsStatus(ctx, r.SectorID, false) + if err != nil { + return 0, 0, 0, xerrors.Errorf("getting sector info: %w", err) + } + if si.State == api.SectorState(sealing.Proving) { + best = r + bestSi = si + break + } + } + if bestSi.State == api.SectorState(sealing.UndefinedSectorState) { + return 0, 0, 0, xerrors.New("no sealed sector found") + } + return best.SectorID, best.Offset, best.Size.Padded(), nil +} + +func (n *Adapter) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) { + bounds, err := n.StateDealProviderCollateralBounds(ctx, size, isVerified, types.EmptyTSK) + if err != nil { + return abi.TokenAmount{}, abi.TokenAmount{}, err + } + + // The maximum amount of collateral that the provider will put into escrow + // for a deal is calculated as a multiple of the minimum bounded amount + max := types.BigMul(bounds.Min, types.NewInt(n.maxDealCollateralMultiplier)) + + return bounds.Min, max, nil +} + +// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) +func (n *Adapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error { + return n.scMgr.OnDealSectorPreCommitted(ctx, provider, market.DealProposal(proposal), *publishCid, cb) +} + +// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) +func (n *Adapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { + return n.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, market.DealProposal(proposal), *publishCid, cb) +} + +func (n *Adapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { + head, err := n.ChainHead(ctx) + if err != nil { + return nil, 0, err + } + + return head.Key().Bytes(), head.Height(), nil +} + +func (n *Adapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error { + receipt, err := n.StateWaitMsg(ctx, mcid, 2*build.MessageConfidence, api.LookbackNoLimit, true) + if err != nil { + return cb(0, nil, cid.Undef, err) + } + return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil) +} + +func (n *Adapter) WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal market2.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { + // Wait for deal to be published (plus additional time for confidence) + receipt, err := n.StateWaitMsg(ctx, publishCid, 2*build.MessageConfidence, api.LookbackNoLimit, true) + if err != nil { + return nil, xerrors.Errorf("WaitForPublishDeals errored: %w", err) + } + if receipt.Receipt.ExitCode != exitcode.Ok { + return nil, xerrors.Errorf("WaitForPublishDeals exit code: %s", receipt.Receipt.ExitCode) + } + + // The deal ID may have changed since publish if there was a reorg, so + // get the current deal ID + head, err := n.ChainHead(ctx) + if err != nil { + return nil, xerrors.Errorf("WaitForPublishDeals failed to get chain head: %w", err) + } + + res, err := n.scMgr.dealInfo.GetCurrentDealInfo(ctx, head.Key().Bytes(), 
(*market.DealProposal)(&proposal), publishCid) + if err != nil { + return nil, xerrors.Errorf("WaitForPublishDeals getting deal info errored: %w", err) + } + + return &storagemarket.PublishDealsWaitResult{DealID: res.DealID, FinalCid: receipt.Message}, nil +} + +func (n *Adapter) GetDataCap(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*abi.StoragePower, error) { + tsk, err := types.TipSetKeyFromBytes(encodedTs) + if err != nil { + return nil, err + } + + sp, err := n.StateVerifiedClientStatus(ctx, addr, tsk) + return sp, err +} + +func (n *Adapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { + //head, err := n.ChainHead(ctx) + //if err != nil { + //return xerrors.Errorf("client: failed to get chain head: %w", err) + //} + + //sd, err := n.StateMarketStorageDeal(ctx, dealID, head.Key()) + //if err != nil { + //return xerrors.Errorf("client: failed to look up deal %d on chain: %w", dealID, err) + //} + + //// Called immediately to check if the deal has already expired or been slashed + //checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { + //if ts == nil { + //// keep listening for events + //return false, true, nil + //} + + //// Check if the deal has already expired + //if sd.Proposal.EndEpoch <= ts.Height() { + //onDealExpired(nil) + //return true, false, nil + //} + + //// If there is no deal assume it's already been slashed + //if sd.State.SectorStartEpoch < 0 { + //onDealSlashed(ts.Height(), nil) + //return true, false, nil + //} + + //// No events have occurred yet, so return + //// done: false, more: true (keep listening for events) + //return false, true, nil + //} + + //// Called when there was a match against the state change we're looking for + //// and the chain has advanced to the confidence height + //stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { + //// Check if the deal has already expired + //if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { + //onDealExpired(nil) + //return false, nil + //} + + //// Timeout waiting for state change + //if states == nil { + //log.Error("timed out waiting for deal expiry") + //return false, nil + //} + + //changedDeals, ok := states.(state.ChangedDeals) + //if !ok { + //panic("Expected state.ChangedDeals") + //} + + //deal, ok := changedDeals[dealID] + //if !ok { + //// No change to deal + //return true, nil + //} + + //// Deal was slashed + //if deal.To == nil { + //onDealSlashed(ts2.Height(), nil) + //return false, nil + //} + + //return true, nil + //} + + //// Called when there was a chain reorg and the state change was reverted + //revert := func(ctx context.Context, ts *types.TipSet) error { + //// TODO: Is it ok to just ignore this? 
+ //log.Warn("deal state reverted; TODO: actually handle this!") + //return nil + //} + + // Watch for state changes to the deal + //match := n.dsMatcher.matcher(ctx, dealID) + + // Wait until after the end epoch for the deal and then timeout + //timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 + //if err := n.ev.StateChanged(checkFunc, stateChanged, revert, int(build.MessageConfidence)+1, timeout, match); err != nil { + //return xerrors.Errorf("failed to set up state changed handler: %w", err) + //} + + return nil +} diff --git a/storagemarket/deal_acceptance.go b/storagemarket/deal_acceptance.go index 4fd6c11a6..07ee71c7c 100644 --- a/storagemarket/deal_acceptance.go +++ b/storagemarket/deal_acceptance.go @@ -5,10 +5,8 @@ import ( "fmt" "github.com/filecoin-project/boost/storagemarket/types" - - "github.com/filecoin-project/go-fil-markets/shared" - cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -20,13 +18,12 @@ import ( const DealMaxLabelSize = 256 // ValidateDealProposal validates a proposed deal against the provider criteria -func (p *provider) validateDealProposal(deal types.ProviderDealState) error { - tok, curEpoch, err := p.lotusNode.GetChainHead(p.ctx) +func (p *Provider) validateDealProposal(deal types.ProviderDealState) error { + tok, curEpoch, err := p.adapter.GetChainHead(p.ctx) if err != nil { return fmt.Errorf("node error getting most recent state id: %w", err) } - // verify client signature if err := p.validateSignature(tok, deal); err != nil { return fmt.Errorf("validateSignature failed: %w", err) } @@ -74,7 +71,7 @@ func (p *provider) validateDealProposal(deal types.ProviderDealState) error { return fmt.Errorf("invalid deal end epoch %d: cannot be more than %d past current epoch %d", proposal.EndEpoch, miner.MaxSectorExpirationExtension, curEpoch) } - pcMin, pcMax, err := p.lotusNode.DealProviderCollateralBounds(p.ctx, proposal.PieceSize, proposal.VerifiedDeal) + pcMin, pcMax, err := p.adapter.DealProviderCollateralBounds(p.ctx, proposal.PieceSize, proposal.VerifiedDeal) if err != nil { return fmt.Errorf("node error getting collateral bounds: %w", err) } @@ -92,7 +89,7 @@ func (p *provider) validateDealProposal(deal types.ProviderDealState) error { } // check market funds - clientMarketBalance, err := p.lotusNode.GetBalance(p.ctx, proposal.Client, tok) + clientMarketBalance, err := p.adapter.GetBalance(p.ctx, proposal.Client, tok) if err != nil { return fmt.Errorf("node error getting client market balance failed: %w", err) } @@ -105,7 +102,7 @@ func (p *provider) validateDealProposal(deal types.ProviderDealState) error { // Verified deal checks if proposal.VerifiedDeal { - dataCap, err := p.lotusNode.GetDataCap(p.ctx, proposal.Client, tok) + dataCap, err := p.adapter.GetDataCap(p.ctx, proposal.Client, tok) if err != nil { return fmt.Errorf("node error fetching verified data cap: %w", err) } @@ -123,7 +120,7 @@ func (p *provider) validateDealProposal(deal types.ProviderDealState) error { return nil } -func (p *provider) validateAsk(deal types.ProviderDealState) error { +func (p *Provider) validateAsk(deal types.ProviderDealState) error { ask := p.GetAsk() askPrice := ask.Price if deal.ClientDealProposal.Proposal.VerifiedDeal { @@ -147,13 +144,13 @@ func (p *provider) validateAsk(deal types.ProviderDealState) error { return nil } -func (p *provider) validateSignature(tok shared.TipSetToken, deal 
types.ProviderDealState) error { +func (p *Provider) validateSignature(tok shared.TipSetToken, deal types.ProviderDealState) error { b, err := cborutil.Dump(&deal.ClientDealProposal.Proposal) if err != nil { return fmt.Errorf("failed to serialize client deal proposal: %w", err) } - verified, err := p.lotusNode.VerifySignature(p.ctx, deal.ClientDealProposal.ClientSignature, deal.ClientDealProposal.Proposal.Client, b, tok) + verified, err := p.adapter.VerifySignature(p.ctx, deal.ClientDealProposal.ClientSignature, deal.ClientDealProposal.Proposal.Client, b, tok) if err != nil { return fmt.Errorf("error verifying signature: %w", err) } diff --git a/storagemarket/deal_execution.go b/storagemarket/deal_execution.go index 42ed7000c..a2091c03d 100644 --- a/storagemarket/deal_execution.go +++ b/storagemarket/deal_execution.go @@ -8,8 +8,6 @@ import ( "os" "time" - "github.com/filecoin-project/boost/stores" - "github.com/libp2p/go-libp2p-core/event" "github.com/filecoin-project/go-padreader" @@ -25,7 +23,7 @@ import ( "github.com/filecoin-project/boost/storagemarket/types" ) -func (p *provider) failDeal(ds *types.ProviderDealState, err error) { +func (p *Provider) failDeal(ds *types.ProviderDealState, err error) { p.cleanupDeal(ds) select { @@ -34,7 +32,7 @@ func (p *provider) failDeal(ds *types.ProviderDealState, err error) { } } -func (p *provider) cleanupDeal(ds *types.ProviderDealState) { +func (p *Provider) cleanupDeal(ds *types.ProviderDealState) { _ = os.Remove(ds.InboundCARPath) // ... //cleanup resources here @@ -50,9 +48,10 @@ func transferEventToProviderEvent(ds *types.ProviderDealState, evt types.DataTra return types.ProviderDealEvent{} } -func (p *provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) { +func (p *Provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) { // publish an event with the current state of the deal if err := publisher.Emit(dealStateToEvent(ds)); err != nil { + panic(err) // log } @@ -63,6 +62,7 @@ func (p *provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) return } if err := publisher.Emit(dealStateToEvent(ds)); err != nil { + panic(err) // log } } @@ -74,6 +74,7 @@ func (p *provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) return } if err := publisher.Emit(dealStateToEvent(ds)); err != nil { + panic(err) // log } } @@ -86,6 +87,7 @@ func (p *provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) } if err := publisher.Emit(dealStateToEvent(ds)); err != nil { + panic(err) // log } } @@ -98,7 +100,7 @@ func (p *provider) doDeal(ds *types.ProviderDealState, publisher event.Emitter) // Watch deal on chain and change state in DB and emit notifications. 
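	// A possible shape for this follow-up step (an illustrative sketch only, not
	// part of this change): once the piece has been added, ask the adapter to
	// watch the chain for sector commitment and emit a final event. It assumes
	// the Provider's Address field and the deal-state fields (SectorID,
	// PublishCid) referenced elsewhere in this patch, plus a market2 import in
	// this file.
	//
	//	err := p.adapter.OnDealSectorCommitted(p.ctx, p.Address, ds.DealID, ds.SectorID,
	//		market2.DealProposal(ds.ClientDealProposal.Proposal), &ds.PublishCid,
	//		func(err error) {
	//			if err != nil {
	//				p.failDeal(ds, err)
	//				return
	//			}
	//			// TODO: persist a final checkpoint here once dbApi is re-enabled
	//			_ = publisher.Emit(dealStateToEvent(ds))
	//		})
	//	if err != nil {
	//		p.failDeal(ds, err)
	//	}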
} -func (p *provider) transferAndVerify(ds *types.ProviderDealState, publisher event.Emitter) error { +func (p *Provider) transferAndVerify(ds *types.ProviderDealState, publisher event.Emitter) error { // Transfer Data u, err := url.Parse(ds.TransferURL) if err != nil { @@ -120,6 +122,7 @@ func (p *provider) transferAndVerify(ds *types.ProviderDealState, publisher even case evt := <-transferSub.Out(): dtEvent := evt.(types.DataTransferEvent) if err := publisher.Emit(transferEventToProviderEvent(ds, dtEvent)); err != nil { + panic(err) // log } // if dtEvent.Type == Completed || Cancelled || Error { @@ -143,9 +146,9 @@ func (p *provider) transferAndVerify(ds *types.ProviderDealState, publisher even // persist transferred checkpoint ds.Checkpoint = dealcheckpoints.Transferred - if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { - return fmt.Errorf("failed to persist deal state: %w", err) - } + //if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + //return fmt.Errorf("failed to persist deal state: %w", err) + //} // TODO : Emit a notification here return nil @@ -153,7 +156,7 @@ func (p *provider) transferAndVerify(ds *types.ProviderDealState, publisher even // GeneratePieceCommitment generates the pieceCid for the CARv1 deal payload in // the CARv2 file that already exists at the given path. -func (p *provider) generatePieceCommitment(ds *types.ProviderDealState) (c cid.Cid, finalErr error) { +func (p *Provider) generatePieceCommitment(ds *types.ProviderDealState) (c cid.Cid, finalErr error) { rd, err := carv2.OpenReader(ds.InboundCARPath) if err != nil { return cid.Undef, fmt.Errorf("failed to get CARv2 reader: %w", err) @@ -204,38 +207,38 @@ func (p *provider) generatePieceCommitment(ds *types.ProviderDealState) (c cid.C return cidAndSize.PieceCID, err } -func (p *provider) publishDeal(ds *types.ProviderDealState) error { - if ds.Checkpoint < dealcheckpoints.Published { - mcid, err := p.lotusNode.PublishDeals(p.ctx, *ds) - if err != nil { - return fmt.Errorf("failed to publish deal: %w", err) - } - - ds.PublishCid = mcid - ds.Checkpoint = dealcheckpoints.Published - if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { - return fmt.Errorf("failed to update deal: %w", err) - } - } - - res, err := p.lotusNode.WaitForPublishDeals(p.ctx, ds.PublishCid, ds.ClientDealProposal.Proposal) - if err != nil { - return fmt.Errorf("wait for publish failed: %w", err) - } - - ds.PublishCid = res.FinalCid - ds.DealID = res.DealID - ds.Checkpoint = dealcheckpoints.PublishConfirmed - if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { - return fmt.Errorf("failed to update deal: %w", err) - } +func (p *Provider) publishDeal(ds *types.ProviderDealState) error { + //if ds.Checkpoint < dealcheckpoints.Published { + //mcid, err := p.fullnodeApi.PublishDeals(p.ctx, *ds) + //if err != nil { + //return fmt.Errorf("failed to publish deal: %w", err) + //} + + //ds.PublishCid = mcid + //ds.Checkpoint = dealcheckpoints.Published + //if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + //return fmt.Errorf("failed to update deal: %w", err) + //} + //} + + //res, err := p.lotusNode.WaitForPublishDeals(p.ctx, ds.PublishCid, ds.ClientDealProposal.Proposal) + //if err != nil { + //return fmt.Errorf("wait for publish failed: %w", err) + //} + + //ds.PublishCid = res.FinalCid + //ds.DealID = res.DealID + //ds.Checkpoint = dealcheckpoints.PublishConfirmed + //if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + //return fmt.Errorf("failed to update deal: %w", err) + //} // TODO Release funds ? 
How does that work ? return nil } // HandoffDeal hands off a published deal for sealing and commitment in a sector -func (p *provider) addPiece(ds *types.ProviderDealState) error { +func (p *Provider) addPiece(ds *types.ProviderDealState) error { v2r, err := carv2.OpenReader(ds.InboundCARPath) if err != nil { return fmt.Errorf("failed to open CARv2 file: %w", err) @@ -247,36 +250,38 @@ func (p *provider) addPiece(ds *types.ProviderDealState) error { return fmt.Errorf("failed to create inflator: %w", err) } - packingInfo, packingErr := p.lotusNode.OnDealComplete( - p.ctx, - *ds, - ds.ClientDealProposal.Proposal.PieceSize.Unpadded(), - paddedReader, - ) - - // Close the reader as we're done reading from it. - if err := v2r.Close(); err != nil { - return fmt.Errorf("failed to close CARv2 reader: %w", err) - } - - if packingErr != nil { - return fmt.Errorf("packing piece %s: %w", ds.ClientDealProposal.Proposal.PieceCID, packingErr) - } + _ = paddedReader - ds.SectorID = packingInfo.SectorNumber - ds.Offset = packingInfo.Offset - ds.Length = packingInfo.Size - ds.Checkpoint = dealcheckpoints.AddedPiece - if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { - return fmt.Errorf("failed to update deal: %w", err) - } + //packingInfo, packingErr := p.fullnodeApi.OnDealComplete( + //p.ctx, + //*ds, + //ds.ClientDealProposal.Proposal.PieceSize.Unpadded(), + //paddedReader, + //) - // Register the deal data as a "shard" with the DAG store. Later it can be - // fetched from the DAG store during retrieval. - if err := stores.RegisterShardSync(p.ctx, p.dagStore, ds.ClientDealProposal.Proposal.PieceCID, ds.InboundCARPath, true); err != nil { - err = fmt.Errorf("failed to activate shard: %w", err) - log.Error(err) - } + // Close the reader as we're done reading from it. + //if err := v2r.Close(); err != nil { + //return fmt.Errorf("failed to close CARv2 reader: %w", err) + //} + + //if packingErr != nil { + //return fmt.Errorf("packing piece %s: %w", ds.ClientDealProposal.Proposal.PieceCID, packingErr) + //} + + //ds.SectorID = packingInfo.SectorNumber + //ds.Offset = packingInfo.Offset + //ds.Length = packingInfo.Size + //ds.Checkpoint = dealcheckpoints.AddedPiece + //if err := p.dbApi.CreateOrUpdateDeal(ds); err != nil { + //return fmt.Errorf("failed to update deal: %w", err) + //} + + //// Register the deal data as a "shard" with the DAG store. Later it can be + //// fetched from the DAG store during retrieval. 
+ //if err := stores.RegisterShardSync(p.ctx, p.dagStore, ds.ClientDealProposal.Proposal.PieceCID, ds.InboundCARPath, true); err != nil { + //err = fmt.Errorf("failed to activate shard: %w", err) + //log.Error(err) + //} return nil } diff --git a/storagemarket/deal_publisher.go b/storagemarket/deal_publisher.go new file mode 100644 index 000000000..a51beef63 --- /dev/null +++ b/storagemarket/deal_publisher.go @@ -0,0 +1,448 @@ +package storagemarket + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/ipfs/go-cid" + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage" +) + +type dealPublisherAPI interface { + ChainHead(context.Context) (*types.TipSet, error) + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + + WalletBalance(context.Context, address.Address) (types.BigInt, error) + WalletHas(context.Context, address.Address) (bool, error) + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) +} + +// DealPublisher batches deal publishing so that many deals can be included in +// a single publish message. This saves gas for miners that publish deals +// frequently. +// When a deal is submitted, the DealPublisher waits a configurable amount of +// time for other deals to be submitted before sending the publish message. +// There is a configurable maximum number of deals that can be included in one +// message. When the limit is reached the DealPublisher immediately submits a +// publish message with all deals in the queue. 
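An illustrative usage sketch of the batching behaviour described above (full, addrSel, ctx and clientDealProposal are placeholder names; full is assumed to satisfy dealPublisherAPI, as api.FullNode does): deals submitted concurrently are queued until either Period elapses or MaxDealsPerMsg is reached, at which point a single PublishStorageDeals message is pushed.

    dp := newDealPublisher(full, addrSel, PublishMsgConfig{
        Period:         time.Minute, // wait up to a minute for more deals to batch
        MaxDealsPerMsg: 8,           // or publish as soon as 8 deals are queued
    }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(0)})

    // Publish blocks until the batched publish message has been pushed
    // (or ctx is cancelled / validation fails).
    msgCid, err := dp.Publish(ctx, clientDealProposal)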
+type DealPublisher struct { + api dealPublisherAPI + as *storage.AddressSelector + + ctx context.Context + Shutdown context.CancelFunc + + maxDealsPerPublishMsg uint64 + publishPeriod time.Duration + publishSpec *api.MessageSendSpec + + lk sync.Mutex + pending []*pendingDeal + cancelWaitForMoreDeals context.CancelFunc + publishPeriodStart time.Time + startEpochSealingBuffer abi.ChainEpoch +} + +// A deal that is queued to be published +type pendingDeal struct { + ctx context.Context + deal market2.ClientDealProposal + Result chan publishResult +} + +// The result of publishing a deal +type publishResult struct { + msgCid cid.Cid + err error +} + +func newPendingDeal(ctx context.Context, deal market2.ClientDealProposal) *pendingDeal { + return &pendingDeal{ + ctx: ctx, + deal: deal, + Result: make(chan publishResult), + } +} + +type PublishMsgConfig struct { + // The amount of time to wait for more deals to arrive before + // publishing + Period time.Duration + // The maximum number of deals to include in a single PublishStorageDeals + // message + MaxDealsPerMsg uint64 + // Minimum start epoch buffer to give time for sealing of sector with deal + StartEpochSealingBuffer uint64 +} + +func NewDealPublisher( + feeConfig *config.MinerFeeConfig, + publishMsgCfg PublishMsgConfig, +) func(lc fx.Lifecycle, full api.FullNode, as *storage.AddressSelector) *DealPublisher { + return func(lc fx.Lifecycle, full api.FullNode, as *storage.AddressSelector) *DealPublisher { + maxFee := abi.NewTokenAmount(0) + if feeConfig != nil { + maxFee = abi.TokenAmount(feeConfig.MaxPublishDealsFee) + } + publishSpec := &api.MessageSendSpec{MaxFee: maxFee} + dp := newDealPublisher(full, as, publishMsgCfg, publishSpec) + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + dp.Shutdown() + return nil + }, + }) + return dp + } +} + +func newDealPublisher( + dpapi dealPublisherAPI, + as *storage.AddressSelector, + publishMsgCfg PublishMsgConfig, + publishSpec *api.MessageSendSpec, +) *DealPublisher { + ctx, cancel := context.WithCancel(context.Background()) + return &DealPublisher{ + api: dpapi, + as: as, + ctx: ctx, + Shutdown: cancel, + maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg, + publishPeriod: publishMsgCfg.Period, + startEpochSealingBuffer: abi.ChainEpoch(publishMsgCfg.StartEpochSealingBuffer), + publishSpec: publishSpec, + } +} + +// PendingDeals returns the list of deals that are queued up to be published +func (p *DealPublisher) PendingDeals() api.PendingDealInfo { + p.lk.Lock() + defer p.lk.Unlock() + + // Filter out deals whose context has been cancelled + deals := make([]*pendingDeal, 0, len(p.pending)) + for _, dl := range p.pending { + if dl.ctx.Err() == nil { + deals = append(deals, dl) + } + } + + pending := make([]market2.ClientDealProposal, len(deals)) + for i, deal := range deals { + pending[i] = deal.deal + } + + return api.PendingDealInfo{ + Deals: pending, + PublishPeriodStart: p.publishPeriodStart, + PublishPeriod: p.publishPeriod, + } +} + +// ForcePublishPendingDeals publishes all pending deals without waiting for +// the publish period to elapse +func (p *DealPublisher) ForcePublishPendingDeals() { + p.lk.Lock() + defer p.lk.Unlock() + + log.Infof("force publishing deals") + p.publishAllDeals() +} + +func (p *DealPublisher) Publish(ctx context.Context, deal market2.ClientDealProposal) (cid.Cid, error) { + pdeal := newPendingDeal(ctx, deal) + + // Add the deal to the queue + p.processNewDeal(pdeal) + + // Wait for the deal to be submitted + select { + case <-ctx.Done(): + 
return cid.Undef, ctx.Err() + case res := <-pdeal.Result: + return res.msgCid, res.err + } +} + +func (p *DealPublisher) processNewDeal(pdeal *pendingDeal) { + p.lk.Lock() + defer p.lk.Unlock() + + // Filter out any cancelled deals + p.filterCancelledDeals() + + // If all deals have been cancelled, clear the wait-for-deals timer + if len(p.pending) == 0 && p.cancelWaitForMoreDeals != nil { + p.cancelWaitForMoreDeals() + p.cancelWaitForMoreDeals = nil + } + + // Make sure the new deal hasn't been cancelled + if pdeal.ctx.Err() != nil { + return + } + + // Add the new deal to the queue + p.pending = append(p.pending, pdeal) + log.Infof("add deal with piece CID %s to publish deals queue - %d deals in queue (max queue size %d)", + pdeal.deal.Proposal.PieceCID, len(p.pending), p.maxDealsPerPublishMsg) + + // If the maximum number of deals per message has been reached or we're not batching, send a + // publish message + if uint64(len(p.pending)) >= p.maxDealsPerPublishMsg || p.publishPeriod == 0 { + log.Infof("publish deals queue has reached max size of %d, publishing deals", p.maxDealsPerPublishMsg) + p.publishAllDeals() + return + } + + // Otherwise wait for more deals to arrive or the timeout to be reached + p.waitForMoreDeals() +} + +func (p *DealPublisher) waitForMoreDeals() { + // Check if we're already waiting for deals + if !p.publishPeriodStart.IsZero() { + elapsed := build.Clock.Since(p.publishPeriodStart) + log.Infof("%s elapsed of / %s until publish deals queue is published", + elapsed, p.publishPeriod) + return + } + + // Set a timeout to wait for more deals to arrive + log.Infof("waiting publish deals queue period of %s before publishing", p.publishPeriod) + ctx, cancel := context.WithCancel(p.ctx) + + // Create the timer _before_ taking the current time so publishPeriod+timeout is always >= + // the actual timer timeout. 
+ timer := build.Clock.Timer(p.publishPeriod) + + p.publishPeriodStart = build.Clock.Now() + p.cancelWaitForMoreDeals = cancel + + go func() { + select { + case <-ctx.Done(): + timer.Stop() + case <-timer.C: + p.lk.Lock() + defer p.lk.Unlock() + + // The timeout has expired so publish all pending deals + log.Infof("publish deals queue period of %s has expired, publishing deals", p.publishPeriod) + p.publishAllDeals() + } + }() +} + +func (p *DealPublisher) publishAllDeals() { + // If the timeout hasn't yet been cancelled, cancel it + if p.cancelWaitForMoreDeals != nil { + p.cancelWaitForMoreDeals() + p.cancelWaitForMoreDeals = nil + p.publishPeriodStart = time.Time{} + } + + // Filter out any deals that have been cancelled + p.filterCancelledDeals() + deals := p.pending + p.pending = nil + + // Send the publish message + go p.publishReady(deals) +} + +func (p *DealPublisher) publishReady(ready []*pendingDeal) { + if len(ready) == 0 { + return + } + + // onComplete is called when the publish message has been sent or there + // was an error + onComplete := func(pd *pendingDeal, msgCid cid.Cid, err error) { + // Send the publish result on the pending deal's Result channel + res := publishResult{ + msgCid: msgCid, + err: err, + } + select { + case <-p.ctx.Done(): + case <-pd.ctx.Done(): + case pd.Result <- res: + } + } + + // Validate each deal to make sure it can be published + validated := make([]*pendingDeal, 0, len(ready)) + deals := make([]market2.ClientDealProposal, 0, len(ready)) + for _, pd := range ready { + // Validate the deal + if err := p.validateDeal(pd.deal); err != nil { + // Validation failed, complete immediately with an error + go onComplete(pd, cid.Undef, xerrors.Errorf("publish validation failed: %w", err)) + continue + } + + validated = append(validated, pd) + deals = append(deals, pd.deal) + } + + // Send the publish message + msgCid, err := p.publishDealProposals(deals) + + // Signal that each deal has been published + for _, pd := range validated { + go onComplete(pd, msgCid, err) + } +} + +// validateDeal checks that the deal proposal start epoch hasn't already +// elapsed +func (p *DealPublisher) validateDeal(deal market2.ClientDealProposal) error { + start := time.Now() + + pcid, err := deal.Proposal.Cid() + if err != nil { + return xerrors.Errorf("computing proposal cid: %w", err) + } + + head, err := p.api.ChainHead(p.ctx) + if err != nil { + return err + } + if head.Height()+p.startEpochSealingBuffer > deal.Proposal.StartEpoch { + return xerrors.Errorf( + "cannot publish deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", + deal.Proposal.PieceCID, head.Height(), deal.Proposal.StartEpoch) + } + + mi, err := p.api.StateMinerInfo(p.ctx, deal.Proposal.Provider, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting provider info: %w", err) + } + + params, err := actors.SerializeParams(&market2.PublishStorageDealsParams{ + Deals: []market0.ClientDealProposal{deal}, + }) + if err != nil { + return xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) + } + + addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero()) + if err != nil { + return xerrors.Errorf("selecting address for publishing deals: %w", err) + } + + res, err := p.api.StateCall(p.ctx, &types.Message{ + To: market.Address, + From: addr, + Value: types.NewInt(0), + Method: market.Methods.PublishStorageDeals, + Params: params, + }, head.Key()) + if err != nil { + return xerrors.Errorf("simulating deal publish 
message: %w", err) + } + if res.MsgRct.ExitCode != exitcode.Ok { + return xerrors.Errorf("simulating deal publish message: non-zero exitcode %s; message: %s", res.MsgRct.ExitCode, res.Error) + } + + took := time.Since(start) + log.Infow("validating deal", "took", took, "proposal", pcid) + + return nil +} + +// Sends the publish message +func (p *DealPublisher) publishDealProposals(deals []market2.ClientDealProposal) (cid.Cid, error) { + if len(deals) == 0 { + return cid.Undef, nil + } + + log.Infof("publishing %d deals in publish deals queue with piece CIDs: %s", len(deals), pieceCids(deals)) + + provider := deals[0].Proposal.Provider + for _, dl := range deals { + if dl.Proposal.Provider != provider { + msg := fmt.Sprintf("publishing %d deals failed: ", len(deals)) + + "not all deals are for same provider: " + + fmt.Sprintf("deal with piece CID %s is for provider %s ", deals[0].Proposal.PieceCID, deals[0].Proposal.Provider) + + fmt.Sprintf("but deal with piece CID %s is for provider %s", dl.Proposal.PieceCID, dl.Proposal.Provider) + return cid.Undef, xerrors.Errorf(msg) + } + } + + mi, err := p.api.StateMinerInfo(p.ctx, provider, types.EmptyTSK) + if err != nil { + return cid.Undef, err + } + + params, err := actors.SerializeParams(&market2.PublishStorageDealsParams{ + Deals: deals, + }) + + if err != nil { + return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) + } + + addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero()) + if err != nil { + return cid.Undef, xerrors.Errorf("selecting address for publishing deals: %w", err) + } + + smsg, err := p.api.MpoolPushMessage(p.ctx, &types.Message{ + To: market.Address, + From: addr, + Value: types.NewInt(0), + Method: market.Methods.PublishStorageDeals, + Params: params, + }, p.publishSpec) + + if err != nil { + return cid.Undef, err + } + return smsg.Cid(), nil +} + +func pieceCids(deals []market2.ClientDealProposal) string { + cids := make([]string, 0, len(deals)) + for _, dl := range deals { + cids = append(cids, dl.Proposal.PieceCID.String()) + } + return strings.Join(cids, ", ") +} + +// filter out deals that have been cancelled +func (p *DealPublisher) filterCancelledDeals() { + filtered := p.pending[:0] + for _, pd := range p.pending { + if pd.ctx.Err() != nil { + continue + } + filtered = append(filtered, pd) + } + p.pending = filtered +} diff --git a/storagemarket/lotusnode/node.go b/storagemarket/lotusnode/node.go deleted file mode 100644 index 00f847ae8..000000000 --- a/storagemarket/lotusnode/node.go +++ /dev/null @@ -1,107 +0,0 @@ -package lotusnode - -import ( - "context" - "io" - - "github.com/filecoin-project/boost/storagemarket/types" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/ipfs/go-cid" -) - -// DealSectorPreCommittedCallback is a callback that runs when a sector is pre-committed -// sectorNumber: the number of the sector that the deal is in -// isActive: the deal is already active -type DealSectorPreCommittedCallback func(sectorNumber abi.SectorNumber, isActive bool, err error) - -// DealSectorCommittedCallback is a callback that runs when a sector is committed -type 
DealSectorCommittedCallback func(err error) - -// DealExpiredCallback is a callback that runs when a deal expires -type DealExpiredCallback func(err error) - -// DealSlashedCallback is a callback that runs when a deal gets slashed -type DealSlashedCallback func(slashEpoch abi.ChainEpoch, err error) - -// PackingResult returns information about how a deal was put into a sector -type PackingResult struct { - SectorNumber abi.SectorNumber - Offset abi.PaddedPieceSize - Size abi.PaddedPieceSize -} - -// PublishDealsWaitResult is the result of a call to wait for publish deals to -// appear on chain -type PublishDealsWaitResult struct { - DealID abi.DealID - FinalCid cid.Cid -} - -// StorageProviderNode are node dependencies for a StorageProvider -type StorageProviderNode interface { - // GetChainHead returns a tipset token for the current chain head - GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) - - // Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. - AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) - - // ReserveFunds reserves the given amount of funds is ensures it is available for the deal - ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) - - // ReleaseFunds releases funds reserved with ReserveFunds - ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error - - // VerifySignature verifies a given set of data was signed properly by a given address's private key - VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, plaintext []byte, tok shared.TipSetToken) (bool, error) - - // WaitForMessage waits until a message appears on chain. If it is already on chain, the callback is called immediately - WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, cid.Cid, error) error) error - - // SignsBytes signs the given data with the given address's private key - SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) - - // DealProviderCollateralBounds returns the min and max collateral a storage provider can issue. - DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) - - // OnDealSectorPreCommitted waits for a deal's sector to be pre-committed - OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid, cb DealSectorPreCommittedCallback) error - - // OnDealSectorCommitted waits for a deal's sector to be sealed and proved, indicating the deal is active - OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid *cid.Cid, cb DealSectorCommittedCallback) error - - // OnDealExpiredOrSlashed registers callbacks to be called when the deal expires or is slashed - OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired DealExpiredCallback, onDealSlashed DealSlashedCallback) error - - // PublishDeals publishes a deal on chain, returns the message cid, but does not wait for message to appear - PublishDeals(ctx context.Context, deal types.ProviderDealState) (cid.Cid, error) - - // WaitForPublishDeals waits for a deal publish message to land on chain. 
- WaitForPublishDeals(ctx context.Context, mcid cid.Cid, proposal market.DealProposal) (*PublishDealsWaitResult, error) - - // OnDealComplete is called when a deal is complete and on chain, and data has been transferred and is ready to be added to a sector - OnDealComplete(ctx context.Context, deal types.ProviderDealState, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) (*PackingResult, error) - - // GetMinerWorkerAddress returns the worker address associated with a miner - GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken) (address.Address, error) - - // GetDataCap gets the current data cap for addr - GetDataCap(ctx context.Context, addr address.Address, tok shared.TipSetToken) (*verifreg.DataCap, error) - - // GetProofType gets the current seal proof type for the given miner. - GetProofType(ctx context.Context, addr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) - - // GetBalance returns locked/unlocked for a storage participant. Used by both providers and clients. - GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (Balance, error) -} - -// Balance represents a current balance of funds in the StorageMarketActor. -type Balance struct { - Locked abi.TokenAmount - Available abi.TokenAmount -} diff --git a/storagemarket/ondealsectorcommitted.go b/storagemarket/ondealsectorcommitted.go new file mode 100644 index 000000000..a6259aba6 --- /dev/null +++ b/storagemarket/ondealsectorcommitted.go @@ -0,0 +1,352 @@ +package storagemarket + +import ( + "bytes" + "context" + "sync" + + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/types" +) + +type eventsCalledAPI interface { + Called(ctx context.Context, check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error +} + +type dealInfoAPI interface { + GetCurrentDealInfo(ctx context.Context, tok sealing.TipSetToken, proposal *market.DealProposal, publishCid cid.Cid) (sealing.CurrentDealInfo, error) +} + +type diffPreCommitsAPI interface { + diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) +} + +type SectorCommittedManager struct { + ev eventsCalledAPI + dealInfo dealInfoAPI + dpc diffPreCommitsAPI +} + +func NewSectorCommittedManager(ev eventsCalledAPI, tskAPI sealing.CurrentDealInfoTskAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager { + dim := &sealing.CurrentDealInfoManager{ + CDAPI: &sealing.CurrentDealInfoAPIAdapter{CurrentDealInfoTskAPI: tskAPI}, + } + return newSectorCommittedManager(ev, dim, dpcAPI) +} + +func newSectorCommittedManager(ev eventsCalledAPI, dealInfo dealInfoAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager { + return &SectorCommittedManager{ + ev: ev, + dealInfo: dealInfo, + dpc: dpcAPI, + } +} + +func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, 
proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorPreCommittedCallback) error { + // Ensure callback is only called once + var once sync.Once + cb := func(sectorNumber abi.SectorNumber, isActive bool, err error) { + once.Do(func() { + callback(sectorNumber, isActive, err) + }) + } + + // First check if the deal is already active, and if so, bail out + checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { + dealInfo, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid) + if err != nil { + // Note: the error returned from here will end up being returned + // from OnDealSectorPreCommitted so no need to call the callback + // with the error + return false, false, err + } + + if isActive { + // Deal is already active, bail out + cb(0, true, nil) + return true, false, nil + } + + // Check that precommits which landed between when the deal was published + // and now don't already contain the deal we care about. + // (this can happen when the precommit lands vary quickly (in tests), or + // when the client node was down after the deal was published, and when + // the precommit containing it landed on chain) + + publishTs, err := types.TipSetKeyFromBytes(dealInfo.PublishMsgTipSet) + if err != nil { + return false, false, err + } + + diff, err := mgr.dpc.diffPreCommits(ctx, provider, publishTs, ts.Key()) + if err != nil { + return false, false, err + } + + for _, info := range diff.Added { + for _, d := range info.Info.DealIDs { + if d == dealInfo.DealID { + cb(info.Info.SectorNumber, false, nil) + return true, false, nil + } + } + } + + // Not yet active, start matching against incoming messages + return false, true, nil + } + + // Watch for a pre-commit message to the provider. + matchEvent := func(msg *types.Message) (bool, error) { + matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch) + return matched, nil + } + + // The deal must be accepted by the deal proposal start epoch, so timeout + // if the chain reaches that epoch + timeoutEpoch := proposal.StartEpoch + 1 + + // Check if the message params included the deal ID we're looking for. 
+ called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { + defer func() { + if err != nil { + cb(0, false, xerrors.Errorf("handling applied event: %w", err)) + } + }() + + // If the deal hasn't been activated by the proposed start epoch, the + // deal will timeout (when msg == nil it means the timeout epoch was reached) + if msg == nil { + err = xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch) + return false, err + } + + // Ignore the pre-commit message if it was not executed successfully + if rec.ExitCode != 0 { + return true, nil + } + + // When there is a reorg, the deal ID may change, so get the + // current deal ID from the publish message CID + res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), &proposal, publishCid) + if err != nil { + return false, err + } + + // Extract the message parameters + sn, err := dealSectorInPreCommitMsg(msg, res) + if err != nil { + return false, err + } + + if sn != nil { + cb(*sn, false, nil) + } + + // Didn't find the deal ID in this message, so keep looking + return true, nil + } + + revert := func(ctx context.Context, ts *types.TipSet) error { + log.Warn("deal pre-commit reverted; TODO: actually handle this!") + // TODO: Just go back to DealSealing? + return nil + } + + if err := mgr.ev.Called(ctx, checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil { + return xerrors.Errorf("failed to set up called handler: %w", err) + } + + return nil +} + +func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, provider address.Address, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorCommittedCallback) error { + // Ensure callback is only called once + var once sync.Once + cb := func(err error) { + once.Do(func() { + callback(err) + }) + } + + // First check if the deal is already active, and if so, bail out + checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { + _, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid) + if err != nil { + // Note: the error returned from here will end up being returned + // from OnDealSectorCommitted so no need to call the callback + // with the error + return false, false, err + } + + if isActive { + // Deal is already active, bail out + cb(nil) + return true, false, nil + } + + // Not yet active, start matching against incoming messages + return false, true, nil + } + + // Match a prove-commit sent to the provider with the given sector number + matchEvent := func(msg *types.Message) (matched bool, err error) { + if msg.To != provider { + return false, nil + } + + return sectorInCommitMsg(msg, sectorNumber) + } + + // The deal must be accepted by the deal proposal start epoch, so timeout + // if the chain reaches that epoch + timeoutEpoch := proposal.StartEpoch + 1 + + called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { + defer func() { + if err != nil { + cb(xerrors.Errorf("handling applied event: %w", err)) + } + }() + + // If the deal hasn't been activated by the proposed start epoch, the + // deal will timeout (when msg == nil it means the timeout epoch was reached) + if msg == nil { + err := xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", 
proposal.PieceCID, proposal.StartEpoch) + return false, err + } + + // Ignore the prove-commit message if it was not executed successfully + if rec.ExitCode != 0 { + return true, nil + } + + // Get the deal info + res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), &proposal, publishCid) + if err != nil { + return false, xerrors.Errorf("failed to look up deal on chain: %w", err) + } + + // Make sure the deal is active + if res.MarketDeal.State.SectorStartEpoch < 1 { + return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", res.DealID, ts.ParentState(), ts.Height()) + } + + log.Infof("Storage deal %d activated at epoch %d", res.DealID, res.MarketDeal.State.SectorStartEpoch) + + cb(nil) + + return false, nil + } + + revert := func(ctx context.Context, ts *types.TipSet) error { + log.Warn("deal activation reverted; TODO: actually handle this!") + // TODO: Just go back to DealSealing? + return nil + } + + if err := mgr.ev.Called(ctx, checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil { + return xerrors.Errorf("failed to set up called handler: %w", err) + } + + return nil +} + +// dealSectorInPreCommitMsg tries to find a sector containing the specified deal +func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) { + switch msg.Method { + case miner.Methods.PreCommitSector: + var params miner.SectorPreCommitInfo + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + // Check through the deal IDs associated with this message + for _, did := range params.DealIDs { + if did == res.DealID { + // Found the deal ID in this message. Callback with the sector ID. + return ¶ms.SectorNumber, nil + } + } + case miner.Methods.PreCommitSectorBatch: + var params miner5.PreCommitSectorBatchParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + for _, precommit := range params.Sectors { + // Check through the deal IDs associated with this message + for _, did := range precommit.DealIDs { + if did == res.DealID { + // Found the deal ID in this message. Callback with the sector ID. 
+ return &precommit.SectorNumber, nil + } + } + } + default: + return nil, xerrors.Errorf("unexpected method %d", msg.Method) + } + + return nil, nil +} + +// sectorInCommitMsg checks if the provided message commits specified sector +func sectorInCommitMsg(msg *types.Message, sectorNumber abi.SectorNumber) (bool, error) { + switch msg.Method { + case miner.Methods.ProveCommitSector: + var params miner.ProveCommitSectorParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + return params.SectorNumber == sectorNumber, nil + + case miner.Methods.ProveCommitAggregate: + var params miner5.ProveCommitAggregateParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + set, err := params.SectorNumbers.IsSet(uint64(sectorNumber)) + if err != nil { + return false, xerrors.Errorf("checking if sectorNumber is set in commit aggregate message: %w", err) + } + + return set, nil + + default: + return false, nil + } +} + +func (mgr *SectorCommittedManager) checkIfDealAlreadyActive(ctx context.Context, ts *types.TipSet, proposal *market.DealProposal, publishCid cid.Cid) (sealing.CurrentDealInfo, bool, error) { + res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), proposal, publishCid) + if err != nil { + // TODO: This may be fine for some errors + return res, false, xerrors.Errorf("failed to look up deal on chain: %w", err) + } + + // Sector was slashed + if res.MarketDeal.State.SlashEpoch > 0 { + return res, false, xerrors.Errorf("deal %d was slashed at epoch %d", res.DealID, res.MarketDeal.State.SlashEpoch) + } + + // Sector with deal is already active + if res.MarketDeal.State.SectorStartEpoch > 0 { + return res, true, nil + } + + return res, false, nil +} diff --git a/storagemarket/provider.go b/storagemarket/provider.go index eeed27a7a..67191c23a 100644 --- a/storagemarket/provider.go +++ b/storagemarket/provider.go @@ -9,7 +9,7 @@ import ( "time" "github.com/filecoin-project/boost/filestore" - "github.com/filecoin-project/boost/stores" + "github.com/filecoin-project/lotus/api/v1api" "github.com/libp2p/go-eventbus" @@ -18,23 +18,17 @@ import ( "github.com/filecoin-project/boost/storagemarket/types/dealcheckpoints" "github.com/filecoin-project/boost/storagemarket/datatransfer" - "github.com/filecoin-project/boost/storagemarket/lotusnode" "github.com/filecoin-project/go-address" - logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/boost/storagemarket/types" - - "github.com/filecoin-project/boost/storagemarket/datastore" ) -var log = logging.Logger("provider") - type Config struct { MaxTransferDuration time.Duration } -type provider struct { +type Provider struct { config Config // Address of the provider on chain. 
Address address.Address @@ -53,31 +47,35 @@ type provider struct { restartDealsChan chan restartReq // Database API - dbApi datastore.API + //dbApi datastore.API - // interacts with lotus - lotusNode lotusnode.StorageProviderNode - dagStore stores.DAGStoreWrapper + fullnodeApi v1api.FullNode + //dagStore stores.DAGStoreWrapper transport datatransfer.Transport + + adapter *Adapter } -func NewProvider(dbApi datastore.API, lotusNode lotusnode.StorageProviderNode) (*provider, error) { +//func NewProvider(dbApi datastore.API, lotusNode lotusnode.StorageProviderNode) (*provider, error) { +func NewProvider(fullnodeApi v1api.FullNode) (*Provider, error) { ctx, cancel := context.WithCancel(context.Background()) - return &provider{ - ctx: ctx, - cancel: cancel, - lotusNode: lotusNode, - dbApi: dbApi, + return &Provider{ + ctx: ctx, + cancel: cancel, + fullnodeApi: fullnodeApi, + //dbApi: dbApi, + + adapter: &Adapter{}, // TODO: instantiate properly }, nil } -func (p *provider) GetAsk() *types.StorageAsk { +func (p *Provider) GetAsk() *types.StorageAsk { return nil } -func (p *provider) ExecuteDeal(dp *types.ClientDealParams) (dh *DealHandler, pi *types.ProviderDealRejectionInfo, err error) { +func (p *Provider) ExecuteDeal(dp *types.ClientDealParams) (dh *DealHandler, pi *types.ProviderDealRejectionInfo, err error) { if _, err := url.Parse(dp.TransferURL); err != nil { return nil, nil, fmt.Errorf("transfer url is invalid: %w", err) } @@ -101,7 +99,7 @@ func (p *provider) ExecuteDeal(dp *types.ClientDealParams) (dh *DealHandler, pi // create a temp file where we will hold the deal data. tmp, err := p.fs.CreateTemp() if err != nil { - return nil, nil, fmt.Errorf("failed to create temp file: %w, err") + return nil, nil, fmt.Errorf("failed to create temp file: %w", err) } if err := tmp.Close(); err != nil { _ = os.Remove(string(tmp.OsPath())) @@ -166,7 +164,7 @@ func createPubSub(bus event.Bus) (event.Emitter, event.Subscription, error) { return emitter, sub, nil } -func (p *provider) Start() []*DealHandler { +func (p *Provider) Start() []*DealHandler { // restart all existing deals // execute db query to get all non-terminated deals here var deals []*types.ProviderDealState @@ -205,7 +203,7 @@ func (p *provider) Start() []*DealHandler { return dhs } -func (p *provider) Close() error { +func (p *Provider) Close() error { p.closeSync.Do(func() { p.cancel() p.wg.Wait() @@ -237,7 +235,7 @@ type restartReq struct { // TODO: This is transient -> If it dosen't work out, we will use locks. // 1:N will move this problem elsewhere. -func (p *provider) loop() { +func (p *Provider) loop() { defer p.wg.Done() for {