diff --git a/.circleci/config.yml b/.circleci/config.yml index 6ca0353c5ae..7b67d4a7f92 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -657,6 +657,18 @@ workflows: - build suite: itest-decode_params target: "./itests/decode_params_test.go" + - test: + name: test-itest-direct_data_onboard + requires: + - build + suite: itest-direct_data_onboard + target: "./itests/direct_data_onboard_test.go" + - test: + name: test-itest-direct_data_onboard_verified + requires: + - build + suite: itest-direct_data_onboard_verified + target: "./itests/direct_data_onboard_verified_test.go" - test: name: test-itest-dup_mpool_messages requires: diff --git a/CHANGELOG.md b/CHANGELOG.md index 21943bc6ae1..eca451ecdbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,74 @@ # UNRELEASED +## New features + +## Improvements + +# v1.26.0-rc2 / 2024-03-0y + +This is a release candidate of the upcoming MANDATORY Lotus v1.26.0 release, which will deliver the Filecoin network version 22, codenamed Dragon 🐉. + +**This release candidate sets the calibration network to upgrade at epoch 1427974, which is 2024-03-11T14:00:00Z** +This release does NOT set the mainnet upgrade epoch yet, in which will be updated in the final release. 
+The Filecoin network version 22 delivers the following FIPs: + +- [FIP-0063: Switching to new Drand mainnet network](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0063.md) +- [FIP-0074: Remove cron-based automatic deal settlement](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0074.md) +- [FIP-0076: Direct data onboarding](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md) +- [FIP-0083: Add built-in Actor events in the Verified Registry, Miner and Market Actors](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0083.md) + +## v13 Builtin Actor Bundle + +The actor bundles for the **calibration network** can be checked as follows: + +``` +lotus state actor-cids --network-version=22 +Network Version: 22 +Actor Version: 13 +Manifest CID: bafy2bzacea4firkyvt2zzdwqjrws5pyeluaesh6uaid246tommayr4337xpmi + +Actor CID +account bafk2bzaceb3j36ri5y5mfklgp5emlvrms6g4733ss2j3l7jismrxq6ng3tcc6 +cron bafk2bzaceaz6rocamdxehgpwcbku6wlapwpgzyyvkrploj66mlqptsulf52bs +datacap bafk2bzacea22nv5g3yngpxvonqfj4r2nkfk64y6yw2malicm7odk77x7zuads +eam bafk2bzaceatqtjzj7623i426noaslouvluhz6e3md3vvquqzku5qj3532uaxg +ethaccount bafk2bzacean3hs7ga5csw6g3uu7watxfnqv5uvxviebn3ba6vg4sagwdur5pu +evm bafk2bzacec5ibmbtzuzjgwjmksm2n6zfq3gkicxqywwu7tsscqgdzajpfctxk +init bafk2bzaced5sq72oemz6qwi6yssxwlos2g54zfprslrx5qfhhx2vlgsbvdpcs +multisig bafk2bzacedbgei6jkx36fwdgvoohce4aghvpohqdhoco7p4thszgssms7olv2 +paymentchannel bafk2bzaceasmgmfsi4mjanxlowsub65fmevhzky4toeqbtw4kp6tmu4kxjpgq +placeholder bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro +reward bafk2bzacedjyp6ll5ez27dfgldjj4tntxfvyp4pa5zkk7s5uhipzqjyx2gmuc +storagemarket bafk2bzaceabolct6qdnefwcrtati2us3sxtxfghyqk6aamfhl6byyefmtssqi +storageminer bafk2bzaceckzw3v7wqliyggvjvihz4wywchnnsie4frfvkm3fm5znb64mofri +storagepower bafk2bzacea7t4wynzjajl442mpdqbnh3wusjusqtnzgpvefvweh4n2tgzgqhu +system bafk2bzacedjnrb5glewazsxpcx6rwiuhl4kwrfcqolyprn6rrjtlzmthlhdq6 +verifiedregistry 
bafk2bzacednskl3bykz5qpo54z2j2p4q44t5of4ktd6vs6ymmg2zebsbxazkm +``` + +## Migration + +We are expecting a bit heavier than normal state migration for this upgrade due to the amount of state changes introduced with Direct Data Onboarding. + +All node operators, including storage providers, should be aware that ONE pre-migration is being scheduled 120 epochs before the upgrade. It will take around 10-20 minutes for the pre-migration and less than 30 seconds for the final migration, depending on the amount of historical state in the node blockstore and the hardware specs the node is running on. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries. + +We recommend node operators (who haven't enabled splitstore discard mode) that do not care about historical chain states, to prune the chain blockstore by syncing from a snapshot 1-2 days before the upgrade. + +You can test out the migration by running the [`benchmarking a network migration` tutorial.](https://lotus.filecoin.io/kb/test-migration/) + +For certain node operators, such as full archival nodes or systems that need to keep large amounts of state (RPC providers), completing the pre-migration in time before the network upgrade might not be achievable. For those node operators, it is recommended to skip the pre-migration and run the non-cached migration (i.e., just running the migration at the exact upgrade epoch), and schedule for some downtime during the upgrade epoch. 
Operators of such nodes can read the [`How to disable premigration in network upgrade` tutorial.](https://lotus.filecoin.io/kb/disable-premigration/) + +## New features +- feat: api: new verified registry methods to get all allocations and claims (#11631) ([filecoin-project/lotus#11631](https://github.com/filecoin-project/lotus/pull/11631)) +- new: add forest bootstrap nodes (#11636) ([filecoin-project/lotus#11636](https://github.com/filecoin-project/lotus/pull/11636)) +- feat: sealing: Support nv22 DDO features in the sealing pipeline (#11226) ([filecoin-project/lotus#11226](https://github.com/filecoin-project/lotus/pull/11226)) +- feat: implement FIP-0063 ([filecoin-project/lotus#11572](https://github.com/filecoin-project/lotus/pull/11572)) +- feat: events: Add Lotus APIs to consume smart contract and built-in actor events ([filecoin-project/lotus#11618](https://github.com/filecoin-project/lotus/pull/11618)) + ## Improvements -### Tracing API +## Tracing API Replace the `CodeCid` field in the message trace (added in 1.23.4) with an `InvokedActor` field. @@ -67,6 +132,71 @@ Additionally, Filecoin is not Ethereum no matter how much we try to provide API/ [handlefilecoinmethod]: https://fips.filecoin.io/FIPS/fip-0054.html#handlefilecoinmethod-general-handler-for-method-numbers--1024 +### GetActorEvents and SubscribeActorEvents + +[FIP-0049](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0049.md) introduced _Actor Events_ that can be emitted by user programmed actors. [FIP-0083](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0083.md) introduces new events emitted by the builtin Verified Registry, Miner and Market Actors. These new events for builtin actors are being activated with network version 22 to coincide with _Direct Data Onboarding_ as defined in [FIP-0076](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md) which introduces additional flexibility for data onboarding. 
Sector, Deal and DataCap lifecycles can be tracked with these events, providing visibility and options for programmatic responses to changes in state. + +Actor events are available on message receipts, but can now be retrieved from a node using the new `GetActorEvents` and `SubscribeActorEvents` methods. These methods allow for querying and subscribing to actor events, respectively. They depend on the Lotus node both collecting events (with `Fevm.Events.RealTimeFilterAPI` and `Fevm.Events.HistoricFilterAPI`) and being enabled with the new configuration option `Events.EnableActorEventsAPI`. Note that a Lotus node can only respond to requests for historic events that it retains in its event store. + +Both `GetActorEvents` and `SubscribeActorEvents` take a filter parameter which can optionally filter events on: + +* `Addresses` of the actor(s) emitting the event +* Specific `Fields` within the event +* `FromHeight` and `ToHeight` to filter events by block height +* `TipSetKey` to restrict events contained within a specific tipset + +`GetActorEvents` provides a one-time query for actor events, while `SubscribeActorEvents` provides a long-lived connection (via websockets) to the Lotus node, allowing for real-time updates on actor events. The subscription can be cancelled by the client at any time. + +### GetAllClaims and GetAllAllocations +Additionally, the methods `GetAllAllocations` and `GetAllClaims` have been added to the Lotus API. These methods list all the allocations and claims available in the actor state. + +### Lotus CLI + +The `filplus` commands used for listing allocations and claims have been updated. If no argument is provided to either command, they will list out all the allocations and claims in the verified registry actor. +The output list columns have been modified to `AllocationID` and `ClaimID` instead of ID. 
+ +```shell +lotus filplus list-allocations --help +NAME: + lotus filplus list-allocations - List allocations available in verified registry actor or made by a client if specified + +USAGE: + lotus filplus list-allocations [command options] clientAddress + +OPTIONS: + --expired list only expired allocations (default: false) + --json output results in json format (default: false) + --help, -h show help + + +lotus filplus list-claims --help +NAME: + lotus filplus list-claims - List claims available in verified registry actor or made by provider if specified + +USAGE: + lotus filplus list-claims [command options] providerAddress + +OPTIONS: + --expired list only expired claims (default: false) + --help, -h show help +``` + +## Dependencies +- github.com/filecoin-project/go-state-types (v0.12.8 -> v0.13.0-rc.2) +- chore: deps: update to go-state-types v13.0.0-rc.1 ([filecoin-project/lotus#11662](https://github.com/filecoin-project/lotus/pull/11662)) +- chore: deps: update to go-state-types v13.0.0-rc.2 ([filecoin-project/lotus#11675](https://github.com/filecoin-project/lotus/pull/11675)) +- chore: deps: update to go-multiaddr v0.12.2 (#11602) ([filecoin-project/lotus#11602](https://github.com/filecoin-project/lotus/pull/11602)) +- feat: fvm: update the FVM/FFI to v4.1 (#11608) (#11612) ([filecoin-project/lotus#11612](https://github.com/filecoin-project/lotus/pull/11612)) + +## Others +- Remove PL operated bootstrap nodes from mainnet.pi ([filecoin-project/lotus#11491](https://github.com/filecoin-project/lotus/pull/11491)) +- Update epoch heights (#11637) ([filecoin-project/lotus#11637](https://github.com/filecoin-project/lotus/pull/11637)) +- chore: Set upgrade heights and change codename ([filecoin-project/lotus#11599](https://github.com/filecoin-project/lotus/pull/11599)) +- chore:: backport #11609 to the feat/nv22 branch (#11644) ([filecoin-project/lotus#11644](https://github.com/filecoin-project/lotus/pull/11644)) +- fix: add UpgradePhoenixHeight to 
StateGetNetworkParams (#11648) ([filecoin-project/lotus#11648](https://github.com/filecoin-project/lotus/pull/11648)) +- feat: drand quicknet: allow scheduling drand quicknet upgrade before nv22 on 2k devnet ([filecoin-project/lotus#11667](https://github.com/filecoin-project/lotus/pull/11667)) +- chore: backport #11632 to release/v1.26.0 ([filecoin-project/lotus#11667](https://github.com/filecoin-project/lotus/pull/11667)) + # v1.25.2 / 2024-01-11 This is an optional but **highly recommended feature release** of Lotus, as it includes fixes for synchronizations issues that users have experienced. The feature release also introduces `Lotus-Provider` in its alpha testing phase, as well as the ability to call external PC2-binaries during the sealing process. diff --git a/api/api_full.go b/api/api_full.go index a9adb1c4bd6..e61bb0dedef 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -20,7 +20,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin/v8/paych" - "github.com/filecoin-project/go-state-types/builtin/v9/market" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -28,8 +27,10 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -552,14 +553,20 @@ type FullNode interface { // StateGetAllocationForPendingDeal returns the allocation for a 
given deal ID of a pending deal. Returns nil if // pending allocation is not found. StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read + // StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID + StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) //perm:read // StateGetAllocation returns the allocation for a given address and allocation ID. StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read // StateGetAllocations returns the all the allocations for a given client. StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read + // StateGetAllAllocations returns the all the allocations available in verified registry actor. + StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read // StateGetClaim returns the claim for a given address and claim ID. StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read // StateGetClaims returns the all the claims for a given provider. StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read + // StateGetAllClaims returns the all the claims available in verified registry actor. 
+ StateGetAllClaims(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read // StateComputeDataCID computes DataCID from a set of on-chain deals StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) //perm:read // StateLookupID retrieves the ID address of the given address @@ -896,6 +903,33 @@ type FullNode interface { // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that // the path specified when calling CreateBackup is within the base path CreateBackup(ctx context.Context, fpath string) error //perm:admin + + // Actor events + + // GetActorEvents returns all user-programmed and built-in actor events that match the given + // filter. + // This is a request/response API. + // Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange + // configuration options and also the amount of historical data available in the node. + // + // This is an EXPERIMENTAL API and may be subject to change. + GetActorEvents(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) //perm:read + + // SubscribeActorEvents returns a long-lived stream of all user-programmed and built-in actor + // events that match the given filter. + // Events that match the given filter are written to the stream in real-time as they are emitted + // from the FVM. + // The response stream is closed when the client disconnects, when a ToHeight is specified and is + // reached, or if there is an error while writing an event to the stream. + // This API also allows clients to read all historical events matching the given filter before any + // real-time events are written to the response stream if the filter specifies an earlier + // FromHeight. 
+ // Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange + // configuration options and also the amount of historical data available in the node. + // + // Note: this API is only available via websocket connections. + // This is an EXPERIMENTAL API and may be subject to change. + SubscribeActorEvents(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) //perm:read } // reverse interface to the client, called after EthSubscribe @@ -1131,9 +1165,47 @@ type MarketBalance struct { Locked big.Int } +type MarketDealState struct { + SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated + SlashEpoch abi.ChainEpoch // -1 if deal never slashed +} + +func MakeDealState(mds market.DealState) MarketDealState { + return MarketDealState{ + SectorStartEpoch: mds.SectorStartEpoch(), + LastUpdatedEpoch: mds.LastUpdatedEpoch(), + SlashEpoch: mds.SlashEpoch(), + } +} + +type mstate struct { + s MarketDealState +} + +func (m mstate) SectorStartEpoch() abi.ChainEpoch { + return m.s.SectorStartEpoch +} + +func (m mstate) LastUpdatedEpoch() abi.ChainEpoch { + return m.s.LastUpdatedEpoch +} + +func (m mstate) SlashEpoch() abi.ChainEpoch { + return m.s.SlashEpoch +} + +func (m mstate) Equals(o market.DealState) bool { + return market.DealStatesEqual(m, o) +} + +func (m MarketDealState) Iface() market.DealState { + return mstate{m} +} + type MarketDeal struct { Proposal market.DealProposal - State market.DealState + State MarketDealState } type RetrievalOrder struct { diff --git a/api/api_gateway.go b/api/api_gateway.go index 238bf43abfe..e71a8b712fc 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -129,4 +129,7 @@ type Gateway interface { Web3ClientVersion(ctx context.Context) (string, error) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) EthTraceReplayBlockTransactions(ctx 
context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) + + GetActorEvents(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) + SubscribeActorEvents(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) } diff --git a/api/api_storage.go b/api/api_storage.go index d5b3d5c1d67..b24ee2af39e 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -24,6 +24,7 @@ import ( builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -75,7 +76,7 @@ type StorageMiner interface { // Add piece to an open sector. If no sectors with enough space are open, // either a new sector will be created, or this call will block until more // sectors can be created. - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (SectorOffset, error) //perm:admin SectorsUnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin @@ -353,10 +354,21 @@ type SectorLog struct { } type SectorPiece struct { - Piece abi.PieceInfo - DealInfo *PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) + Piece abi.PieceInfo + + // DealInfo is nil for pieces which do not appear in deals (e.g. 
filler pieces) + // NOTE: DDO pieces which aren't associated with a market deal and have no + // verified allocation will still have a non-nil DealInfo. + // nil DealInfo indicates that the piece is a filler, and has zero piece commitment. + DealInfo *piece.PieceDealInfo } +// DEPRECATED: Use piece.PieceDealInfo instead +type PieceDealInfo = piece.PieceDealInfo + +// DEPRECATED: Use piece.DealSchedule instead +type DealSchedule = piece.DealSchedule + type SectorInfo struct { SectorID abi.SectorNumber State SectorState @@ -459,28 +471,6 @@ type SectorOffset struct { Offset abi.PaddedPieceSize } -// DealInfo is a tuple of deal identity and its schedule -type PieceDealInfo struct { - // "Old" builtin-market deal info - PublishCid *cid.Cid - DealID abi.DealID - DealProposal *market.DealProposal - - // Common deal info - DealSchedule DealSchedule - - // Best-effort deal asks - KeepUnsealed bool -} - -// DealSchedule communicates the time interval of a storage deal. The deal must -// appear in a sealed (proven) sector no later than StartEpoch, otherwise it -// is invalid. -type DealSchedule struct { - StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch -} - // DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that // we expose through JSON-RPC to avoid clients having to depend on the // dagstore lib. 
diff --git a/api/cbor_gen.go b/api/cbor_gen.go index fd2cb30b496..7a3f97e5980 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -14,7 +14,8 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" - market "github.com/filecoin-project/go-state-types/builtin/v9/market" + + piece "github.com/filecoin-project/lotus/storage/pipeline/piece" ) var _ = xerrors.Errorf @@ -35,7 +36,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { } // t.Channel (address.Address) (struct) - if len("Channel") > cbg.MaxLength { + if len("Channel") > 8192 { return xerrors.Errorf("Value in field \"Channel\" was too long") } @@ -51,7 +52,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { } // t.Vouchers ([]*paych.SignedVoucher) (slice) - if len("Vouchers") > cbg.MaxLength { + if len("Vouchers") > 8192 { return xerrors.Errorf("Value in field \"Vouchers\" was too long") } @@ -62,7 +63,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Vouchers) > cbg.MaxLength { + if len(t.Vouchers) > 8192 { return xerrors.Errorf("Slice value in field t.Vouchers was too long") } @@ -73,10 +74,11 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.WaitSentinel (cid.Cid) (struct) - if len("WaitSentinel") > cbg.MaxLength { + if len("WaitSentinel") > 8192 { return xerrors.Errorf("Value in field \"WaitSentinel\" was too long") } @@ -123,7 +125,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -150,7 +152,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Vouchers: array too large (%d)", extra) } @@ -188,9 +190,9 @@ func (t *PaymentInfo) UnmarshalCBOR(r 
io.Reader) (err error) { } } + } } - // t.WaitSentinel (cid.Cid) (struct) case "WaitSentinel": @@ -226,7 +228,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { } // t.Size (abi.UnpaddedPieceSize) (uint64) - if len("Size") > cbg.MaxLength { + if len("Size") > 8192 { return xerrors.Errorf("Value in field \"Size\" was too long") } @@ -242,7 +244,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { } // t.Offset (abi.PaddedPieceSize) (uint64) - if len("Offset") > cbg.MaxLength { + if len("Offset") > 8192 { return xerrors.Errorf("Value in field \"Offset\" was too long") } @@ -258,7 +260,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { } // t.SectorID (abi.SectorNumber) (uint64) - if len("SectorID") > cbg.MaxLength { + if len("SectorID") > 8192 { return xerrors.Errorf("Value in field \"SectorID\" was too long") } @@ -305,7 +307,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -381,7 +383,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { } // t.Refs ([]api.SealedRef) (slice) - if len("Refs") > cbg.MaxLength { + if len("Refs") > 8192 { return xerrors.Errorf("Value in field \"Refs\" was too long") } @@ -392,7 +394,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { return err } - if len(t.Refs) > cbg.MaxLength { + if len(t.Refs) > 8192 { return xerrors.Errorf("Slice value in field t.Refs was too long") } @@ -403,6 +405,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -436,7 +439,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -453,7 +456,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra 
> cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Refs: array too large (%d)", extra) } @@ -481,6 +484,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) { } } + } } @@ -505,7 +509,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { } // t.Epoch (abi.ChainEpoch) (int64) - if len("Epoch") > cbg.MaxLength { + if len("Epoch") > 8192 { return xerrors.Errorf("Value in field \"Epoch\" was too long") } @@ -527,7 +531,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { } // t.Value (abi.SealRandomness) (slice) - if len("Value") > cbg.MaxLength { + if len("Value") > 8192 { return xerrors.Errorf("Value in field \"Value\" was too long") } @@ -538,7 +542,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { return err } - if len(t.Value) > cbg.ByteArrayMaxLen { + if len(t.Value) > 2097152 { return xerrors.Errorf("Byte array in field t.Value was too long") } @@ -546,9 +550,10 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -581,7 +586,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -594,10 +599,10 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { case "Epoch": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -624,7 +629,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Value: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -635,7 +640,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, 
t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } @@ -660,7 +665,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { } // t.Epoch (abi.ChainEpoch) (int64) - if len("Epoch") > cbg.MaxLength { + if len("Epoch") > 8192 { return xerrors.Errorf("Value in field \"Epoch\" was too long") } @@ -682,7 +687,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { } // t.Value (abi.InteractiveSealRandomness) (slice) - if len("Value") > cbg.MaxLength { + if len("Value") > 8192 { return xerrors.Errorf("Value in field \"Value\" was too long") } @@ -693,7 +698,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { return err } - if len(t.Value) > cbg.ByteArrayMaxLen { + if len(t.Value) > 2097152 { return xerrors.Errorf("Byte array in field t.Value was too long") } @@ -701,9 +706,10 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -736,7 +742,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -749,10 +755,10 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { case "Epoch": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -779,7 +785,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Value: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -790,7 +796,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } @@ -802,239 
+808,6 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { return nil } -func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write([]byte{165}); err != nil { - return err - } - - // t.DealID (abi.DealID) (uint64) - if len("DealID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealID\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { - return err - } - if _, err := cw.WriteString(string("DealID")); err != nil { - return err - } - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { - return err - } - - // t.PublishCid (cid.Cid) (struct) - if len("PublishCid") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"PublishCid\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { - return err - } - if _, err := cw.WriteString(string("PublishCid")); err != nil { - return err - } - - if t.PublishCid == nil { - if _, err := cw.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { - return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) - } - } - - // t.DealProposal (market.DealProposal) (struct) - if len("DealProposal") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealProposal\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { - return err - } - if _, err := cw.WriteString(string("DealProposal")); err != nil { - return err - } - - if err := t.DealProposal.MarshalCBOR(cw); err != nil { - return err - } - - // t.DealSchedule (api.DealSchedule) (struct) - if len("DealSchedule") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealSchedule\" was too long") - } - - if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { - return err - } - if _, err := cw.WriteString(string("DealSchedule")); err != nil { - return err - } - - if err := t.DealSchedule.MarshalCBOR(cw); err != nil { - return err - } - - // t.KeepUnsealed (bool) (bool) - if len("KeepUnsealed") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { - return err - } - if _, err := cw.WriteString(string("KeepUnsealed")); err != nil { - return err - } - - if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { - return err - } - return nil -} - -func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) { - *t = PieceDealInfo{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadString(cr) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.DealID (abi.DealID) (uint64) - case "DealID": - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) - - } - // t.PublishCid (cid.Cid) (struct) - case "PublishCid": - - { - - b, err := cr.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := cr.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) - } - - t.PublishCid = &c - } - - } 
- // t.DealProposal (market.DealProposal) (struct) - case "DealProposal": - - { - - b, err := cr.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := cr.UnreadByte(); err != nil { - return err - } - t.DealProposal = new(market.DealProposal) - if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) - } - } - - } - // t.DealSchedule (api.DealSchedule) (struct) - case "DealSchedule": - - { - - if err := t.DealSchedule.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) - } - - } - // t.KeepUnsealed (bool) (bool) - case "KeepUnsealed": - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.KeepUnsealed = false - case 21: - t.KeepUnsealed = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - - default: - // Field doesn't exist on this type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} func (t *SectorPiece) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) @@ -1048,7 +821,7 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error { } // t.Piece (abi.PieceInfo) (struct) - if len("Piece") > cbg.MaxLength { + if len("Piece") > 8192 { return xerrors.Errorf("Value in field \"Piece\" was too long") } @@ -1063,8 +836,8 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error { return err } - // t.DealInfo (api.PieceDealInfo) (struct) - if len("DealInfo") > cbg.MaxLength { + // t.DealInfo (piece.PieceDealInfo) (struct) + if len("DealInfo") > 8192 { return xerrors.Errorf("Value in field \"DealInfo\" was too long") } @@ -1110,7 +883,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := 
cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1129,7 +902,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { } } - // t.DealInfo (api.PieceDealInfo) (struct) + // t.DealInfo (piece.PieceDealInfo) (struct) case "DealInfo": { @@ -1142,7 +915,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { if err := cr.UnreadByte(); err != nil { return err } - t.DealInfo = new(PieceDealInfo) + t.DealInfo = new(piece.PieceDealInfo) if err := t.DealInfo.UnmarshalCBOR(cr); err != nil { return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err) } @@ -1158,160 +931,3 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { return nil } -func (t *DealSchedule) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write([]byte{162}); err != nil { - return err - } - - // t.EndEpoch (abi.ChainEpoch) (int64) - if len("EndEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"EndEpoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { - return err - } - if _, err := cw.WriteString(string("EndEpoch")); err != nil { - return err - } - - if t.EndEpoch >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { - return err - } - } - - // t.StartEpoch (abi.ChainEpoch) (int64) - if len("StartEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"StartEpoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { - return err - } - if _, err := cw.WriteString(string("StartEpoch")); err != nil { - return err - } - - if t.StartEpoch >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { - 
return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { - *t = DealSchedule{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadString(cr) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.EndEpoch (abi.ChainEpoch) (int64) - case "EndEpoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative overflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EndEpoch = abi.ChainEpoch(extraI) - } - // t.StartEpoch (abi.ChainEpoch) (int64) - case "StartEpoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative overflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StartEpoch = abi.ChainEpoch(extraI) - } - - default: - // Field doesn't exist on this type, so ignore it - 
cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 6028a40f250..29ac1d3e8d8 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -32,7 +32,6 @@ import ( "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v12/miner" "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" @@ -41,6 +40,7 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -404,6 +404,32 @@ func init() { percent := types.Percent(123) addExample(percent) addExample(&percent) + + addExample(&miner.PieceActivationManifest{ + CID: c, + Size: 2032, + VerifiedAllocationKey: nil, + Notify: nil, + }) + + addExample(&types.ActorEventBlock{ + Codec: 0x51, + Value: []byte("ddata"), + }) + + addExample(&types.ActorEventFilter{ + Addresses: []address.Address{addr}, + Fields: map[string][]types.ActorEventBlock{ + "abc": { + { + Codec: 0x51, + Value: []byte("ddata"), + }, + }, + }, + FromHeight: epochPtr(1010), + ToHeight: epochPtr(1020), + }) } func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { @@ -509,6 +535,11 @@ func exampleStruct(method string, t, parent reflect.Type) interface{} { return ns.Interface() } +func epochPtr(ei int64) *abi.ChainEpoch { + ep := abi.ChainEpoch(ei) + return &ep +} + type Visitor struct { Root string Methods map[string]ast.Node diff --git a/api/mocks/mock_full.go 
b/api/mocks/mock_full.go index f4a9832bb3f..215ccdfebdf 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -27,7 +27,7 @@ import ( auth "github.com/filecoin-project/go-jsonrpc/auth" abi "github.com/filecoin-project/go-state-types/abi" big "github.com/filecoin-project/go-state-types/big" - miner "github.com/filecoin-project/go-state-types/builtin/v12/miner" + miner "github.com/filecoin-project/go-state-types/builtin/v13/miner" paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" @@ -1627,6 +1627,21 @@ func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) } +// GetActorEvents mocks base method. +func (m *MockFullNode) GetActorEvents(arg0 context.Context, arg1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActorEvents", arg0, arg1) + ret0, _ := ret[0].([]*types.ActorEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActorEvents indicates an expected call of GetActorEvents. +func (mr *MockFullNodeMockRecorder) GetActorEvents(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActorEvents", reflect.TypeOf((*MockFullNode)(nil).GetActorEvents), arg0, arg1) +} + // ID mocks base method. func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { m.ctrl.T.Helper() @@ -3159,6 +3174,36 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) } +// StateGetAllAllocations mocks base method. 
+func (m *MockFullNode) StateGetAllAllocations(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllAllocations", arg0, arg1) + ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllAllocations indicates an expected call of StateGetAllAllocations. +func (mr *MockFullNodeMockRecorder) StateGetAllAllocations(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllAllocations), arg0, arg1) +} + +// StateGetAllClaims mocks base method. +func (m *MockFullNode) StateGetAllClaims(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllClaims", arg0, arg1) + ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllClaims indicates an expected call of StateGetAllClaims. +func (mr *MockFullNodeMockRecorder) StateGetAllClaims(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetAllClaims), arg0, arg1) +} + // StateGetAllocation mocks base method. func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { m.ctrl.T.Helper() @@ -3189,6 +3234,21 @@ func (mr *MockFullNodeMockRecorder) StateGetAllocationForPendingDeal(arg0, arg1, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) } +// StateGetAllocationIdForPendingDeal mocks base method. 
+func (m *MockFullNode) StateGetAllocationIdForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (verifreg.AllocationId, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocationIdForPendingDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(verifreg.AllocationId) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocationIdForPendingDeal indicates an expected call of StateGetAllocationIdForPendingDeal. +func (mr *MockFullNodeMockRecorder) StateGetAllocationIdForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationIdForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationIdForPendingDeal), arg0, arg1, arg2) +} + // StateGetAllocations mocks base method. func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { m.ctrl.T.Helper() @@ -3894,6 +3954,21 @@ func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2, arg3, arg4 in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4) } +// SubscribeActorEvents mocks base method. +func (m *MockFullNode) SubscribeActorEvents(arg0 context.Context, arg1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribeActorEvents", arg0, arg1) + ret0, _ := ret[0].(<-chan *types.ActorEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubscribeActorEvents indicates an expected call of SubscribeActorEvents. 
+func (mr *MockFullNodeMockRecorder) SubscribeActorEvents(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeActorEvents", reflect.TypeOf((*MockFullNode)(nil).SubscribeActorEvents), arg0, arg1) +} + // SyncCheckBad mocks base method. func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 97e441bbe6e..bd33ae445be 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -37,11 +37,13 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/journal/alerting" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/sealtasks" @@ -335,6 +337,8 @@ type FullNodeMethods struct { GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"` + GetActorEvents func(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) `perm:"read"` + MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"` @@ -481,10 +485,16 @@ type FullNodeMethods struct { StateGetActor func(p0 context.Context, p1 
address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"` + StateGetAllAllocations func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"` + + StateGetAllClaims func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"` + StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` + StateGetAllocationIdForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) `perm:"read"` + StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"` StateGetBeaconEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` @@ -579,6 +589,8 @@ type FullNodeMethods struct { StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"` + SubscribeActorEvents func(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) `perm:"read"` + SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"` SyncCheckpoint func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"` @@ -745,6 +757,8 @@ type GatewayMethods struct { GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `` + GetActorEvents func(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) `` + MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) `` MpoolGetNonce func(p0 context.Context, p1 
address.Address) (uint64, error) `` @@ -819,6 +833,8 @@ type GatewayMethods struct { StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` + SubscribeActorEvents func(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) `` + Version func(p0 context.Context) (APIVersion, error) `` WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` @@ -1099,7 +1115,7 @@ type StorageMinerMethods struct { SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` - SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"` + SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) `perm:"admin"` SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` @@ -2590,6 +2606,17 @@ func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messa return nil, ErrNotSupported } +func (s *FullNodeStruct) GetActorEvents(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + if s.Internal.GetActorEvents == nil { + return *new([]*types.ActorEvent), ErrNotSupported + } + return s.Internal.GetActorEvents(p0, p1) +} + +func (s *FullNodeStub) GetActorEvents(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + return *new([]*types.ActorEvent), ErrNotSupported +} + func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { if s.Internal.MarketAddBalance == nil { return *new(cid.Cid), ErrNotSupported @@ -3393,6 +3420,28 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 return nil, ErrNotSupported } +func (s *FullNodeStruct) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) 
(map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { + if s.Internal.StateGetAllAllocations == nil { + return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported + } + return s.Internal.StateGetAllAllocations(p0, p1) +} + +func (s *FullNodeStub) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { + return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported +} + +func (s *FullNodeStruct) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) { + if s.Internal.StateGetAllClaims == nil { + return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported + } + return s.Internal.StateGetAllClaims(p0, p1) +} + +func (s *FullNodeStub) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) { + return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported +} + func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) { if s.Internal.StateGetAllocation == nil { return nil, ErrNotSupported @@ -3415,6 +3464,17 @@ func (s *FullNodeStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 a return nil, ErrNotSupported } +func (s *FullNodeStruct) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) { + if s.Internal.StateGetAllocationIdForPendingDeal == nil { + return *new(verifreg.AllocationId), ErrNotSupported + } + return s.Internal.StateGetAllocationIdForPendingDeal(p0, p1, p2) +} + +func (s *FullNodeStub) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) { + return *new(verifreg.AllocationId), ErrNotSupported +} + func (s *FullNodeStruct) 
StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { if s.Internal.StateGetAllocations == nil { return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported @@ -3932,6 +3992,17 @@ func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p return nil, ErrNotSupported } +func (s *FullNodeStruct) SubscribeActorEvents(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + if s.Internal.SubscribeActorEvents == nil { + return nil, ErrNotSupported + } + return s.Internal.SubscribeActorEvents(p0, p1) +} + +func (s *FullNodeStub) SubscribeActorEvents(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { if s.Internal.SyncCheckBad == nil { return "", ErrNotSupported @@ -4779,6 +4850,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag return nil, ErrNotSupported } +func (s *GatewayStruct) GetActorEvents(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + if s.Internal.GetActorEvents == nil { + return *new([]*types.ActorEvent), ErrNotSupported + } + return s.Internal.GetActorEvents(p0, p1) +} + +func (s *GatewayStub) GetActorEvents(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + return *new([]*types.ActorEvent), ErrNotSupported +} + func (s *GatewayStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { if s.Internal.MinerGetBaseInfo == nil { return nil, ErrNotSupported @@ -5186,6 +5268,17 @@ func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 return nil, ErrNotSupported } +func (s *GatewayStruct) SubscribeActorEvents(p0 context.Context, p1 
*types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + if s.Internal.SubscribeActorEvents == nil { + return nil, ErrNotSupported + } + return s.Internal.SubscribeActorEvents(p0, p1) +} + +func (s *GatewayStub) SubscribeActorEvents(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) { if s.Internal.Version == nil { return *new(APIVersion), ErrNotSupported @@ -6506,14 +6599,14 @@ func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorN return ErrNotSupported } -func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) { +func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) { if s.Internal.SectorAddPieceToAny == nil { return *new(SectorOffset), ErrNotSupported } return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3) } -func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) { +func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) { return *new(SectorOffset), ErrNotSupported } diff --git a/api/types.go b/api/types.go index 7fd60775019..b7dbe7b3625 100644 --- a/api/types.go +++ b/api/types.go @@ -344,6 +344,8 @@ type ForkUpgradeParams struct { UpgradeLightningHeight abi.ChainEpoch UpgradeThunderHeight abi.ChainEpoch UpgradeWatermelonHeight abi.ChainEpoch + UpgradeDragonHeight abi.ChainEpoch + UpgradePhoenixHeight abi.ChainEpoch } // ChainExportConfig holds configuration for chain ranged exports. 
diff --git a/api/v0api/full.go b/api/v0api/full.go index d92d5a95c8e..db84ddc8745 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -537,10 +537,14 @@ type FullNode interface { StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read // StateGetAllocations returns the all the allocations for a given client. StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read + // StateGetAllAllocations returns all the allocations available in the verified registry actor. + StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read // StateGetClaim returns the claim for a given address and claim ID. StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read // StateGetClaims returns the all the claims for a given provider. StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read + // StateGetAllClaims returns all the claims available in the verified registry actor. 
+ StateGetAllClaims(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read // StateLookupID retrieves the ID address of the given address StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read // StateAccountKey returns the public key address of the given ID address diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index bd37f64298d..90c25d4a774 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -280,6 +280,10 @@ type FullNodeMethods struct { StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"` + StateGetAllAllocations func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"` + + StateGetAllClaims func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"` + StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` @@ -1837,6 +1841,28 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 return nil, ErrNotSupported } +func (s *FullNodeStruct) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { + if s.Internal.StateGetAllAllocations == nil { + return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported + } + return s.Internal.StateGetAllAllocations(p0, p1) +} + +func (s *FullNodeStub) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { + return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), 
ErrNotSupported +} + +func (s *FullNodeStruct) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) { + if s.Internal.StateGetAllClaims == nil { + return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported + } + return s.Internal.StateGetAllClaims(p0, p1) +} + +func (s *FullNodeStub) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) { + return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported +} + func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) { if s.Internal.StateGetAllocation == nil { return nil, ErrNotSupported diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index 151b1dbc124..df67d087656 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -26,7 +26,7 @@ import ( auth "github.com/filecoin-project/go-jsonrpc/auth" abi "github.com/filecoin-project/go-state-types/abi" big "github.com/filecoin-project/go-state-types/big" - miner "github.com/filecoin-project/go-state-types/builtin/v12/miner" + miner "github.com/filecoin-project/go-state-types/builtin/v13/miner" paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" @@ -2339,6 +2339,36 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) } +// StateGetAllAllocations mocks base method. 
+func (m *MockFullNode) StateGetAllAllocations(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllAllocations", arg0, arg1) + ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllAllocations indicates an expected call of StateGetAllAllocations. +func (mr *MockFullNodeMockRecorder) StateGetAllAllocations(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllAllocations), arg0, arg1) +} + +// StateGetAllClaims mocks base method. +func (m *MockFullNode) StateGetAllClaims(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllClaims", arg0, arg1) + ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllClaims indicates an expected call of StateGetAllClaims. +func (mr *MockFullNodeMockRecorder) StateGetAllClaims(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetAllClaims), arg0, arg1) +} + // StateGetAllocation mocks base method. 
func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { m.ctrl.T.Helper() diff --git a/blockstore/cbor_gen.go b/blockstore/cbor_gen.go index 221f136762d..c53e9e85061 100644 --- a/blockstore/cbor_gen.go +++ b/blockstore/cbor_gen.go @@ -44,7 +44,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { } // t.Cid ([]cid.Cid) (slice) - if len(t.Cid) > cbg.MaxLength { + if len(t.Cid) > 8192 { return xerrors.Errorf("Slice value in field t.Cid was too long") } @@ -60,7 +60,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { } // t.Data ([][]uint8) (slice) - if len(t.Data) > cbg.MaxLength { + if len(t.Data) > 8192 { return xerrors.Errorf("Slice value in field t.Data was too long") } @@ -68,7 +68,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.Data { - if len(v) > cbg.ByteArrayMaxLen { + if len(v) > 2097152 { return xerrors.Errorf("Byte array in field v was too long") } @@ -76,9 +76,10 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(v[:]); err != nil { + if _, err := cw.Write(v); err != nil { return err } + } return nil } @@ -140,7 +141,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Cid: array too large (%d)", extra) } @@ -171,9 +172,9 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { t.Cid[i] = c } + } } - // t.Data ([][]uint8) (slice) maj, extra, err = cr.ReadHeader() @@ -181,7 +182,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Data: array too large (%d)", extra) } @@ -207,7 +208,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Data[i]: byte array 
too large (%d)", extra) } if maj != cbg.MajByteString { @@ -218,12 +219,12 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { t.Data[i] = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[i][:]); err != nil { + if _, err := io.ReadFull(cr, t.Data[i]); err != nil { return err } + } } - return nil } @@ -253,7 +254,7 @@ func (t *NetRpcResp) MarshalCBOR(w io.Writer) error { } // t.Data ([]uint8) (slice) - if len(t.Data) > cbg.ByteArrayMaxLen { + if len(t.Data) > 2097152 { return xerrors.Errorf("Byte array in field t.Data was too long") } @@ -261,9 +262,10 @@ func (t *NetRpcResp) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Data[:]); err != nil { + if _, err := cw.Write(t.Data); err != nil { return err } + return nil } @@ -324,7 +326,7 @@ func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Data: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -335,9 +337,10 @@ func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) { t.Data = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[:]); err != nil { + if _, err := io.ReadFull(cr, t.Data); err != nil { return err } + return nil } @@ -361,7 +364,7 @@ func (t *NetRpcErr) MarshalCBOR(w io.Writer) error { } // t.Msg (string) (string) - if len(t.Msg) > cbg.MaxLength { + if len(t.Msg) > 8192 { return xerrors.Errorf("Value in field t.Msg was too long") } @@ -426,7 +429,7 @@ func (t *NetRpcErr) UnmarshalCBOR(r io.Reader) (err error) { // t.Msg (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } diff --git a/build/actors/v13.tar.zst b/build/actors/v13.tar.zst new file mode 100644 index 00000000000..77565abc97b Binary files /dev/null and b/build/actors/v13.tar.zst differ diff --git a/build/builtin_actors_gen.go b/build/builtin_actors_gen.go index 11c70f9eca5..e8772ee4d4f 
100644 --- a/build/builtin_actors_gen.go +++ b/build/builtin_actors_gen.go @@ -95,10 +95,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzaceb37hxeuoo5rgf6ansrdl2ykm5v5zp6kireubn4orcopr67jbxv6k"), }, }, { - Network: "butterflynet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacectxvbk77ntedhztd6sszp2btrtvsmy7lp2ypnrk6yl74zb34t2cq"), + Network: "butterflynet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzacectxvbk77ntedhztd6sszp2btrtvsmy7lp2ypnrk6yl74zb34t2cq"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacebp7anjdtg2sohyt6lromx4xs7nujtwdfcsffnptphaayabx7ysxs"), "cron": MustParseCid("bafk2bzacecu2y3awtemmglpkroiglulc2fj3gpdn6eazdqr6avcautiaighrg"), @@ -117,6 +117,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"), "verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"), }, +}, { + Network: "butterflynet", + Version: 13, + BundleGitTag: "v13.0.0-rc.3", + ManifestCid: MustParseCid("bafy2bzaceaqx5xa4cwso24rjiu2ketjlztrqlac6dkyol7tlyuhzrle3zfbos"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacedl533kwbzouqxibejpwp6syfdekvmzy4vmmno6j4iaydbdmv4xek"), + "cron": MustParseCid("bafk2bzacecimv5xnuwyoqgxk26qt4xqpgntleret475pnh35s3vvhqtdct4ow"), + "datacap": MustParseCid("bafk2bzacebpdd4ctavhs7wkcykfahpifct3p4hbptgtf4jfrqcp2trtlygvow"), + "eam": MustParseCid("bafk2bzaceahw5rrgj7prgbnmn237di7ymjz2ssea32wr525jydpfrwpuhs67m"), + "ethaccount": MustParseCid("bafk2bzacebrslcbew5mq3le2zsn36xqxd4gt5hryeoslxnuqwgw3rhuwh6ygu"), + "evm": MustParseCid("bafk2bzaced5smz4lhpem4mbr7igcskv3e5qopbdp7dqshww2qs4ahacgzjzo4"), + "init": MustParseCid("bafk2bzacedgj6hawhdw2ot2ufisci374o2bq6bfkvlvdt6q7s3uoe5ffyv43k"), + "multisig": 
MustParseCid("bafk2bzacectnnnpwyqiccaymy3h6ghu74ghjrqyhtqv5odfd4opivzebjj6to"), + "paymentchannel": MustParseCid("bafk2bzaceckhx44jawhzhkz6k23gfnv2gcutgb4j4ekhonj2plwaent4b2tpk"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacebbs3rlg7y3wbvxrj4wgbsqmasw4ksbbr3lyqbkaxj2t25qz6zzuy"), + "storagemarket": MustParseCid("bafk2bzaced3zmxsmlhp2nsiwkxcp2ugonbsebcd53t7htzo2jcoidvu464xmm"), + "storageminer": MustParseCid("bafk2bzacebedx7iaa2ruspxvghkg46ez7un5b7oiijjtnvddq2aot5wk7p7ry"), + "storagepower": MustParseCid("bafk2bzacebvne7m2l3hxxw4xa6oujol75x35yqpnlqiwx74jilyrop4cs7cse"), + "system": MustParseCid("bafk2bzaceacjmlxrvydlud77ilpzbscez46yedx6zjsj6olxsdeuv6d4x4cwe"), + "verifiedregistry": MustParseCid("bafk2bzaceaf2po4fxf7gw7cdvulwxxtvnsvzfn4gff5w267qnz7r44ywk25c6"), + }, }, { Network: "calibrationnet", Version: 8, @@ -201,10 +224,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzaceceoo5jlom2zweh7kpye2vkj33wgqnkjshlsw2neemqkfg5g2rmvg"), }, }, { - Network: "calibrationnet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca"), + Network: "calibrationnet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacechwwxdqvggkdylm37zldjsra2ivkdzwp7fee56bzxbzs544wv6u6"), "cron": MustParseCid("bafk2bzacec4gdxxkqwxqqodsv6ug5dmdbqdfqwyqfek3yhxc2wweh5psxaeq6"), @@ -223,6 +246,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"), "verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"), }, +}, { + Network: "calibrationnet", + Version: 13, + BundleGitTag: 
"v13.0.0-rc.3", + ManifestCid: MustParseCid("bafy2bzacea4firkyvt2zzdwqjrws5pyeluaesh6uaid246tommayr4337xpmi"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3j36ri5y5mfklgp5emlvrms6g4733ss2j3l7jismrxq6ng3tcc6"), + "cron": MustParseCid("bafk2bzaceaz6rocamdxehgpwcbku6wlapwpgzyyvkrploj66mlqptsulf52bs"), + "datacap": MustParseCid("bafk2bzacea22nv5g3yngpxvonqfj4r2nkfk64y6yw2malicm7odk77x7zuads"), + "eam": MustParseCid("bafk2bzaceatqtjzj7623i426noaslouvluhz6e3md3vvquqzku5qj3532uaxg"), + "ethaccount": MustParseCid("bafk2bzacean3hs7ga5csw6g3uu7watxfnqv5uvxviebn3ba6vg4sagwdur5pu"), + "evm": MustParseCid("bafk2bzacec5ibmbtzuzjgwjmksm2n6zfq3gkicxqywwu7tsscqgdzajpfctxk"), + "init": MustParseCid("bafk2bzaced5sq72oemz6qwi6yssxwlos2g54zfprslrx5qfhhx2vlgsbvdpcs"), + "multisig": MustParseCid("bafk2bzacedbgei6jkx36fwdgvoohce4aghvpohqdhoco7p4thszgssms7olv2"), + "paymentchannel": MustParseCid("bafk2bzaceasmgmfsi4mjanxlowsub65fmevhzky4toeqbtw4kp6tmu4kxjpgq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacedjyp6ll5ez27dfgldjj4tntxfvyp4pa5zkk7s5uhipzqjyx2gmuc"), + "storagemarket": MustParseCid("bafk2bzaceabolct6qdnefwcrtati2us3sxtxfghyqk6aamfhl6byyefmtssqi"), + "storageminer": MustParseCid("bafk2bzaceckzw3v7wqliyggvjvihz4wywchnnsie4frfvkm3fm5znb64mofri"), + "storagepower": MustParseCid("bafk2bzacea7t4wynzjajl442mpdqbnh3wusjusqtnzgpvefvweh4n2tgzgqhu"), + "system": MustParseCid("bafk2bzacedjnrb5glewazsxpcx6rwiuhl4kwrfcqolyprn6rrjtlzmthlhdq6"), + "verifiedregistry": MustParseCid("bafk2bzacednskl3bykz5qpo54z2j2p4q44t5of4ktd6vs6ymmg2zebsbxazkm"), + }, }, { Network: "caterpillarnet", Version: 8, @@ -316,10 +362,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacedaws3or3twy45ltcxucgvqijsje4x675ph6vup2w35smlfneamno"), }, }, { - Network: "caterpillarnet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: 
MustParseCid("bafy2bzacebxiub6qsy67asvl5cx33x5vjbuqinalmf3xtnbmokxmmklzdkvei"), + Network: "caterpillarnet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzacebxiub6qsy67asvl5cx33x5vjbuqinalmf3xtnbmokxmmklzdkvei"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacecereuhejfvodut5357cai4lmhsyr7uenhcxvmw6jpmhe6auuly32"), "cron": MustParseCid("bafk2bzacebo2whgy6jla4jsf5j4ovlqm2e4eepedlpw5wadas33yxmunis4b4"), @@ -338,6 +384,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"), "verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"), }, +}, { + Network: "caterpillarnet", + Version: 13, + BundleGitTag: "v13.0.0-rc.3", + ManifestCid: MustParseCid("bafy2bzacecozgyaqlzq4qebq52uogmrk6ahk7z2i4qfkh5iv235bpqqv7w24m"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacecro3uo6ypqhfzwdhnamzcole5qmhrbkx7qny6t2qsrcpqxelt6s2"), + "cron": MustParseCid("bafk2bzaceam3kci46y4siltbw7f4itoap34kp7b7pvn2fco5s2bvnotomwdbe"), + "datacap": MustParseCid("bafk2bzacecmtdspcbqmmjtsaz4vucuqoqjqfsgxjonns7tom7eblkngbcm7bw"), + "eam": MustParseCid("bafk2bzaceaudqhrt7djewopqdnryvwxagfufyt7ja4gdvovrxbh6edh6evgrw"), + "ethaccount": MustParseCid("bafk2bzaced676ds3z6xe333wr7frwq3f2iq5kjwp4okl3te6rne3xf7kuqrwm"), + "evm": MustParseCid("bafk2bzacebeih4jt2s6mel6x4hje7xmnugh6twul2a5axx4iczu7fu4wcdi6k"), + "init": MustParseCid("bafk2bzaceba7vvuzzwj5wqnq2bvpbgtxup53mhr3qybezbllftnxvpqbfymxo"), + "multisig": MustParseCid("bafk2bzaceapkajhnqoczrgry5javqbl7uebgmsbpqqfemzc4yb5q2dqia2qog"), + "paymentchannel": MustParseCid("bafk2bzacebg7xq4ca22gafmdbkcq357x7v6slflib4h3fnj4amsovg6ulqg3o"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzaceajt4idf26ffnyipybcib55fykjxnek7oszkqzi7lu7mbgijmkgos"), + "storagemarket": 
MustParseCid("bafk2bzaceadfmay7pyl7osjsdmrireafasnjnoziacljy5ewrcsxpp56kzqbw"), + "storageminer": MustParseCid("bafk2bzaceardbn5a7aq5jxl7efr4btmsbl7txnxm4hrrd3llyhujuc2cr5vcs"), + "storagepower": MustParseCid("bafk2bzacear4563jznjqyseoy42xl6kenyqk6umv6xl3bp5bsjb3hbs6sp6bm"), + "system": MustParseCid("bafk2bzacecc5oavxivfnvirx2g7megpdf6lugooyoc2wijloju247xzjcdezy"), + "verifiedregistry": MustParseCid("bafk2bzacecpqldvrs6i7xzbyizkpdvrick3cahrbdptmimdsrpnxu6k4xs4pm"), + }, }, { Network: "devnet", Version: 8, @@ -422,10 +491,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacebdqi5tr5pjnem5nylg2zbqcugvi7oxi35bhnrfudx4y4ufhlit2k"), }, }, { - Network: "devnet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceasjdukhhyjbegpli247vbf5h64f7uvxhhebdihuqsj2mwisdwa6o"), + Network: "devnet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzaceasjdukhhyjbegpli247vbf5h64f7uvxhhebdihuqsj2mwisdwa6o"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacedki4apynvdxxuoigmqkgaktgy2erjftoxqxqaklnelgveyaqknfu"), "cron": MustParseCid("bafk2bzacebjpczf7qtcisy3zdp3sqoohxe75tgupmdo5dr26vh7orzrsjn3b2"), @@ -444,6 +513,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"), "verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"), }, +}, { + Network: "devnet", + Version: 13, + BundleGitTag: "v13.0.0-rc.3", + ManifestCid: MustParseCid("bafy2bzaceap34qfq4emg4fp3xd7bxtzt7pvkaj37kunqm2ccvttchtlljw7d4"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacebev3fu5geeehpx577b3kvza4xsmmggmepjj7rlsnr27hpoq27q2i"), + "cron": MustParseCid("bafk2bzacedalzqahtuz2bmnf7uawbcujfhhe5xzv5ys5ufadu6ggs3tcu6lsy"), + "datacap": MustParseCid("bafk2bzaceb7ou2vn7ac4xidespoowq2q5w7ognr7s4ujy3xzzgiishajpe7le"), + "eam": 
MustParseCid("bafk2bzacedqic2qskattorj4svf6mbto2k76ej3ll3ugsyorqramrg7rpq3by"), + "ethaccount": MustParseCid("bafk2bzaceaoad7iknpywijigv2h3jyvkijff2oxvohzue533v5hby3iix5vdu"), + "evm": MustParseCid("bafk2bzacecjgiw26gagsn6a7tffkrgoor4zfgzfokp76u6cwervtmvjbopmwg"), + "init": MustParseCid("bafk2bzaced2obubqojxggeddr246cpwtyzi6knnq52jsvsc2fs3tuk2kh6dtg"), + "multisig": MustParseCid("bafk2bzacebquruzb6zho45orbdkku624t6w6jt4tudaqzraz4yh3li3jfstpg"), + "paymentchannel": MustParseCid("bafk2bzaceaydrilyxvflsuzr24hmw32qwz6sy4hgls73bhpveydcsqskdgpca"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzaceb74owpuzdddqoj2tson6ymbyuguqrnqefyiaxqvwm4ygitpabjrq"), + "storagemarket": MustParseCid("bafk2bzaceaw6dslv6pfqha4ynghq2imij5khnnjrie22kmfgtpie3bvxho6jq"), + "storageminer": MustParseCid("bafk2bzacecsputz6xygjfyrvx2d7bxkpp7b5v4icrmpckec7gnbabx2w377qs"), + "storagepower": MustParseCid("bafk2bzaceceyaa5yjwhxvvcqouob4l746zp5nesivr6enhtpimakdtby6kafi"), + "system": MustParseCid("bafk2bzaceaxg6k5vuozxlemfi5hv663m6jcawzu5puboo4znj73i36e3tsovs"), + "verifiedregistry": MustParseCid("bafk2bzacebjwc4fp4n556agi5i4pccuzn4bhn2tl24l4cskgvmwgadycff3oo"), + }, }, { Network: "hyperspace", Version: 8, @@ -551,10 +643,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a"), }, }, { - Network: "mainnet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio"), + Network: "mainnet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa"), "cron": MustParseCid("bafk2bzacechxjkfe2cehx4s7skj3wzfpzf7zolds64khrrrs66bhazsemktls"), @@ -573,6 
+665,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"), "verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"), }, +}, { + Network: "mainnet", + Version: 13, + BundleGitTag: "v13.0.0-rc.3", + ManifestCid: MustParseCid("bafy2bzacecoplaet2m4kzueqgutjxpl76bhmuiq5hmo3ueighbnxas3rj4dvy"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52"), + "cron": MustParseCid("bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc"), + "datacap": MustParseCid("bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci"), + "eam": MustParseCid("bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk"), + "ethaccount": MustParseCid("bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei"), + "evm": MustParseCid("bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q"), + "init": MustParseCid("bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai"), + "multisig": MustParseCid("bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc"), + "paymentchannel": MustParseCid("bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu"), + "storagemarket": MustParseCid("bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu"), + "storageminer": MustParseCid("bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2"), + "storagepower": MustParseCid("bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e"), + "system": MustParseCid("bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e"), + "verifiedregistry": MustParseCid("bafk2bzaceblqlrece7lezbp42lfba5ojlyxuv3vcbkldw45wpdadqwqslev3g"), + 
}, }, { Network: "testing", Version: 8, @@ -657,10 +772,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), }, }, { - Network: "testing", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceaaxd6ytavsek5bi5soqo7qamezuqfyfjy42es2clpbzu3pwzcmye"), + Network: "testing", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzaceaaxd6ytavsek5bi5soqo7qamezuqfyfjy42es2clpbzu3pwzcmye"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), @@ -679,6 +794,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, +}, { + Network: "testing", + Version: 13, + BundleGitTag: "v13.0.0-rc.3", + ManifestCid: MustParseCid("bafy2bzacedcrzpgb4jac75auzcjkh55bxipdiospgvjsivumnqlvg2rp2ahmg"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"), + "cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"), + "datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"), + "eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"), + "ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"), + "evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"), + "init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"), + "multisig": MustParseCid("bafk2bzacebmftoql6dcyqf54xznwjg2bfgdsi67spqquwslpvvtvcx6qenhz2"), + "paymentchannel": 
MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"), + "storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"), + "storageminer": MustParseCid("bafk2bzaceailclue4dba2edjethfjw6ycufcwsx4qjjmgsh77xcyprmogdjvu"), + "storagepower": MustParseCid("bafk2bzaceaqw6dhdjlqovhk3p4lb4sb25i5d6mhln2ir5m7tj6m4fegkgkinw"), + "system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"), + "verifiedregistry": MustParseCid("bafk2bzacebqwmxch4np2nwzi2yt6vkciy2mp75otwoipulkmfxly3ifhj5g6i"), + }, }, { Network: "testing-fake-proofs", Version: 8, @@ -763,10 +901,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), }, }, { - Network: "testing-fake-proofs", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacecver4l5d6jiuzubhrtcxjjfdx6jnxbmyp4bselol2atgkhz3e3um"), + Network: "testing-fake-proofs", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzacecver4l5d6jiuzubhrtcxjjfdx6jnxbmyp4bselol2atgkhz3e3um"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), @@ -785,4 +923,27 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, +}, { + Network: "testing-fake-proofs", + Version: 13, + BundleGitTag: "v13.0.0-rc.3", + ManifestCid: 
MustParseCid("bafy2bzaceaeepylii2u3lvuvrbdureocn6cuizhaq6o6ivmtzldweqf675w5s"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"), + "cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"), + "datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"), + "eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"), + "ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"), + "evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"), + "init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"), + "multisig": MustParseCid("bafk2bzacedy4vldq4viv6bzzh4fueip3by3axsbgbh655lashddgumknc6pvs"), + "paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"), + "storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"), + "storageminer": MustParseCid("bafk2bzaceb6atn3k6yhmskgmc3lgfiwpzpfmaxzacohtnb2hivme2oroycqr6"), + "storagepower": MustParseCid("bafk2bzacedameh56mp2g4y7nprhax5sddbzcmpk5p7l523l45rtn2wjc6ah4e"), + "system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"), + "verifiedregistry": MustParseCid("bafk2bzacebqwmxch4np2nwzi2yt6vkciy2mp75otwoipulkmfxly3ifhj5g6i"), + }, }} diff --git a/build/drand.go b/build/drand.go index abf1a2846c1..c4ba4b3b7af 100644 --- a/build/drand.go +++ b/build/drand.go @@ -10,8 +10,8 @@ type DrandEnum int func DrandConfigSchedule() dtypes.DrandSchedule { out := dtypes.DrandSchedule{} - for start, config := range DrandSchedule { - out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[config]}) 
+ for start, network := range DrandSchedule { + out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[network]}) } sort.Slice(out, func(i, j int) bool { @@ -27,6 +27,7 @@ const ( DrandDevnet DrandLocalnet DrandIncentinet + DrandQuicknet ) var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ @@ -36,14 +37,32 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ "https://api2.drand.sh", "https://api3.drand.sh", "https://drand.cloudflare.com", + "https://api.drand.secureweb3.com:6875", // Storswift }, Relays: []string{ "/dnsaddr/api.drand.sh/", "/dnsaddr/api2.drand.sh/", "/dnsaddr/api3.drand.sh/", }, + IsChained: true, ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, }, + DrandQuicknet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + "https://drand.cloudflare.com", + "https://api.drand.secureweb3.com:6875", // Storswift + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + IsChained: false, + ChainInfoJSON: `{"public_key":"83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a","period":3,"genesis_time":1692803367,"hash":"52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971","groupHash":"f477d5c89f21a17c863a7f937c6a6d15859414d2be09cd448d4279af331c5d3e","schemeID":"bls-unchained-g1-rfc9380","metadata":{"beaconID":"quicknet"}}`, + }, DrandTestnet: { Servers: []string{ "https://pl-eu.testnet.drand.sh", @@ -53,6 +72,7 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ "/dnsaddr/pl-eu.testnet.drand.sh/", 
"/dnsaddr/pl-us.testnet.drand.sh/", }, + IsChained: true, ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, }, DrandDevnet: { @@ -64,9 +84,11 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ "/dnsaddr/dev1.drand.sh/", "/dnsaddr/dev2.drand.sh/", }, + IsChained: true, ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, }, DrandIncentinet: { + IsChained: true, ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, }, } diff --git a/build/openrpc/full.json b/build/openrpc/full.json index 8b4eb15b948..fec670a4179 100644 --- a/build/openrpc/full.json +++ b/build/openrpc/full.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.25.3-dev" + "version": "1.27.0-dev" }, "methods": [ { @@ -37,7 +37,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1482" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1498" } }, { @@ -60,7 +60,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1493" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1509" } }, { @@ -103,7 +103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1504" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1520" } }, { @@ -214,7 +214,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1526" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1542" } }, { @@ -454,7 +454,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1537" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1553" } }, { @@ -685,7 +685,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1548" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1564" } }, { @@ -784,7 +784,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1559" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1575" } }, { @@ -816,7 +816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1570" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1586" } }, { @@ -922,7 +922,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1581" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1597" } }, { @@ -1019,7 +1019,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1592" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1608" } }, { @@ -1078,7 +1078,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1603" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1619" } }, { @@ -1171,7 +1171,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1614" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1630" } }, { @@ -1255,7 +1255,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1625" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1641" } }, { @@ -1355,7 +1355,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1636" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1652" } }, { @@ -1411,7 +1411,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1647" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1663" } }, { @@ -1484,7 +1484,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1658" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1674" } }, { @@ -1557,7 +1557,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1669" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1685" } }, { @@ -1604,7 +1604,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1680" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1696" } }, { @@ -1636,7 +1636,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1691" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1707" } }, { @@ -1691,7 +1691,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1702" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1718" } }, { @@ -1743,7 +1743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1724" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1740" } }, { @@ -1780,7 +1780,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1735" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1751" } }, { @@ -1827,7 +1827,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1746" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1762" } }, { @@ -1874,7 +1874,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1757" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1773" } }, { @@ -1954,7 +1954,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1768" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1784" } }, { @@ -2006,7 +2006,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1779" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1795" } }, { @@ -2065,7 +2065,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1790" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1806" } }, { @@ -2136,7 +2136,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1801" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1817" } }, { @@ -2177,7 +2177,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1812" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1828" } }, { @@ -2245,7 +2245,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1834" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1850" } }, { @@ -2306,7 +2306,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1845" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1861" } }, { @@ -2413,7 +2413,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1856" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1872" } }, { @@ -2569,7 +2569,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1867" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1883" } }, { @@ -2635,7 +2635,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1878" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1894" } }, { @@ -2976,7 +2976,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1889" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1905" } }, { @@ -3021,7 +3021,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1900" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1916" } }, { @@ -3068,7 +3068,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1933" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1949" } }, { @@ -3139,7 +3139,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1944" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1960" } }, { @@ -3282,7 +3282,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1955" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1971" } }, { @@ -3612,7 +3612,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1966" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1982" } }, { @@ -3680,7 +3680,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1977" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1993" } }, { @@ -3914,7 +3914,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1988" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2004" } }, { @@ -4077,7 +4077,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1999" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2015" } }, { @@ -4160,7 +4160,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2010" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2026" } }, { @@ -4201,7 +4201,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2021" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2037" } }, { @@ -4272,7 +4272,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2032" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2048" } }, { @@ -4416,7 +4416,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2043" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2059" } }, { @@ -4456,7 +4456,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2054" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2070" } }, { @@ -4497,7 +4497,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2065" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2081" } }, { @@ -4622,7 +4622,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2076" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2092" } }, { @@ -4747,7 +4747,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2087" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2103" } }, { @@ -4786,7 +4786,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2098" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2114" } }, { @@ -4833,7 +4833,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2109" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2125" } }, { @@ -4888,7 +4888,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2120" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2136" } }, { @@ -4917,7 +4917,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2131" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2147" } }, { @@ -5054,7 +5054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2142" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2158" } }, { @@ -5083,7 +5083,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2153" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2169" } }, { @@ -5137,7 +5137,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2164" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2180" } }, { @@ -5228,7 +5228,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2175" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2191" } }, { @@ -5256,7 +5256,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2186" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2202" } }, { @@ -5346,7 +5346,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2197" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2213" } }, { @@ -5602,7 +5602,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2208" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2224" } }, { @@ -5847,7 +5847,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2219" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2235" } }, { @@ -5903,7 +5903,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2230" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2246" } }, { @@ -5950,7 +5950,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2241" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2257" } }, { @@ -6048,7 +6048,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2252" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2268" } }, { @@ -6114,7 +6114,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2263" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2279" } }, { @@ -6180,7 +6180,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2274" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2290" } }, { @@ -6289,7 +6289,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2285" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2301" } }, { @@ -6347,7 +6347,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2296" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2312" } }, { @@ -6469,7 +6469,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2307" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2323" } }, { @@ -6673,7 +6673,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2318" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2334" } }, { @@ -6868,7 +6868,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2329" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2345" } }, { @@ -7055,7 +7055,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2340" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2356" } }, { @@ -7259,7 +7259,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2351" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2367" } }, { @@ -7350,7 +7350,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2362" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2378" } }, { @@ -7408,7 +7408,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2373" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2389" } }, { @@ -7666,7 +7666,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2384" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2400" } }, { @@ -7941,7 +7941,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2395" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2411" } }, { @@ -7969,7 +7969,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2406" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2422" } }, { @@ -8007,7 +8007,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2417" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2433" } }, { @@ -8115,7 +8115,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2428" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2444" } }, { @@ -8153,7 +8153,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2439" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2455" } }, { @@ -8182,7 +8182,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2450" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2466" } }, { @@ -8245,7 +8245,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2461" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2477" } }, { @@ -8308,7 +8308,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2472" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2488" } }, { @@ -8353,7 +8353,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2483" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2499" } }, { @@ -8475,7 +8475,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2494" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2510" } }, { @@ -8630,7 +8630,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2505" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2521" } }, { @@ -8684,7 +8684,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2516" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2532" } }, { @@ -8738,7 +8738,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2527" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2543" } }, { @@ -8793,7 +8793,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2538" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2554" } }, { @@ -8936,7 +8936,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2549" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2565" } }, { @@ -9063,7 +9063,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2560" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2576" } }, { @@ -9165,7 +9165,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2571" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2587" } }, { @@ -9388,7 +9388,190 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2582" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2598" + } + }, + { + "name": "Filecoin.GetActorEvents", + "description": "```go\nfunc (s *FullNodeStruct) GetActorEvents(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) {\n\tif s.Internal.GetActorEvents == nil {\n\t\treturn *new([]*types.ActorEvent), ErrNotSupported\n\t}\n\treturn s.Internal.GetActorEvents(p0, p1)\n}\n```", + "summary": "GetActorEvents returns all user-programmed and built-in actor events that match the given\nfilter.\nThis is a request/response API.\nResults available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange\nconfiguration options and also the amount of historical data available in the node.\n\nThis is an EXPERIMENTAL API and may be subject to change.\n", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "*types.ActorEventFilter", + "summary": "", + "schema": { + "examples": [ + { + "addresses": [ + "f01234" + ], + "fields": { + "abc": [ + { + "codec": 81, + 
"value": "ZGRhdGE=" + } + ] + }, + "fromHeight": 1010, + "toHeight": 1020 + } + ], + "additionalProperties": false, + "properties": { + "addresses": { + "items": { + "additionalProperties": false, + "type": "object" + }, + "type": "array" + }, + "fields": { + "patternProperties": { + ".*": { + "items": { + "additionalProperties": false, + "properties": { + "codec": { + "title": "number", + "type": "number" + }, + "value": { + "media": { + "binaryEncoding": "base64" + }, + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "fromHeight": { + "title": "number", + "type": "number" + }, + "tipsetKey": { + "additionalProperties": false, + "type": "object" + }, + "toHeight": { + "title": "number", + "type": "number" + } + }, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "[]*types.ActorEvent", + "description": "[]*types.ActorEvent", + "summary": "", + "schema": { + "examples": [ + [ + { + "entries": [ + { + "Flags": 7, + "Key": "string value", + "Codec": 42, + "Value": "Ynl0ZSBhcnJheQ==" + } + ], + "emitter": "f01234", + "reverted": true, + "height": 10101, + "tipsetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "msgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] + ], + "items": [ + { + "additionalProperties": false, + "properties": { + "emitter": { + "additionalProperties": false, + "type": "object" + }, + "entries": { + "items": { + "additionalProperties": false, + "properties": { + "Codec": { + "title": "number", + "type": "number" + }, + "Flags": { + "title": "number", + "type": "number" + }, + "Key": { + "type": "string" + }, + "Value": { + "media": { + "binaryEncoding": "base64" + }, + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "height": { + "title": 
"number", + "type": "number" + }, + "msgCid": { + "title": "Content Identifier", + "type": "string" + }, + "reverted": { + "type": "boolean" + }, + "tipsetKey": { + "additionalProperties": false, + "type": "object" + } + }, + "type": [ + "object" + ] + } + ], + "type": [ + "array" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2609" } }, { @@ -9468,7 +9651,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2593" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2620" } }, { @@ -9513,7 +9696,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2604" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2631" } }, { @@ -9569,7 +9752,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2615" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2642" } }, { @@ -9649,7 +9832,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2626" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2653" } }, { @@ -9729,7 +9912,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2637" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2664" } }, { @@ -10214,7 +10397,7 @@ "deprecated": false, "externalDocs": { "description": 
"Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2648" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2675" } }, { @@ -10408,7 +10591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2659" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2686" } }, { @@ -10563,7 +10746,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2670" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2697" } }, { @@ -10812,7 +10995,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2681" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2708" } }, { @@ -10967,7 +11150,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2692" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2719" } }, { @@ -11144,7 +11327,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2703" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2730" } }, { @@ -11242,7 +11425,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2714" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2741" } }, { @@ -11407,7 +11590,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2725" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2752" } }, { @@ -11446,7 +11629,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2736" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2763" } }, { @@ -11511,7 +11694,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2747" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2774" } }, { @@ -11557,7 +11740,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2758" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2785" } }, { @@ -11707,7 +11890,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2769" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2796" } }, { @@ -11844,7 +12027,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2780" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2807" } }, { @@ -12075,7 +12258,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2791" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2818" } }, { @@ -12212,7 +12395,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2802" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2829" } }, { @@ -12377,7 +12560,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2813" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2840" } }, { @@ -12454,7 +12637,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2824" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2851" } }, { @@ -12649,7 +12832,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2846" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2873" } }, { @@ -12828,7 +13011,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2857" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2884" } }, { @@ -12990,7 +13173,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2868" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2895" } }, { @@ -13138,7 +13321,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2879" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2906" } }, { @@ -13366,7 +13549,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2890" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2917" } }, { @@ -13514,7 +13697,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2901" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2928" } }, { @@ -13726,7 +13909,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2912" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2939" } }, { @@ -13932,7 +14115,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2923" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2950" } }, { @@ -14000,7 +14183,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2934" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2961" } }, { @@ -14117,7 +14300,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2945" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2972" } }, { @@ -14208,7 +14391,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2956" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2983" } }, { @@ -14294,7 +14477,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2967" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2994" } }, { @@ -14489,7 +14672,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2978" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3005" } }, { @@ -14651,7 +14834,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2989" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3016" } }, { @@ -14847,7 +15030,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3000" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3027" } }, { @@ -15027,7 +15210,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3011" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3038" } }, { @@ -15190,7 +15373,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3022" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3049" } }, { @@ -15217,7 +15400,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3033" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3060" } }, { @@ -15244,7 +15427,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3044" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3071" } }, { @@ -15343,7 +15526,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3055" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3082" } }, { @@ -15389,7 +15572,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3066" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3093" } }, { @@ -15489,7 +15672,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3077" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3104" } }, { @@ -15605,7 +15788,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3088" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3115" } }, { @@ -15653,7 +15836,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3099" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3126" } }, { @@ -15745,7 +15928,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3110" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3137" } }, { @@ -15860,7 +16043,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3121" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3148" } }, { @@ -15908,7 +16091,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3132" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3159" } }, { @@ -15945,7 +16128,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3143" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3170" } }, { @@ -16217,7 +16400,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3154" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3181" } }, { @@ -16265,7 +16448,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3165" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3192" } }, { @@ -16323,7 +16506,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3176" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3203" } }, { @@ -16528,7 +16711,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3187" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3214" } }, { @@ -16731,7 +16914,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3198" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3225" } }, { @@ -16900,7 +17083,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3209" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3236" } }, { @@ -17104,7 +17287,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3220" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3247" } }, { @@ -17271,7 +17454,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3231" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3258" } }, { @@ -17478,7 +17661,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3242" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3269" } }, { @@ -17546,7 +17729,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3253" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3280" } }, { @@ -17563,7 +17746,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 21 + 22 ], "type": [ "number" @@ -17598,7 +17781,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3264" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3291" } }, { 
@@ -17615,7 +17798,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 21 + 22 ], "type": [ "number" @@ -17647,7 +17830,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3275" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3302" } }, { @@ -17738,7 +17921,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3286" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3313" } }, { @@ -18244,7 +18427,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3297" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3324" } }, { @@ -18350,7 +18533,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3308" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3335" } }, { @@ -18402,7 +18585,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3319" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3346" } }, { @@ -18954,7 +19137,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3330" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3357" } }, { @@ -19068,7 +19251,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3341" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3368" } }, { @@ -19165,7 +19348,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3352" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3379" } }, { @@ -19265,7 +19448,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3363" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3390" } }, { @@ -19353,7 +19536,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3374" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3401" } }, { @@ -19453,7 +19636,185 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3385" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3412" + } + }, + { + "name": "Filecoin.StateGetAllAllocations", + "description": "```go\nfunc (s *FullNodeStruct) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {\n\tif s.Internal.StateGetAllAllocations == nil {\n\t\treturn *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported\n\t}\n\treturn s.Internal.StateGetAllAllocations(p0, p1)\n}\n```", + "summary": "StateGetAllAllocations returns the all the allocations available in verified registry actor.\n", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "types.TipSetKey", + "summary": "", + "schema": { + 
"examples": [ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] + ], + "additionalProperties": false, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "map[verifregtypes.AllocationId]verifregtypes.Allocation", + "description": "map[verifregtypes.AllocationId]verifregtypes.Allocation", + "summary": "", + "schema": { + "examples": [ + {} + ], + "patternProperties": { + ".*": { + "additionalProperties": false, + "properties": { + "Client": { + "title": "number", + "type": "number" + }, + "Data": { + "title": "Content Identifier", + "type": "string" + }, + "Expiration": { + "title": "number", + "type": "number" + }, + "Provider": { + "title": "number", + "type": "number" + }, + "Size": { + "title": "number", + "type": "number" + }, + "TermMax": { + "title": "number", + "type": "number" + }, + "TermMin": { + "title": "number", + "type": "number" + } + }, + "type": "object" + } + }, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3423" + } + }, + { + "name": "Filecoin.StateGetAllClaims", + "description": "```go\nfunc (s *FullNodeStruct) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {\n\tif s.Internal.StateGetAllClaims == nil {\n\t\treturn *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported\n\t}\n\treturn s.Internal.StateGetAllClaims(p0, p1)\n}\n```", + "summary": "StateGetAllClaims returns the all the claims available in verified registry actor.\n", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "types.TipSetKey", + "summary": "", + "schema": { + "examples": [ + [ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] + ], + "additionalProperties": false, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "map[verifregtypes.ClaimId]verifregtypes.Claim", + "description": "map[verifregtypes.ClaimId]verifregtypes.Claim", + "summary": "", + "schema": { + "examples": [ + {} + ], + "patternProperties": { + ".*": { + "additionalProperties": false, + "properties": { + "Client": { + "title": "number", + "type": "number" + }, + "Data": { + "title": "Content Identifier", + "type": "string" + }, + "Provider": { + "title": "number", + "type": "number" + }, + "Sector": { + "title": "number", + "type": "number" + }, + "Size": { + "title": "number", + "type": "number" + }, + "TermMax": { + "title": "number", + "type": "number" + }, + "TermMin": { + "title": "number", + "type": "number" + }, + "TermStart": { + "title": "number", + "type": "number" + } + }, + "type": "object" + } + }, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3434" } }, { @@ -19578,7 +19939,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3396" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3445" } }, { @@ -19687,7 +20048,77 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3407" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3456" + } + }, + { + "name": "Filecoin.StateGetAllocationIdForPendingDeal", + "description": "```go\nfunc (s 
*FullNodeStruct) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) {\n\tif s.Internal.StateGetAllocationIdForPendingDeal == nil {\n\t\treturn *new(verifreg.AllocationId), ErrNotSupported\n\t}\n\treturn s.Internal.StateGetAllocationIdForPendingDeal(p0, p1, p2)\n}\n```", + "summary": "StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID\n", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "abi.DealID", + "summary": "", + "schema": { + "title": "number", + "description": "Number is a number", + "examples": [ + 5432 + ], + "type": [ + "number" + ] + }, + "required": true, + "deprecated": false + }, + { + "name": "p2", + "description": "types.TipSetKey", + "summary": "", + "schema": { + "examples": [ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] + ], + "additionalProperties": false, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "verifreg.AllocationId", + "description": "verifreg.AllocationId", + "summary": "", + "schema": { + "title": "number", + "description": "Number is a number", + "examples": [ + 0 + ], + "type": [ + "number" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3467" } }, { @@ -19790,7 +20221,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3418" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3478" } }, { @@ -19851,7 +20282,7 @@ "deprecated": false, "externalDocs": { "description": "Github 
remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3429" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3489" } }, { @@ -19981,7 +20412,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3440" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3500" } }, { @@ -20088,7 +20519,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3451" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3511" } }, { @@ -20136,7 +20567,9 @@ "UpgradeHyggeHeight": 10101, "UpgradeLightningHeight": 10101, "UpgradeThunderHeight": 10101, - "UpgradeWatermelonHeight": 10101 + "UpgradeWatermelonHeight": 10101, + "UpgradeDragonHeight": 10101, + "UpgradePhoenixHeight": 10101 }, "Eip155ChainID": 123 } @@ -20182,6 +20615,10 @@ "title": "number", "type": "number" }, + "UpgradeDragonHeight": { + "title": "number", + "type": "number" + }, "UpgradeHyggeHeight": { "title": "number", "type": "number" @@ -20222,6 +20659,10 @@ "title": "number", "type": "number" }, + "UpgradePhoenixHeight": { + "title": "number", + "type": "number" + }, "UpgradeRefuelHeight": { "title": "number", "type": "number" @@ -20287,7 +20728,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3462" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3522" } }, { @@ -20364,7 +20805,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3473" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3533" } }, 
{ @@ -20441,7 +20882,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3484" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3544" } }, { @@ -20550,7 +20991,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3495" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3555" } }, { @@ -20659,7 +21100,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3506" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3566" } }, { @@ -20720,7 +21161,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3517" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3577" } }, { @@ -20830,7 +21271,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3528" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3588" } }, { @@ -20891,7 +21332,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3539" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3599" } }, { @@ -20959,7 +21400,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3550" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3610" } }, { @@ -21027,7 
+21468,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3561" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3621" } }, { @@ -21108,7 +21549,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3572" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3632" } }, { @@ -21167,8 +21608,7 @@ "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } } @@ -21240,10 +21680,6 @@ "SlashEpoch": { "title": "number", "type": "number" - }, - "VerifiedClaim": { - "title": "number", - "type": "number" } }, "type": "object" @@ -21262,7 +21698,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3583" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3643" } }, { @@ -21334,7 +21770,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3594" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3654" } }, { @@ -21409,8 +21845,7 @@ "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ], @@ -21479,10 +21914,6 @@ "SlashEpoch": { "title": "number", "type": "number" - }, - "VerifiedClaim": { - "title": "number", - "type": "number" } }, "type": "object" @@ -21498,7 +21929,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3605" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3665" } }, { @@ -21663,7 +22094,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3616" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3676" } }, { @@ -21733,7 +22164,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3627" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3687" } }, { @@ -21801,7 +22232,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3638" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3698" } }, { @@ -21894,7 +22325,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3649" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3709" } }, { @@ -21965,7 +22396,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3660" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3720" } }, { @@ -22166,7 +22597,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3671" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3731" } }, { @@ -22298,7 +22729,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3682" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3742" } }, { @@ -22435,7 +22866,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3693" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3753" } }, { @@ -22546,7 +22977,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3704" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3764" } }, { @@ -22678,7 +23109,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3715" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3775" } }, { @@ -22809,7 +23240,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3726" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3786" } }, { @@ -22880,7 +23311,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3737" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3797" } }, { @@ -22964,7 +23395,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3748" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3808" } }, { @@ -23050,7 +23481,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3759" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3819" } }, { @@ -23233,7 +23664,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3770" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3830" } }, { @@ -23260,7 +23691,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3781" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3841" } }, { @@ -23301,7 +23732,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 21 + 22 ], "type": [ "number" @@ -23313,7 +23744,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3792" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3852" } }, { @@ -23401,7 +23832,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3803" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3863" } }, { @@ -23852,7 +24283,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3814" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3874" } }, { @@ -24019,7 +24450,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3825" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3885" } }, { @@ -24117,7 +24548,7 @@ "deprecated": false, "externalDocs": { "description": "Github 
remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3836" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3896" } }, { @@ -24290,7 +24721,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3847" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3907" } }, { @@ -24388,7 +24819,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3858" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3918" } }, { @@ -24539,7 +24970,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3869" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3929" } }, { @@ -24624,7 +25055,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3880" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3940" } }, { @@ -24692,7 +25123,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3891" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3951" } }, { @@ -24744,7 +25175,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3902" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3962" } }, { @@ -24812,7 +25243,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - 
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3913" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3973" } }, { @@ -24973,7 +25404,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3924" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3984" } }, { @@ -25020,7 +25451,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3935" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4006" } }, { @@ -25067,7 +25498,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3946" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4017" } }, { @@ -25110,7 +25541,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3968" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4039" } }, { @@ -25206,7 +25637,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3979" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4050" } }, { @@ -25472,7 +25903,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3990" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4061" } }, { @@ -25495,7 +25926,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4001" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4072" } }, { @@ -25538,7 +25969,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4012" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4083" } }, { @@ -25589,7 +26020,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4023" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4094" } }, { @@ -25634,7 +26065,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4034" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4105" } }, { @@ -25662,7 +26093,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4045" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4116" } }, { @@ -25702,7 +26133,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4056" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4127" } }, { @@ -25761,7 +26192,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4067" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4138" } }, { @@ -25805,7 +26236,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4078" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4149" } }, { @@ -25864,7 +26295,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4089" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4160" } }, { @@ -25901,7 +26332,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4100" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4171" } }, { @@ -25945,7 +26376,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4111" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4182" } }, { @@ -25985,7 +26416,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4122" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4193" } }, { @@ -26060,7 +26491,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4133" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4204" } }, { @@ -26268,7 +26699,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4144" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4215" } }, { @@ -26312,7 +26743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4155" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4226" } }, { @@ -26402,7 +26833,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4166" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4237" } }, { @@ -26429,7 +26860,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4177" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4248" } } ] diff --git a/build/openrpc/gateway.json b/build/openrpc/gateway.json index 3bfa391dcda..8c0f3e68b5a 100644 --- a/build/openrpc/gateway.json +++ b/build/openrpc/gateway.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.25.3-dev" + "version": "1.27.0-dev" }, "methods": [ { @@ -242,7 +242,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4188" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4259" } }, { @@ -473,7 +473,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4199" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4270" } }, { @@ -505,7 +505,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4210" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4281" } }, { @@ -611,7 +611,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4221" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4292" } }, { @@ -704,7 +704,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4232" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4303" } }, { @@ -788,7 +788,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4243" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4314" } }, { @@ -888,7 +888,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4254" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4325" } }, { @@ -944,7 +944,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4265" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4336" } }, { @@ -1017,7 +1017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4276" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4347" } }, { @@ -1090,7 +1090,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4287" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4358" } }, { @@ -1137,7 +1137,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4298" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4369" } }, { @@ -1169,7 +1169,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4309" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4380" } }, { @@ -1206,7 +1206,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4331" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4402" } }, { @@ -1253,7 +1253,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4342" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4413" } }, { @@ -1293,7 +1293,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4353" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4424" } }, { @@ -1340,7 +1340,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4364" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4435" } }, { @@ -1369,7 +1369,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4375" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4446" } }, { @@ -1506,7 +1506,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4386" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4457" } }, { @@ -1535,7 +1535,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4397" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4468" } }, { @@ -1589,7 +1589,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4408" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4479" } }, { @@ -1680,7 +1680,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4419" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4490" } }, { @@ -1708,7 +1708,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4430" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4501" } }, { @@ -1798,7 +1798,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4441" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4512" } }, { @@ -2054,7 +2054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4452" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4523" } }, { @@ -2299,7 +2299,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4463" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4534" } }, { @@ -2355,7 +2355,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4474" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4545" } }, { @@ -2402,7 +2402,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4485" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4556" } }, { @@ -2500,7 +2500,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4496" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4567" } }, { @@ -2566,7 +2566,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4507" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4578" } }, { @@ -2632,7 +2632,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4518" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4589" } }, { @@ -2741,7 +2741,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4529" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4600" } }, { @@ -2799,7 +2799,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4540" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4611" } }, { @@ -2921,7 +2921,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4551" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4622" } }, { @@ -3108,7 +3108,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4562" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4633" } }, { @@ -3312,7 +3312,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4573" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4644" } }, { @@ -3403,7 +3403,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4584" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4655" } }, { @@ -3461,7 +3461,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4595" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4666" } }, { @@ -3719,7 +3719,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4606" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4677" } }, { @@ -3994,7 +3994,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4617" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4688" } }, { @@ -4022,7 +4022,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4628" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4699" } }, { @@ -4060,7 +4060,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4639" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4710" } }, { @@ -4168,7 +4168,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4650" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4721" } }, { @@ -4206,7 +4206,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4661" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4732" } }, { @@ -4235,7 +4235,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4672" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4743" } }, { @@ -4298,7 +4298,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4683" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4754" } }, { @@ -4361,7 +4361,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4694" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4765" } }, { @@ -4406,7 +4406,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4705" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4776" } }, { @@ -4528,7 +4528,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4716" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4787" } }, { @@ -4683,7 +4683,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4727" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4798" } }, { @@ -4737,7 +4737,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4738" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4809" } }, { @@ -4791,7 +4791,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4749" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4820" } }, { @@ -4893,7 +4893,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4760" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4831" } }, { @@ -5116,7 +5116,190 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4771" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4842" + } + }, + { + "name": "Filecoin.GetActorEvents", + "description": "```go\nfunc (s *GatewayStruct) GetActorEvents(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) {\n\tif s.Internal.GetActorEvents == nil {\n\t\treturn *new([]*types.ActorEvent), ErrNotSupported\n\t}\n\treturn s.Internal.GetActorEvents(p0, p1)\n}\n```", + "summary": "There are not yet any comments for this method.", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "*types.ActorEventFilter", + "summary": "", + "schema": { + "examples": [ + { + "addresses": [ + "f01234" + ], + "fields": { + "abc": [ + { + "codec": 81, + "value": "ZGRhdGE=" + } + ] + }, + "fromHeight": 1010, + "toHeight": 1020 + } + ], + "additionalProperties": false, + "properties": { + "addresses": { + "items": { + "additionalProperties": false, + "type": "object" + }, + "type": "array" + }, + "fields": { + "patternProperties": { + ".*": { + "items": { + "additionalProperties": false, + "properties": { + "codec": { + "title": "number", + "type": "number" + }, + "value": { + "media": { + "binaryEncoding": "base64" + }, + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "fromHeight": { + "title": "number", + "type": "number" + }, + "tipsetKey": { + "additionalProperties": false, + "type": "object" + }, + "toHeight": { + "title": "number", + "type": "number" + } + }, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "[]*types.ActorEvent", + "description": "[]*types.ActorEvent", + "summary": "", + "schema": { + "examples": [ + [ + { + "entries": [ + { + "Flags": 7, + "Key": "string value", + "Codec": 42, + "Value": "Ynl0ZSBhcnJheQ==" + } + ], + "emitter": "f01234", + "reverted": true, + "height": 
10101, + "tipsetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "msgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] + ], + "items": [ + { + "additionalProperties": false, + "properties": { + "emitter": { + "additionalProperties": false, + "type": "object" + }, + "entries": { + "items": { + "additionalProperties": false, + "properties": { + "Codec": { + "title": "number", + "type": "number" + }, + "Flags": { + "title": "number", + "type": "number" + }, + "Key": { + "type": "string" + }, + "Value": { + "media": { + "binaryEncoding": "base64" + }, + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "height": { + "title": "number", + "type": "number" + }, + "msgCid": { + "title": "Content Identifier", + "type": "string" + }, + "reverted": { + "type": "boolean" + }, + "tipsetKey": { + "additionalProperties": false, + "type": "object" + } + }, + "type": [ + "object" + ] + } + ], + "type": [ + "array" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4853" } }, { @@ -5310,7 +5493,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4782" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4864" } }, { @@ -5356,7 +5539,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4793" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4875" } }, { @@ -5506,7 +5689,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - 
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4804" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4886" } }, { @@ -5643,7 +5826,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4815" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4897" } }, { @@ -5711,7 +5894,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4826" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4908" } }, { @@ -5828,7 +6011,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4837" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4919" } }, { @@ -5919,7 +6102,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4848" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4930" } }, { @@ -6005,7 +6188,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4859" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4941" } }, { @@ -6032,7 +6215,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4870" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4952" } }, { @@ -6059,7 +6242,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4881" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4963" } }, { @@ -6127,7 +6310,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4892" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4974" } }, { @@ -6633,7 +6816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4903" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4985" } }, { @@ -6730,7 +6913,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4914" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4996" } }, { @@ -6830,7 +7013,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4925" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5007" } }, { @@ -6930,7 +7113,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4936" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5018" } }, { @@ -7055,7 +7238,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4947" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5029" } }, { @@ -7164,7 +7347,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4958" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5040" } }, { @@ -7267,7 +7450,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4969" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5051" } }, { @@ -7397,7 +7580,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4980" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5062" } }, { @@ -7504,7 +7687,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4991" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5073" } }, { @@ -7565,7 +7748,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5002" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5084" } }, { @@ -7633,7 +7816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5013" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5095" } }, { @@ -7714,7 +7897,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5024" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5106" } }, { @@ -7789,8 +7972,7 @@ "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + 
"SlashEpoch": 10101 } } ], @@ -7859,10 +8041,6 @@ "SlashEpoch": { "title": "number", "type": "number" - }, - "VerifiedClaim": { - "title": "number", - "type": "number" } }, "type": "object" @@ -7878,7 +8056,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5035" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5117" } }, { @@ -8079,7 +8257,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5046" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5128" } }, { @@ -8190,7 +8368,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5057" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5139" } }, { @@ -8321,7 +8499,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5068" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5150" } }, { @@ -8407,7 +8585,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5079" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5161" } }, { @@ -8434,7 +8612,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5090" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5172" } }, { @@ -8475,7 +8653,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 21 + 22 ], "type": [ "number" 
@@ -8487,7 +8665,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5101" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5183" } }, { @@ -8575,7 +8753,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5112" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5194" } }, { @@ -9026,7 +9204,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5123" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5205" } }, { @@ -9193,7 +9371,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5134" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5216" } }, { @@ -9366,7 +9544,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5145" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5227" } }, { @@ -9434,7 +9612,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5156" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5238" } }, { @@ -9502,7 +9680,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5167" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5249" } }, { @@ -9663,7 +9841,7 @@ 
"deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5178" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5260" } }, { @@ -9708,7 +9886,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5189" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5282" } }, { @@ -9753,7 +9931,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5200" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5293" } }, { @@ -9780,7 +9958,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5211" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5304" } } ] diff --git a/build/openrpc/miner.json b/build/openrpc/miner.json index 58807933b83..0686987c87e 100644 --- a/build/openrpc/miner.json +++ b/build/openrpc/miner.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.25.3-dev" + "version": "1.27.0-dev" }, "methods": [ { @@ -30,7 +30,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5607" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5700" } }, { @@ -109,7 +109,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5618" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5711" } }, { @@ -155,7 +155,7 @@ "deprecated": 
false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5629" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5722" } }, { @@ -203,7 +203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5640" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5733" } }, { @@ -251,7 +251,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5651" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5744" } }, { @@ -354,7 +354,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5662" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5755" } }, { @@ -428,7 +428,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5673" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5766" } }, { @@ -538,7 +538,7 @@ "title": "number", "description": "Number is a number", "examples": [ - 21 + 22 ], "type": [ "number" @@ -591,7 +591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5684" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5777" } }, { @@ -742,7 +742,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5695" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5788" } }, { @@ -781,7 +781,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5706" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5799" } }, { @@ -833,7 +833,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5717" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5810" } }, { @@ -872,7 +872,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5739" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5832" } }, { @@ -924,7 +924,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5750" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5843" } }, { @@ -996,7 +996,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5761" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5854" } }, { @@ -1035,7 +1035,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5772" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5865" } }, { @@ -1074,7 +1074,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5783" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5876" } }, { @@ -1101,7 +1101,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5794" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5887" } }, { @@ -1128,7 +1128,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5805" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5898" } }, { @@ -1155,7 +1155,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5816" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5909" } }, { @@ -1182,7 +1182,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5827" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5920" } }, { @@ -1209,7 +1209,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5838" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5931" } }, { @@ -1236,7 +1236,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5849" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5942" } }, { @@ -1294,7 +1294,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5860" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5953" } }, { @@ -1329,8 +1329,7 @@ "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ] @@ -1402,10 +1401,6 @@ "SlashEpoch": { "title": "number", "type": "number" - }, - "VerifiedClaim": { - "title": "number", - "type": "number" } }, "type": "object" @@ -1426,7 +1421,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5871" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5964" } }, { @@ -1466,7 +1461,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5882" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5975" } }, { @@ -1505,7 +1500,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5893" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5986" } }, { @@ -1544,7 +1539,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5904" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5997" } }, { @@ -1583,7 +1578,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5915" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6008" } }, { @@ -1622,7 +1617,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5926" 
+ "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6019" } }, { @@ -1661,7 +1656,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5937" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6030" } }, { @@ -1700,7 +1695,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5948" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6041" } }, { @@ -1752,7 +1747,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5959" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6052" } }, { @@ -1775,7 +1770,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5970" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6063" } }, { @@ -1818,7 +1813,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5981" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6074" } }, { @@ -1889,7 +1884,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5992" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6085" } }, { @@ -2270,7 +2265,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6003" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6096" } }, { @@ -2369,7 +2364,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6025" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6118" } }, { @@ -2420,7 +2415,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6047" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6140" } }, { @@ -2478,7 +2473,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6058" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6151" } }, { @@ -2621,7 +2616,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6069" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6162" } }, { @@ -2656,8 +2651,7 @@ "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ] @@ -2729,10 +2723,6 @@ "SlashEpoch": { "title": "number", "type": "number" - }, - "VerifiedClaim": { - "title": "number", - "type": "number" } }, "type": "object" @@ -2753,7 +2743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6080" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6173" } }, { @@ -3017,7 +3007,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6091" 
+ "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6184" } }, { @@ -3054,7 +3044,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6102" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6195" } }, { @@ -3192,7 +3182,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6113" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6206" } }, { @@ -3215,7 +3205,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6124" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6217" } }, { @@ -3286,7 +3276,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6135" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6228" } }, { @@ -3329,7 +3319,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6146" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6239" } }, { @@ -3436,7 +3426,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6157" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6250" } }, { @@ -3499,7 +3489,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6168" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6261" } }, { @@ -3531,7 +3521,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6179" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6272" } }, { @@ -3619,7 +3609,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6190" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6283" } }, { @@ -3710,7 +3700,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6201" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6294" } }, { @@ -3750,7 +3740,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6212" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6305" } }, { @@ -3790,7 +3780,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6223" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6316" } }, { @@ -3831,7 +3821,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6234" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6327" } }, { @@ -3899,7 +3889,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6245" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6338" } }, { @@ -4030,7 +4020,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6256" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6349" } }, { @@ -4161,7 +4151,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6267" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6360" } }, { @@ -4261,7 +4251,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6278" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6371" } }, { @@ -4361,7 +4351,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6289" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6382" } }, { @@ -4461,7 +4451,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6300" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6393" } }, { @@ -4561,7 +4551,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6311" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6404" } }, { @@ -4661,7 +4651,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6322" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6415" } }, { @@ -4761,7 +4751,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6333" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6426" } }, { @@ -4885,7 +4875,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6344" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6437" } }, { @@ -5009,7 +4999,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6355" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6448" } }, { @@ -5124,7 +5114,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6366" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6459" } }, { @@ -5224,7 +5214,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6377" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6470" } }, { @@ -5357,7 +5347,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6388" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6481" } }, { @@ -5481,7 +5471,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6399" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6492" } }, { @@ -5605,7 +5595,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6410" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6503" } }, { @@ -5729,7 +5719,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6421" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6514" } }, { @@ -5862,7 +5852,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6432" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6525" } }, { @@ -5962,7 +5952,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6443" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6536" } }, { @@ -6003,7 +5993,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6454" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6547" } }, { @@ -6075,7 +6065,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6465" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6558" } }, { @@ -6125,7 +6115,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6476" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6569" } }, { @@ -6169,7 +6159,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6487" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6580" } }, { @@ -6210,12 +6200,12 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6498" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6591" } }, { "name": "Filecoin.SectorAddPieceToAny", - "description": "```go\nfunc (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) {\n\tif s.Internal.SectorAddPieceToAny == nil {\n\t\treturn *new(SectorOffset), ErrNotSupported\n\t}\n\treturn s.Internal.SectorAddPieceToAny(p0, p1, p2, p3)\n}\n```", + "description": "```go\nfunc (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) {\n\tif s.Internal.SectorAddPieceToAny == nil {\n\t\treturn *new(SectorOffset), ErrNotSupported\n\t}\n\treturn s.Internal.SectorAddPieceToAny(p0, p1, p2, p3)\n}\n```", "summary": "Add piece to an open sector. 
If no sectors with enough space are open,\neither a new sector will be created, or this call will block until more\nsectors can be created.\n", "paramStructure": "by-position", "params": [ @@ -6251,7 +6241,7 @@ }, { "name": "p3", - "description": "PieceDealInfo", + "description": "piece.PieceDealInfo", "summary": "", "schema": { "examples": [ @@ -6279,6 +6269,14 @@ "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } ], @@ -6354,6 +6352,53 @@ "KeepUnsealed": { "type": "boolean" }, + "PieceActivationManifest": { + "additionalProperties": false, + "properties": { + "CID": { + "title": "Content Identifier", + "type": "string" + }, + "Notify": { + "items": { + "additionalProperties": false, + "properties": { + "Address": { + "additionalProperties": false, + "type": "object" + }, + "Payload": { + "media": { + "binaryEncoding": "base64" + }, + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "Size": { + "title": "number", + "type": "number" + }, + "VerifiedAllocationKey": { + "additionalProperties": false, + "properties": { + "Client": { + "title": "number", + "type": "number" + }, + "ID": { + "title": "number", + "type": "number" + } + }, + "type": "object" + } + }, + "type": "object" + }, "PublishCid": { "title": "Content Identifier", "type": "string" @@ -6399,7 +6444,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6509" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6602" } }, { @@ -6473,7 +6518,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6520" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6613" } }, { @@ -6523,7 +6568,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6531" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6624" } }, { @@ -6552,7 +6597,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6542" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6635" } }, { @@ -6581,7 +6626,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6553" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6646" } }, { @@ -6637,7 +6682,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6564" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6657" } }, { @@ -6660,7 +6705,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6575" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6668" } }, { @@ -6720,7 +6765,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6586" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6679" } }, { @@ -6759,7 +6804,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6597" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6690" } }, { @@ -6799,7 +6844,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6608" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6701" } }, { @@ -6872,7 +6917,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6619" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6712" } }, { @@ -6936,7 +6981,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6630" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6723" } }, { @@ -6999,7 +7044,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6641" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6734" } }, { @@ -7049,7 +7094,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6652" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6745" } }, { @@ -7103,6 +7148,14 @@ "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } } @@ -7392,6 +7445,53 @@ "KeepUnsealed": { "type": "boolean" }, + "PieceActivationManifest": { + "additionalProperties": false, + "properties": { + "CID": { + "title": "Content Identifier", + "type": "string" + }, + "Notify": { + 
"items": { + "additionalProperties": false, + "properties": { + "Address": { + "additionalProperties": false, + "type": "object" + }, + "Payload": { + "media": { + "binaryEncoding": "base64" + }, + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "Size": { + "title": "number", + "type": "number" + }, + "VerifiedAllocationKey": { + "additionalProperties": false, + "properties": { + "Client": { + "title": "number", + "type": "number" + }, + "ID": { + "title": "number", + "type": "number" + } + }, + "type": "object" + } + }, + "type": "object" + }, "PublishCid": { "title": "Content Identifier", "type": "string" @@ -7553,7 +7653,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6663" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6756" } }, { @@ -7594,7 +7694,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6674" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6767" } }, { @@ -7635,7 +7735,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6685" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6778" } }, { @@ -7676,7 +7776,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6696" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6789" } }, { @@ -7717,7 +7817,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6707" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6800" } }, { @@ -7758,7 +7858,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6718" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6811" } }, { @@ -7789,7 +7889,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6729" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6822" } }, { @@ -7839,7 +7939,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6740" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6833" } }, { @@ -7880,7 +7980,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6751" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6844" } }, { @@ -7919,7 +8019,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6762" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6855" } }, { @@ -7983,7 +8083,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6773" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6866" } }, { @@ -8041,7 +8141,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6784" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6877" } }, { @@ -8134,6 +8234,14 @@ "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } } @@ -8324,6 +8432,53 @@ "KeepUnsealed": { "type": "boolean" }, + "PieceActivationManifest": { + "additionalProperties": false, + "properties": { + "CID": { + "title": "Content Identifier", + "type": "string" + }, + "Notify": { + "items": { + "additionalProperties": false, + "properties": { + "Address": { + "additionalProperties": false, + "type": "object" + }, + "Payload": { + "media": { + "binaryEncoding": "base64" + }, + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "Size": { + "title": "number", + "type": "number" + }, + "VerifiedAllocationKey": { + "additionalProperties": false, + "properties": { + "Client": { + "title": "number", + "type": "number" + }, + "ID": { + "title": "number", + "type": "number" + } + }, + "type": "object" + } + }, + "type": "object" + }, "PublishCid": { "title": "Content Identifier", "type": "string" @@ -8433,7 +8588,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6795" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6888" } }, { @@ -8469,7 +8624,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6806" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6899" } }, { @@ -8612,7 +8767,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6817" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6910" } }, { @@ -8668,7 +8823,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6828" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6921" } }, { @@ -8707,7 +8862,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6839" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6932" } }, { @@ -8866,7 +9021,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6850" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6943" } }, { @@ -8918,7 +9073,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6861" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6954" } }, { @@ -9075,7 +9230,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6872" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6965" } }, { @@ -9175,7 +9330,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6883" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6976" } }, { @@ -9229,7 +9384,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6894" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6987" } }, { @@ -9268,7 +9423,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6905" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6998" } }, { @@ -9353,7 +9508,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6916" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7009" } }, { @@ -9529,7 +9684,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6927" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7020" } }, { @@ -9625,7 +9780,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6938" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7031" } }, { @@ -9739,7 +9894,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6949" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7042" } }, { @@ -9793,7 +9948,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6960" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7053" } }, { @@ -9827,7 +9982,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6971" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7064" } }, { @@ -9914,7 +10069,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6982" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7075" } }, { @@ -9968,7 +10123,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6993" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7086" } }, { @@ -10068,7 +10223,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7004" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7097" } }, { @@ -10145,7 +10300,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7015" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7108" } }, { @@ -10236,7 +10391,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7026" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7119" } }, { @@ -10275,7 +10430,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7037" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7130" } }, { @@ -10391,7 +10546,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7048" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7141" } }, { @@ -12491,7 +12646,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7059" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7152" } } ] diff --git a/build/openrpc/worker.json b/build/openrpc/worker.json index fb0eb99467f..2cea4e87775 100644 --- a/build/openrpc/worker.json +++ b/build/openrpc/worker.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.25.3-dev" + "version": "1.27.0-dev" }, "methods": [ { @@ -161,7 +161,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7147" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7240" } }, { @@ -252,7 +252,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7158" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7251" } }, { @@ -420,7 +420,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7169" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7262" } }, { @@ -447,7 +447,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7180" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7273" } }, { @@ -597,7 +597,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7191" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7284" } }, { @@ -700,7 +700,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7202" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7295" } }, { @@ -803,7 +803,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7213" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7306" } }, { @@ -925,7 +925,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7224" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7317" } }, { @@ -1135,7 +1135,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7235" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7328" } }, { @@ -1306,7 +1306,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7246" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7339" } }, { @@ -3350,7 +3350,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7257" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7350" } }, { @@ -3470,7 +3470,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7268" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7361" } }, { @@ -3531,7 +3531,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7279" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7372" } }, { @@ -3569,7 +3569,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7290" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7383" } }, { @@ -3729,7 +3729,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7301" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7394" } }, { @@ -3913,7 +3913,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7312" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7405" } }, { @@ -4054,7 +4054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7323" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7416" } }, { @@ -4107,7 +4107,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7334" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7427" } }, { @@ -4250,7 +4250,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7345" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7438" } }, { @@ -4474,7 +4474,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7356" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7449" } }, { @@ -4601,7 +4601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7367" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7460" } }, { @@ -4768,7 +4768,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7378" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7471" } }, { @@ -4895,7 +4895,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7389" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7482" } }, { @@ -4933,7 +4933,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7400" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7493" } }, { @@ -4972,7 +4972,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7411" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7504" } }, { @@ -4995,7 +4995,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7422" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7515" } }, { @@ -5034,7 +5034,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7433" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7526" } }, { @@ -5057,7 +5057,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7444" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7537" } }, { @@ -5096,7 +5096,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7455" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7548" } }, { @@ -5130,7 +5130,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7466" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7559" } }, { @@ -5184,7 +5184,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7477" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7570" } }, { @@ -5223,7 +5223,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7488" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7581" } }, { @@ -5262,7 +5262,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7499" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7592" } }, { @@ -5297,7 +5297,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7510" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7603" } }, { @@ -5477,7 +5477,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7521" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7614" } }, { @@ -5506,7 +5506,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7532" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7625" } }, { @@ -5529,7 +5529,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7543" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7636" } } ] diff --git a/build/params_2k.go b/build/params_2k.go index 4826d421d20..cfbfc83b6d0 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -23,7 +23,7 @@ var NetworkBundle = "devnet" var BundleOverrides map[actorstypes.Version]string var ActorDebugging = true -const GenesisNetworkVersion = network.Version20 +var GenesisNetworkVersion = network.Version21 var UpgradeBreezeHeight = abi.ChainEpoch(-1) @@ -65,7 +65,11 @@ var UpgradeLightningHeight = abi.ChainEpoch(-22) var UpgradeThunderHeight = abi.ChainEpoch(-23) -var UpgradeWatermelonHeight = abi.ChainEpoch(200) +var UpgradeWatermelonHeight = abi.ChainEpoch(-24) + +var UpgradeDragonHeight = abi.ChainEpoch(20) + +var UpgradePhoenixHeight = UpgradeDragonHeight + 120 // This fix upgrade only ran on calibrationnet const 
UpgradeWatermelonFixHeight = -100 @@ -74,7 +78,8 @@ const UpgradeWatermelonFixHeight = -100 const UpgradeWatermelonFix2Height = -101 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } var SupportedProofTypes = []abi.RegisteredSealProof{ @@ -91,6 +96,22 @@ func init() { policy.SetMinVerifiedDealSize(MinVerifiedDealSize) policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay) + getGenesisNetworkVersion := func(ev string, def network.Version) network.Version { + hs, found := os.LookupEnv(ev) + if found { + h, err := strconv.Atoi(hs) + if err != nil { + log.Panicf("failed to parse %s env var", ev) + } + + return network.Version(h) + } + + return def + } + + GenesisNetworkVersion = getGenesisNetworkVersion("LOTUS_GENESIS_NETWORK_VERSION", GenesisNetworkVersion) + getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch { hs, found := os.LookupEnv(ev) if found { @@ -129,6 +150,13 @@ func init() { UpgradeLightningHeight = getUpgradeHeight("LOTUS_LIGHTNING_HEIGHT", UpgradeLightningHeight) UpgradeThunderHeight = getUpgradeHeight("LOTUS_THUNDER_HEIGHT", UpgradeThunderHeight) UpgradeWatermelonHeight = getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight) + UpgradeDragonHeight = getUpgradeHeight("LOTUS_DRAGON_HEIGHT", UpgradeDragonHeight) + + UpgradePhoenixHeight = getUpgradeHeight("LOTUS_PHOENIX_HEIGHT", UpgradePhoenixHeight) + DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, + } BuildType |= Build2k diff --git a/build/params_butterfly.go b/build/params_butterfly.go index 864518df5d1..361693c2f06 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -16,10 +16,11 @@ import ( ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } -const GenesisNetworkVersion = network.Version20 +const GenesisNetworkVersion = 
network.Version21 var NetworkBundle = "butterflynet" var BundleOverrides map[actorstypes.Version]string @@ -54,8 +55,11 @@ const UpgradeSharkHeight = -20 const UpgradeHyggeHeight = -21 const UpgradeLightningHeight = -22 const UpgradeThunderHeight = -23 +const UpgradeWatermelonHeight = -24 -const UpgradeWatermelonHeight = 400 +const UpgradeDragonHeight = 5760 + +const UpgradePhoenixHeight = UpgradeDragonHeight + 120 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -100 diff --git a/build/params_calibnet.go b/build/params_calibnet.go index c22eef2fed6..6975adc9f92 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -19,7 +19,8 @@ import ( ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } const GenesisNetworkVersion = network.Version0 @@ -88,6 +89,12 @@ const UpgradeWatermelonFixHeight = 1070494 // 2023-11-21T13:00:00Z const UpgradeWatermelonFix2Height = 1108174 +// 2024-03-11T14:00:00Z +const UpgradeDragonHeight = 1427974 + +// This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet +const UpgradePhoenixHeight = UpgradeDragonHeight + 120 + var SupportedProofTypes = []abi.RegisteredSealProof{ abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1, diff --git a/build/params_interop.go b/build/params_interop.go index 9fd0d0ff833..c30a8adcbed 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -53,8 +53,11 @@ var UpgradeSharkHeight = abi.ChainEpoch(-20) var UpgradeHyggeHeight = abi.ChainEpoch(-21) var UpgradeLightningHeight = abi.ChainEpoch(-22) var UpgradeThunderHeight = abi.ChainEpoch(-23) +var UpgradeWatermelonHeight = abi.ChainEpoch(-24) -const UpgradeWatermelonHeight = 50 +const UpgradeDragonHeight = 50 + +const UpgradePhoenixHeight = UpgradeDragonHeight + 100 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -1 @@ 
-63,7 +66,8 @@ const UpgradeWatermelonFixHeight = -1 const UpgradeWatermelonFix2Height = -2 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } var SupportedProofTypes = []abi.RegisteredSealProof{ diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 8176c4e6df9..c3c1b131bb1 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -16,8 +16,9 @@ import ( ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandIncentinet, - UpgradeSmokeHeight: DrandMainnet, + 0: DrandIncentinet, + UpgradeSmokeHeight: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } var NetworkBundle = "mainnet" @@ -96,7 +97,14 @@ const UpgradeLightningHeight = 2809800 const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21 // 2023-12-12T13:30:00Z -var UpgradeWatermelonHeight = abi.ChainEpoch(3469380) +const UpgradeWatermelonHeight = 3469380 + +// 2024-04-02T14:00:00Z - Epoch will be updated in final release +var UpgradeDragonHeight = abi.ChainEpoch(999999999999999) + +// This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet +// 2024-04-02T15:00:00Z +var UpgradePhoenixHeight = UpgradeDragonHeight + 120 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -1 @@ -119,8 +127,9 @@ func init() { SetAddressNetwork(address.Mainnet) } - if os.Getenv("LOTUS_DISABLE_WATERMELON") == "1" { - UpgradeWatermelonHeight = math.MaxInt64 + if os.Getenv("LOTUS_DISABLE_DRAGON") == "1" { + UpgradeDragonHeight = math.MaxInt64 - 1 + UpgradePhoenixHeight = math.MaxInt64 } // NOTE: DO NOT change this unless you REALLY know what you're doing. 
This is not consensus critical, however, diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 1d15c2fe8b6..8a3f6550124 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -30,7 +30,7 @@ const AllowableClockDriftSecs = uint64(1) /* inline-gen template const TestNetworkVersion = network.Version{{.latestNetworkVersion}} /* inline-gen start */ -const TestNetworkVersion = network.Version21 +const TestNetworkVersion = network.Version22 /* inline-gen end */ diff --git a/build/params_testground.go b/build/params_testground.go index 05249e7e268..63dafcaa864 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -112,9 +112,12 @@ var ( UpgradeWatermelonHeight abi.ChainEpoch = -23 UpgradeWatermelonFixHeight abi.ChainEpoch = -24 UpgradeWatermelonFix2Height abi.ChainEpoch = -25 + UpgradeDragonHeight abi.ChainEpoch = -26 + UpgradePhoenixHeight abi.ChainEpoch = -27 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } GenesisNetworkVersion = network.Version0 diff --git a/build/version.go b/build/version.go index 6ec1ecd7af4..4b18f642baa 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.25.3-dev" +const BuildVersion = "1.27.0-dev" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index dcb60f80186..0f7ac209359 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/go-address" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" 
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -22,7 +22,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -var Methods = builtin12.MethodsAccount +var Methods = builtin13.MethodsAccount func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { @@ -47,6 +47,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -117,6 +120,9 @@ func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (S case actorstypes.Version12: return make12(store, addr) + case actorstypes.Version13: + return make13(store, addr) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -146,5 +152,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/account/v13.go b/chain/actors/builtin/account/v13.go new file mode 100644 index 00000000000..f2f3b6f6676 --- /dev/null +++ b/chain/actors/builtin/account/v13.go @@ -0,0 +1,62 @@ +package account + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + actorstypes "github.com/filecoin-project/go-state-types/actors" + account13 "github.com/filecoin-project/go-state-types/builtin/v13/account" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, addr 
address.Address) (State, error) { + out := state13{store: store} + out.State = account13.State{Address: addr} + return &out, nil +} + +type state13 struct { + account13.State + store adt.Store +} + +func (s *state13) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.AccountKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 17b29178816..0c69cfca87f 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -43,6 +43,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -113,13 +116,16 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) { case actorstypes.Version12: return make12(store) + case actorstypes.Version13: + return make13(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } var ( - Address = builtin12.CronActorAddr - Methods = builtin12.MethodsCron + Address = builtin13.CronActorAddr + Methods = 
builtin13.MethodsCron ) type State interface { @@ -144,5 +150,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/cron/v13.go b/chain/actors/builtin/cron/v13.go new file mode 100644 index 00000000000..d2ba0337896 --- /dev/null +++ b/chain/actors/builtin/cron/v13.go @@ -0,0 +1,57 @@ +package cron + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + cron13 "github.com/filecoin-project/go-state-types/builtin/v13/cron" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + out.State = *cron13.ConstructState(cron13.BuiltInEntries()) + return &out, nil +} + +type state13 struct { + cron13.State + store adt.Store +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.CronKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/datacap/datacap.go b/chain/actors/builtin/datacap/datacap.go index 0c8f04bbf40..7f5ee6c0bd3 100644 --- a/chain/actors/builtin/datacap/datacap.go +++ b/chain/actors/builtin/datacap/datacap.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" 
"github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" @@ -17,8 +17,8 @@ import ( ) var ( - Address = builtin12.DatacapActorAddr - Methods = builtin12.MethodsDatacap + Address = builtin13.DatacapActorAddr + Methods = builtin13.MethodsDatacap ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -41,6 +41,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -62,6 +65,9 @@ func MakeState(store adt.Store, av actorstypes.Version, governor address.Address case actorstypes.Version12: return make12(store, governor, bitwidth) + case actorstypes.Version13: + return make13(store, governor, bitwidth) + default: return nil, xerrors.Errorf("datacap actor only valid for actors v9 and above, got %d", av) } @@ -86,5 +92,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/datacap/v13.go b/chain/actors/builtin/datacap/v13.go new file mode 100644 index 00000000000..3baf374a42e --- /dev/null +++ b/chain/actors/builtin/datacap/v13.go @@ -0,0 +1,82 @@ +package datacap + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + datacap13 "github.com/filecoin-project/go-state-types/builtin/v13/datacap" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + 
"github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, governor address.Address, bitwidth uint64) (State, error) { + out := state13{store: store} + s, err := datacap13.ConstructState(store, governor, bitwidth) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + datacap13.State + store adt.Store +} + +func (s *state13) Governor() (address.Address, error) { + return s.State.Governor, nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachClient(s.store, actors.Version13, s.verifiedClients, cb) +} + +func (s *state13) verifiedClients() (adt.Map, error) { + return adt13.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth)) +} + +func (s *state13) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version13, s.verifiedClients, addr) +} + +func (s *state13) ActorKey() string { + return manifest.DatacapKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/evm/evm.go b/chain/actors/builtin/evm/evm.go index 774cded8d08..5bda457cd36 100644 --- a/chain/actors/builtin/evm/evm.go +++ b/chain/actors/builtin/evm/evm.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - 
builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/manifest" @@ -15,7 +15,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -var Methods = builtin12.MethodsEVM +var Methods = builtin13.MethodsEVM // See https://github.com/filecoin-project/builtin-actors/blob/6e781444cee5965278c46ef4ffe1fb1970f18d7d/actors/evm/src/lib.rs#L35-L42 const ( @@ -46,6 +46,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -64,6 +67,9 @@ func MakeState(store adt.Store, av actorstypes.Version, bytecode cid.Cid) (State case actorstypes.Version12: return make12(store, bytecode) + case actorstypes.Version13: + return make13(store, bytecode) + default: return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av) } diff --git a/chain/actors/builtin/evm/v13.go b/chain/actors/builtin/evm/v13.go new file mode 100644 index 00000000000..180c9f38ab5 --- /dev/null +++ b/chain/actors/builtin/evm/v13.go @@ -0,0 +1,72 @@ +package evm + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + evm13 "github.com/filecoin-project/go-state-types/builtin/v13/evm" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, bytecode cid.Cid) (State, error) { + out := state13{store: store} + s, err := evm13.ConstructState(store, bytecode) + if err != nil { + return nil, err + } + + out.State = *s + + return 
&out, nil +} + +type state13 struct { + evm13.State + store adt.Store +} + +func (s *state13) Nonce() (uint64, error) { + return s.State.Nonce, nil +} + +func (s *state13) IsAlive() (bool, error) { + return s.State.Tombstone == nil, nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) GetBytecodeCID() (cid.Cid, error) { + return s.State.Bytecode, nil +} + +func (s *state13) GetBytecodeHash() ([32]byte, error) { + return s.State.BytecodeHash, nil +} + +func (s *state13) GetBytecode() ([]byte, error) { + bc, err := s.GetBytecodeCID() + if err != nil { + return nil, err + } + + var byteCode abi.CborBytesTransparent + if err := s.store.Get(s.store.Context(), bc, &byteCode); err != nil { + return nil, err + } + + return byteCode, nil +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index 41a763ecf61..de1c6274e21 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin12.InitActorAddr - Methods = builtin12.MethodsInit + Address = builtin13.InitActorAddr + Methods = builtin13.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -52,6 +52,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actorstypes.Version, networkName 
string) (Sta case actorstypes.Version12: return make12(store, networkName) + case actorstypes.Version13: + return make13(store, networkName) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -174,5 +180,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/init/v13.go b/chain/actors/builtin/init/v13.go new file mode 100644 index 00000000000..227ce769fa8 --- /dev/null +++ b/chain/actors/builtin/init/v13.go @@ -0,0 +1,147 @@ +package init + +import ( + "crypto/sha256" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, networkName string) (State, error) { + out := state13{store: store} + + s, err := init13.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + init13.State + store adt.Store +} + +func (s *state13) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state13) 
MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state13) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state13) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state13) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state13) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state13) Remove(addrs ...address.Address) (err error) { + m, err := adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state13) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) AddressMap() (adt.Map, error) { + return adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) AddressMapBitWidth() int { + return builtin13.DefaultHamtBitwidth +} + +func (s *state13) AddressMapHashFunction() func(input []byte) []byte { + return func(input []byte) []byte { + res := sha256.Sum256(input) + return res[:] + } +} + +func (s *state13) ActorKey() string { 
+ return manifest.InitKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/market/actor.go.template b/chain/actors/builtin/market/actor.go.template index a84c04ab9c5..0604737b357 100644 --- a/chain/actors/builtin/market/actor.go.template +++ b/chain/actors/builtin/market/actor.go.template @@ -103,10 +103,10 @@ type BalanceTable interface { type DealStates interface { ForEach(cb func(id abi.DealID, ds DealState) error) error - Get(id abi.DealID) (*DealState, bool, error) + Get(id abi.DealID) (DealState, bool, error) array() adt.Array - decode(*cbg.Deferred) (*DealState, error) + decode(*cbg.Deferred) (DealState, error) } type DealProposals interface { @@ -142,7 +142,17 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora type DealProposal = markettypes.DealProposal type DealLabel = markettypes.DealLabel -type DealState = markettypes.DealState +type DealState interface { + SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated + SlashEpoch() abi.ChainEpoch // -1 if deal never slashed + + Equals(other DealState) bool +} + +func DealStatesEqual(a, b DealState) bool { + return a.Equals(b) +} type DealStateChanges struct { Added []DealIDState @@ -158,8 +168,8 @@ type DealIDState struct { // DealStateChange is a change in deal state from -> to type DealStateChange struct { ID abi.DealID - From *DealState - To *DealState + From DealState + To DealState } type DealProposalChanges struct { @@ -172,12 +182,36 @@ type ProposalIDState struct { Proposal markettypes.DealProposal } -func EmptyDealState() *DealState { - return 
&DealState{ - SectorStartEpoch: -1, - SlashEpoch: -1, - LastUpdatedEpoch: -1, + +type emptyDealState struct{} + +func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) LastUpdatedEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) SlashEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) Equals(other DealState) bool { + if e.SectorStartEpoch() != other.SectorStartEpoch() { + return false } + if e.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if e.SlashEpoch() != other.SlashEpoch() { + return false + } + return true +} + +func EmptyDealState() DealState { + return &emptyDealState{} } // returns the earned fees and pending fees for a given deal @@ -196,8 +230,8 @@ func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.Toke return ef, big.Sub(tf, ef) } -func IsDealActive(state markettypes.DealState) bool { - return state.SectorStartEpoch > -1 && state.SlashEpoch == -1 +func IsDealActive(state DealState) bool { + return state.SectorStartEpoch() > -1 && state.SlashEpoch() == -1 } func labelFromGoString(s string) (markettypes.DealLabel, error) { diff --git a/chain/actors/builtin/market/diff.go b/chain/actors/builtin/market/diff.go index ef3c2c28d7c..292299790e3 100644 --- a/chain/actors/builtin/market/diff.go +++ b/chain/actors/builtin/market/diff.go @@ -64,7 +64,7 @@ func (d *marketStatesDiffer) Add(key uint64, val *cbg.Deferred) error { if err != nil { return err } - d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), *ds}) + d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), ds}) return nil } @@ -77,7 +77,7 @@ func (d *marketStatesDiffer) Modify(key uint64, from, to *cbg.Deferred) error { if err != nil { return err } - if *dsFrom != *dsTo { + if !dsFrom.Equals(dsTo) { d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo}) } return nil @@ -88,6 +88,6 @@ func (d 
*marketStatesDiffer) Remove(key uint64, val *cbg.Deferred) error { if err != nil { return err } - d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), *ds}) + d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), ds}) return nil } diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 39473d56062..13c09f91bcf 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -58,6 +58,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -128,6 +131,9 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) { case actorstypes.Version12: return make12(store) + case actorstypes.Version13: + return make13(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -162,10 +168,10 @@ type BalanceTable interface { type DealStates interface { ForEach(cb func(id abi.DealID, ds DealState) error) error - Get(id abi.DealID) (*DealState, bool, error) + Get(id abi.DealID) (DealState, bool, error) array() adt.Array - decode(*cbg.Deferred) (*DealState, error) + decode(*cbg.Deferred) (DealState, error) } type DealProposals interface { @@ -226,6 +232,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora case actorstypes.Version12: return decodePublishStorageDealsReturn12(b) + case actorstypes.Version13: + return decodePublishStorageDealsReturn13(b) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -233,7 +242,17 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora type DealProposal = markettypes.DealProposal type DealLabel = markettypes.DealLabel -type DealState = markettypes.DealState +type DealState interface { + SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch() 
abi.ChainEpoch // -1 if deal state never updated + SlashEpoch() abi.ChainEpoch // -1 if deal never slashed + + Equals(other DealState) bool +} + +func DealStatesEqual(a, b DealState) bool { + return a.Equals(b) +} type DealStateChanges struct { Added []DealIDState @@ -249,8 +268,8 @@ type DealIDState struct { // DealStateChange is a change in deal state from -> to type DealStateChange struct { ID abi.DealID - From *DealState - To *DealState + From DealState + To DealState } type DealProposalChanges struct { @@ -263,12 +282,35 @@ type ProposalIDState struct { Proposal markettypes.DealProposal } -func EmptyDealState() *DealState { - return &DealState{ - SectorStartEpoch: -1, - SlashEpoch: -1, - LastUpdatedEpoch: -1, +type emptyDealState struct{} + +func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) LastUpdatedEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) SlashEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) Equals(other DealState) bool { + if e.SectorStartEpoch() != other.SectorStartEpoch() { + return false } + if e.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if e.SlashEpoch() != other.SlashEpoch() { + return false + } + return true +} + +func EmptyDealState() DealState { + return &emptyDealState{} } // returns the earned fees and pending fees for a given deal @@ -287,8 +329,8 @@ func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.Toke return ef, big.Sub(tf, ef) } -func IsDealActive(state markettypes.DealState) bool { - return state.SectorStartEpoch > -1 && state.SlashEpoch == -1 +func IsDealActive(state DealState) bool { + return state.SectorStartEpoch() > -1 && state.SlashEpoch() == -1 } func labelFromGoString(s string) (markettypes.DealLabel, error) { @@ -313,5 +355,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git 
a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template index 1eab9d74335..4670576602d 100644 --- a/chain/actors/builtin/market/state.go.template +++ b/chain/actors/builtin/market/state.go.template @@ -175,7 +175,7 @@ type dealStates{{.v}} struct { adt.Array } -func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates{{.v}}) Get(dealID abi.DealID) (DealState, bool, error) { var deal{{.v}} market{{.v}}.DealState found, err := s.Array.Get(uint64(dealID), &deal{{.v}}) if err != nil { @@ -185,7 +185,7 @@ func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV{{.v}}DealState(deal{{.v}}) - return &deal, true, nil + return deal, true, nil } func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -195,31 +195,57 @@ func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) erro }) } -func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (DealState, error) { var ds{{.v}} market{{.v}}.DealState if err := ds{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV{{.v}}DealState(ds{{.v}}) - return &ds, nil + return ds, nil } func (s *dealStates{{.v}}) array() adt.Array { return s.Array } -func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v{{.v}}.SectorStartEpoch, - LastUpdatedEpoch: v{{.v}}.LastUpdatedEpoch, - SlashEpoch: v{{.v}}.SlashEpoch, - VerifiedClaim: 0, - } - {{if (ge .v 9)}} - ret.VerifiedClaim = verifregtypes.AllocationId(v{{.v}}.VerifiedClaim) - {{end}} +type dealStateV{{.v}} struct { + ds{{.v}} market{{.v}}.DealState +} + +func (d dealStateV{{.v}}) SectorStartEpoch() abi.ChainEpoch { + return d.ds{{.v}}.SectorStartEpoch +} + +func (d dealStateV{{.v}}) LastUpdatedEpoch() abi.ChainEpoch { + return 
d.ds{{.v}}.LastUpdatedEpoch +} + +func (d dealStateV{{.v}}) SlashEpoch() abi.ChainEpoch { + return d.ds{{.v}}.SlashEpoch +} - return ret +func (d dealStateV{{.v}}) Equals(other DealState) bool { + if ov{{.v}}, ok := other.(dealStateV{{.v}}); ok { + return d.ds{{.v}} == ov{{.v}}.ds{{.v}} + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV{{.v}})(nil) + +func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState { + return dealStateV{{.v}}{v{{.v}}} } type dealProposals{{.v}} struct { diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go index ca6970dfaa1..d797d53f8c6 100644 --- a/chain/actors/builtin/market/v0.go +++ b/chain/actors/builtin/market/v0.go @@ -154,7 +154,7 @@ type dealStates0 struct { adt.Array } -func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates0) Get(dealID abi.DealID) (DealState, bool, error) { var deal0 market0.DealState found, err := s.Array.Get(uint64(dealID), &deal0) if err != nil { @@ -164,7 +164,7 @@ func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV0DealState(deal0) - return &deal, true, nil + return deal, true, nil } func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -174,28 +174,57 @@ func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates0) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates0) decode(val *cbg.Deferred) (DealState, error) { var ds0 market0.DealState if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV0DealState(ds0) - return &ds, nil + return ds, nil } func (s *dealStates0) array() adt.Array { return s.Array } -func 
fromV0DealState(v0 market0.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v0.SectorStartEpoch, - LastUpdatedEpoch: v0.LastUpdatedEpoch, - SlashEpoch: v0.SlashEpoch, - VerifiedClaim: 0, +type dealStateV0 struct { + ds0 market0.DealState +} + +func (d dealStateV0) SectorStartEpoch() abi.ChainEpoch { + return d.ds0.SectorStartEpoch +} + +func (d dealStateV0) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds0.LastUpdatedEpoch +} + +func (d dealStateV0) SlashEpoch() abi.ChainEpoch { + return d.ds0.SlashEpoch +} + +func (d dealStateV0) Equals(other DealState) bool { + if ov0, ok := other.(dealStateV0); ok { + return d.ds0 == ov0.ds0 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV0)(nil) + +func fromV0DealState(v0 market0.DealState) DealState { + return dealStateV0{v0} } type dealProposals0 struct { diff --git a/chain/actors/builtin/market/v10.go b/chain/actors/builtin/market/v10.go index 878f0d46584..290c17d092f 100644 --- a/chain/actors/builtin/market/v10.go +++ b/chain/actors/builtin/market/v10.go @@ -153,7 +153,7 @@ type dealStates10 struct { adt.Array } -func (s *dealStates10) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates10) Get(dealID abi.DealID) (DealState, bool, error) { var deal10 market10.DealState found, err := s.Array.Get(uint64(dealID), &deal10) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates10) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV10DealState(deal10) - return &deal, true, nil + return deal, true, nil } func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates10) decode(val 
*cbg.Deferred) (*DealState, error) { +func (s *dealStates10) decode(val *cbg.Deferred) (DealState, error) { var ds10 market10.DealState if err := ds10.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV10DealState(ds10) - return &ds, nil + return ds, nil } func (s *dealStates10) array() adt.Array { return s.Array } -func fromV10DealState(v10 market10.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v10.SectorStartEpoch, - LastUpdatedEpoch: v10.LastUpdatedEpoch, - SlashEpoch: v10.SlashEpoch, - VerifiedClaim: 0, +type dealStateV10 struct { + ds10 market10.DealState +} + +func (d dealStateV10) SectorStartEpoch() abi.ChainEpoch { + return d.ds10.SectorStartEpoch +} + +func (d dealStateV10) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds10.LastUpdatedEpoch +} + +func (d dealStateV10) SlashEpoch() abi.ChainEpoch { + return d.ds10.SlashEpoch +} + +func (d dealStateV10) Equals(other DealState) bool { + if ov10, ok := other.(dealStateV10); ok { + return d.ds10 == ov10.ds10 + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} - ret.VerifiedClaim = verifregtypes.AllocationId(v10.VerifiedClaim) +var _ DealState = (*dealStateV10)(nil) - return ret +func fromV10DealState(v10 market10.DealState) DealState { + return dealStateV10{v10} } type dealProposals10 struct { diff --git a/chain/actors/builtin/market/v11.go b/chain/actors/builtin/market/v11.go index a6427220972..56a4c6038de 100644 --- a/chain/actors/builtin/market/v11.go +++ b/chain/actors/builtin/market/v11.go @@ -153,7 +153,7 @@ type dealStates11 struct { adt.Array } -func (s *dealStates11) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates11) Get(dealID abi.DealID) (DealState, bool, error) { var deal11 market11.DealState found, err := s.Array.Get(uint64(dealID), &deal11) if 
err != nil { @@ -163,7 +163,7 @@ func (s *dealStates11) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV11DealState(deal11) - return &deal, true, nil + return deal, true, nil } func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates11) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates11) decode(val *cbg.Deferred) (DealState, error) { var ds11 market11.DealState if err := ds11.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV11DealState(ds11) - return &ds, nil + return ds, nil } func (s *dealStates11) array() adt.Array { return s.Array } -func fromV11DealState(v11 market11.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v11.SectorStartEpoch, - LastUpdatedEpoch: v11.LastUpdatedEpoch, - SlashEpoch: v11.SlashEpoch, - VerifiedClaim: 0, +type dealStateV11 struct { + ds11 market11.DealState +} + +func (d dealStateV11) SectorStartEpoch() abi.ChainEpoch { + return d.ds11.SectorStartEpoch +} + +func (d dealStateV11) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds11.LastUpdatedEpoch +} + +func (d dealStateV11) SlashEpoch() abi.ChainEpoch { + return d.ds11.SlashEpoch +} + +func (d dealStateV11) Equals(other DealState) bool { + if ov11, ok := other.(dealStateV11); ok { + return d.ds11 == ov11.ds11 + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} - ret.VerifiedClaim = verifregtypes.AllocationId(v11.VerifiedClaim) +var _ DealState = (*dealStateV11)(nil) - return ret +func fromV11DealState(v11 market11.DealState) DealState { + return dealStateV11{v11} } type dealProposals11 struct { diff --git a/chain/actors/builtin/market/v12.go 
b/chain/actors/builtin/market/v12.go index 56e651a9be5..cf7687203f9 100644 --- a/chain/actors/builtin/market/v12.go +++ b/chain/actors/builtin/market/v12.go @@ -153,7 +153,7 @@ type dealStates12 struct { adt.Array } -func (s *dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates12) Get(dealID abi.DealID) (DealState, bool, error) { var deal12 market12.DealState found, err := s.Array.Get(uint64(dealID), &deal12) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV12DealState(deal12) - return &deal, true, nil + return deal, true, nil } func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates12) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates12) decode(val *cbg.Deferred) (DealState, error) { var ds12 market12.DealState if err := ds12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV12DealState(ds12) - return &ds, nil + return ds, nil } func (s *dealStates12) array() adt.Array { return s.Array } -func fromV12DealState(v12 market12.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v12.SectorStartEpoch, - LastUpdatedEpoch: v12.LastUpdatedEpoch, - SlashEpoch: v12.SlashEpoch, - VerifiedClaim: 0, +type dealStateV12 struct { + ds12 market12.DealState +} + +func (d dealStateV12) SectorStartEpoch() abi.ChainEpoch { + return d.ds12.SectorStartEpoch +} + +func (d dealStateV12) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds12.LastUpdatedEpoch +} + +func (d dealStateV12) SlashEpoch() abi.ChainEpoch { + return d.ds12.SlashEpoch +} + +func (d dealStateV12) Equals(other DealState) bool { + if ov12, ok := other.(dealStateV12); ok { + return d.ds12 == ov12.ds12 + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return 
false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} - ret.VerifiedClaim = verifregtypes.AllocationId(v12.VerifiedClaim) +var _ DealState = (*dealStateV12)(nil) - return ret +func fromV12DealState(v12 market12.DealState) DealState { + return dealStateV12{v12} } type dealProposals12 struct { diff --git a/chain/actors/builtin/market/v13.go b/chain/actors/builtin/market/v13.go new file mode 100644 index 00000000000..d270319ce6f --- /dev/null +++ b/chain/actors/builtin/market/v13.go @@ -0,0 +1,404 @@ +package market + +import ( + "bytes" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/builtin" + market13 "github.com/filecoin-project/go-state-types/builtin/v13/market" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + + s, err := market13.ConstructState(store) + if err != nil { + return nil, err 
+ } + + out.State = *s + + return &out, nil +} + +type state13 struct { + market13.State + store adt.Store +} + +func (s *state13) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state13) BalancesChanged(otherState State) (bool, error) { + otherState13, ok := otherState.(*state13) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState13.State.EscrowTable) || !s.State.LockedTable.Equals(otherState13.State.LockedTable), nil +} + +func (s *state13) StatesChanged(otherState State) (bool, error) { + otherState13, ok := otherState.(*state13) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState13.State.States), nil +} + +func (s *state13) States() (DealStates, error) { + stateArray, err := adt13.AsArray(s.store, s.State.States, market13.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates13{stateArray}, nil +} + +func (s *state13) ProposalsChanged(otherState State) (bool, error) { + otherState13, ok := otherState.(*state13) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState13.State.Proposals), nil +} + +func (s *state13) Proposals() (DealProposals, error) { + proposalArray, err := adt13.AsArray(s.store, s.State.Proposals, market13.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals13{proposalArray}, nil +} + +func (s *state13) EscrowTable() (BalanceTable, error) { + bt, err := 
adt13.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable13{bt}, nil +} + +func (s *state13) LockedTable() (BalanceTable, error) { + bt, err := adt13.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable13{bt}, nil +} + +func (s *state13) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market13.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state13) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable13 struct { + *adt13.BalanceTable +} + +func (bt *balanceTable13) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt13.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates13 struct { + adt.Array +} + +func (s *dealStates13) Get(dealID abi.DealID) (DealState, bool, error) { + var deal13 market13.DealState + found, err := s.Array.Get(uint64(dealID), &deal13) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV13DealState(deal13) + return deal, true, nil +} + +func (s *dealStates13) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds13 market13.DealState + return s.Array.ForEach(&ds13, func(idx int64) error { + return cb(abi.DealID(idx), fromV13DealState(ds13)) + }) +} + +func (s *dealStates13) decode(val *cbg.Deferred) (DealState, error) { + var ds13 market13.DealState + if err := ds13.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV13DealState(ds13) + return ds, nil +} + +func (s *dealStates13) array() 
adt.Array { + return s.Array +} + +type dealStateV13 struct { + ds13 market13.DealState +} + +func (d dealStateV13) SectorStartEpoch() abi.ChainEpoch { + return d.ds13.SectorStartEpoch +} + +func (d dealStateV13) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds13.LastUpdatedEpoch +} + +func (d dealStateV13) SlashEpoch() abi.ChainEpoch { + return d.ds13.SlashEpoch +} + +func (d dealStateV13) Equals(other DealState) bool { + if ov13, ok := other.(dealStateV13); ok { + return d.ds13 == ov13.ds13 + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV13)(nil) + +func fromV13DealState(v13 market13.DealState) DealState { + return dealStateV13{v13} +} + +type dealProposals13 struct { + adt.Array +} + +func (s *dealProposals13) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal13 market13.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal13) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV13DealProposal(proposal13) + if err != nil { + return nil, true, xerrors.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals13) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp13 market13.DealProposal + return s.Array.ForEach(&dp13, func(idx int64) error { + dp, err := fromV13DealProposal(dp13) + if err != nil { + return xerrors.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals13) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp13 market13.DealProposal + if err := dp13.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV13DealProposal(dp13) + if err != nil { + return nil, err + } + + 
return &dp, nil +} + +func (s *dealProposals13) array() adt.Array { + return s.Array +} + +func fromV13DealProposal(v13 market13.DealProposal) (DealProposal, error) { + + label, err := fromV13Label(v13.Label) + + if err != nil { + return DealProposal{}, xerrors.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v13.PieceCID, + PieceSize: v13.PieceSize, + VerifiedDeal: v13.VerifiedDeal, + Client: v13.Client, + Provider: v13.Provider, + + Label: label, + + StartEpoch: v13.StartEpoch, + EndEpoch: v13.EndEpoch, + StoragePricePerEpoch: v13.StoragePricePerEpoch, + + ProviderCollateral: v13.ProviderCollateral, + ClientCollateral: v13.ClientCollateral, + }, nil +} + +func fromV13Label(v13 market13.DealLabel) (DealLabel, error) { + if v13.IsString() { + str, err := v13.ToString() + if err != nil { + return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert string label to string: %w", err) + } + return markettypes.NewLabelFromString(str) + } + + bs, err := v13.ToBytes() + if err != nil { + return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert bytes label to bytes: %w", err) + } + return markettypes.NewLabelFromBytes(bs) +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn13)(nil) + +func decodePublishStorageDealsReturn13(b []byte) (PublishStorageDealsReturn, error) { + var retval market13.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn13{retval}, nil +} + +type publishStorageDealsReturn13 struct { + market13.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn13) IsDealValid(index uint64) (bool, int, error) { + + set, err := r.ValidDeals.IsSet(index) + if err != nil || !set { + return false, -1, err + } + maskBf, err := 
bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}}) + if err != nil { + return false, -1, err + } + before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals) + if err != nil { + return false, -1, err + } + outIdx, err := before.Count() + if err != nil { + return false, -1, err + } + return set, int(outIdx), nil + +} + +func (r *publishStorageDealsReturn13) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state13) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + allocations, err := adt13.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth) + if err != nil { + return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + + var allocationId cbg.CborInt + found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId) + if err != nil { + return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + if !found { + return verifregtypes.NoAllocationID, nil + } + + return verifregtypes.AllocationId(allocationId), nil + +} + +func (s *state13) ActorKey() string { + return manifest.MarketKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go index ba84e3b03a0..5ced3c8a337 100644 --- a/chain/actors/builtin/market/v2.go +++ b/chain/actors/builtin/market/v2.go @@ -154,7 +154,7 @@ type dealStates2 struct { adt.Array } -func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates2) Get(dealID abi.DealID) (DealState, 
bool, error) { var deal2 market2.DealState found, err := s.Array.Get(uint64(dealID), &deal2) if err != nil { @@ -164,7 +164,7 @@ func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV2DealState(deal2) - return &deal, true, nil + return deal, true, nil } func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -174,28 +174,57 @@ func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates2) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates2) decode(val *cbg.Deferred) (DealState, error) { var ds2 market2.DealState if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV2DealState(ds2) - return &ds, nil + return ds, nil } func (s *dealStates2) array() adt.Array { return s.Array } -func fromV2DealState(v2 market2.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v2.SectorStartEpoch, - LastUpdatedEpoch: v2.LastUpdatedEpoch, - SlashEpoch: v2.SlashEpoch, - VerifiedClaim: 0, +type dealStateV2 struct { + ds2 market2.DealState +} + +func (d dealStateV2) SectorStartEpoch() abi.ChainEpoch { + return d.ds2.SectorStartEpoch +} + +func (d dealStateV2) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds2.LastUpdatedEpoch +} + +func (d dealStateV2) SlashEpoch() abi.ChainEpoch { + return d.ds2.SlashEpoch +} + +func (d dealStateV2) Equals(other DealState) bool { + if ov2, ok := other.(dealStateV2); ok { + return d.ds2 == ov2.ds2 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV2)(nil) + +func fromV2DealState(v2 market2.DealState) DealState { + return dealStateV2{v2} } type dealProposals2 struct { diff --git a/chain/actors/builtin/market/v3.go 
b/chain/actors/builtin/market/v3.go index f6a0891e730..35dd9c29a55 100644 --- a/chain/actors/builtin/market/v3.go +++ b/chain/actors/builtin/market/v3.go @@ -149,7 +149,7 @@ type dealStates3 struct { adt.Array } -func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates3) Get(dealID abi.DealID) (DealState, bool, error) { var deal3 market3.DealState found, err := s.Array.Get(uint64(dealID), &deal3) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV3DealState(deal3) - return &deal, true, nil + return deal, true, nil } func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates3) decode(val *cbg.Deferred) (DealState, error) { var ds3 market3.DealState if err := ds3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV3DealState(ds3) - return &ds, nil + return ds, nil } func (s *dealStates3) array() adt.Array { return s.Array } -func fromV3DealState(v3 market3.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v3.SectorStartEpoch, - LastUpdatedEpoch: v3.LastUpdatedEpoch, - SlashEpoch: v3.SlashEpoch, - VerifiedClaim: 0, +type dealStateV3 struct { + ds3 market3.DealState +} + +func (d dealStateV3) SectorStartEpoch() abi.ChainEpoch { + return d.ds3.SectorStartEpoch +} + +func (d dealStateV3) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds3.LastUpdatedEpoch +} + +func (d dealStateV3) SlashEpoch() abi.ChainEpoch { + return d.ds3.SlashEpoch +} + +func (d dealStateV3) Equals(other DealState) bool { + if ov3, ok := other.(dealStateV3); ok { + return d.ds3 == ov3.ds3 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() 
!= other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV3)(nil) + +func fromV3DealState(v3 market3.DealState) DealState { + return dealStateV3{v3} } type dealProposals3 struct { diff --git a/chain/actors/builtin/market/v4.go b/chain/actors/builtin/market/v4.go index 629e833b67b..bc9e61c8888 100644 --- a/chain/actors/builtin/market/v4.go +++ b/chain/actors/builtin/market/v4.go @@ -149,7 +149,7 @@ type dealStates4 struct { adt.Array } -func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates4) Get(dealID abi.DealID) (DealState, bool, error) { var deal4 market4.DealState found, err := s.Array.Get(uint64(dealID), &deal4) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV4DealState(deal4) - return &deal, true, nil + return deal, true, nil } func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates4) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates4) decode(val *cbg.Deferred) (DealState, error) { var ds4 market4.DealState if err := ds4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV4DealState(ds4) - return &ds, nil + return ds, nil } func (s *dealStates4) array() adt.Array { return s.Array } -func fromV4DealState(v4 market4.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v4.SectorStartEpoch, - LastUpdatedEpoch: v4.LastUpdatedEpoch, - SlashEpoch: v4.SlashEpoch, - VerifiedClaim: 0, +type dealStateV4 struct { + ds4 market4.DealState +} + +func (d dealStateV4) SectorStartEpoch() abi.ChainEpoch { + return d.ds4.SectorStartEpoch +} + +func (d dealStateV4) LastUpdatedEpoch() abi.ChainEpoch { + return 
d.ds4.LastUpdatedEpoch +} + +func (d dealStateV4) SlashEpoch() abi.ChainEpoch { + return d.ds4.SlashEpoch +} + +func (d dealStateV4) Equals(other DealState) bool { + if ov4, ok := other.(dealStateV4); ok { + return d.ds4 == ov4.ds4 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV4)(nil) + +func fromV4DealState(v4 market4.DealState) DealState { + return dealStateV4{v4} } type dealProposals4 struct { diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go index 8925889791f..63743ba8d10 100644 --- a/chain/actors/builtin/market/v5.go +++ b/chain/actors/builtin/market/v5.go @@ -149,7 +149,7 @@ type dealStates5 struct { adt.Array } -func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates5) Get(dealID abi.DealID) (DealState, bool, error) { var deal5 market5.DealState found, err := s.Array.Get(uint64(dealID), &deal5) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV5DealState(deal5) - return &deal, true, nil + return deal, true, nil } func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates5) decode(val *cbg.Deferred) (DealState, error) { var ds5 market5.DealState if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV5DealState(ds5) - return &ds, nil + return ds, nil } func (s *dealStates5) array() adt.Array { return s.Array } -func fromV5DealState(v5 market5.DealState) DealState { - ret := DealState{ - 
SectorStartEpoch: v5.SectorStartEpoch, - LastUpdatedEpoch: v5.LastUpdatedEpoch, - SlashEpoch: v5.SlashEpoch, - VerifiedClaim: 0, +type dealStateV5 struct { + ds5 market5.DealState +} + +func (d dealStateV5) SectorStartEpoch() abi.ChainEpoch { + return d.ds5.SectorStartEpoch +} + +func (d dealStateV5) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds5.LastUpdatedEpoch +} + +func (d dealStateV5) SlashEpoch() abi.ChainEpoch { + return d.ds5.SlashEpoch +} + +func (d dealStateV5) Equals(other DealState) bool { + if ov5, ok := other.(dealStateV5); ok { + return d.ds5 == ov5.ds5 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV5)(nil) + +func fromV5DealState(v5 market5.DealState) DealState { + return dealStateV5{v5} } type dealProposals5 struct { diff --git a/chain/actors/builtin/market/v6.go b/chain/actors/builtin/market/v6.go index b57d49f9117..5900eace953 100644 --- a/chain/actors/builtin/market/v6.go +++ b/chain/actors/builtin/market/v6.go @@ -151,7 +151,7 @@ type dealStates6 struct { adt.Array } -func (s *dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates6) Get(dealID abi.DealID) (DealState, bool, error) { var deal6 market6.DealState found, err := s.Array.Get(uint64(dealID), &deal6) if err != nil { @@ -161,7 +161,7 @@ func (s *dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV6DealState(deal6) - return &deal, true, nil + return deal, true, nil } func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -171,28 +171,57 @@ func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates6) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates6) decode(val *cbg.Deferred) 
(DealState, error) { var ds6 market6.DealState if err := ds6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV6DealState(ds6) - return &ds, nil + return ds, nil } func (s *dealStates6) array() adt.Array { return s.Array } -func fromV6DealState(v6 market6.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v6.SectorStartEpoch, - LastUpdatedEpoch: v6.LastUpdatedEpoch, - SlashEpoch: v6.SlashEpoch, - VerifiedClaim: 0, +type dealStateV6 struct { + ds6 market6.DealState +} + +func (d dealStateV6) SectorStartEpoch() abi.ChainEpoch { + return d.ds6.SectorStartEpoch +} + +func (d dealStateV6) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds6.LastUpdatedEpoch +} + +func (d dealStateV6) SlashEpoch() abi.ChainEpoch { + return d.ds6.SlashEpoch +} + +func (d dealStateV6) Equals(other DealState) bool { + if ov6, ok := other.(dealStateV6); ok { + return d.ds6 == ov6.ds6 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV6)(nil) + +func fromV6DealState(v6 market6.DealState) DealState { + return dealStateV6{v6} } type dealProposals6 struct { diff --git a/chain/actors/builtin/market/v7.go b/chain/actors/builtin/market/v7.go index 56a1db328f9..f51f070c7f2 100644 --- a/chain/actors/builtin/market/v7.go +++ b/chain/actors/builtin/market/v7.go @@ -151,7 +151,7 @@ type dealStates7 struct { adt.Array } -func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates7) Get(dealID abi.DealID) (DealState, bool, error) { var deal7 market7.DealState found, err := s.Array.Get(uint64(dealID), &deal7) if err != nil { @@ -161,7 +161,7 @@ func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV7DealState(deal7) - return &deal, true, nil + return 
deal, true, nil } func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -171,28 +171,57 @@ func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates7) decode(val *cbg.Deferred) (DealState, error) { var ds7 market7.DealState if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV7DealState(ds7) - return &ds, nil + return ds, nil } func (s *dealStates7) array() adt.Array { return s.Array } -func fromV7DealState(v7 market7.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v7.SectorStartEpoch, - LastUpdatedEpoch: v7.LastUpdatedEpoch, - SlashEpoch: v7.SlashEpoch, - VerifiedClaim: 0, +type dealStateV7 struct { + ds7 market7.DealState +} + +func (d dealStateV7) SectorStartEpoch() abi.ChainEpoch { + return d.ds7.SectorStartEpoch +} + +func (d dealStateV7) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds7.LastUpdatedEpoch +} + +func (d dealStateV7) SlashEpoch() abi.ChainEpoch { + return d.ds7.SlashEpoch +} + +func (d dealStateV7) Equals(other DealState) bool { + if ov7, ok := other.(dealStateV7); ok { + return d.ds7 == ov7.ds7 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV7)(nil) + +func fromV7DealState(v7 market7.DealState) DealState { + return dealStateV7{v7} } type dealProposals7 struct { diff --git a/chain/actors/builtin/market/v8.go b/chain/actors/builtin/market/v8.go index 9c68ee1fd86..f9bf25f9c7f 100644 --- a/chain/actors/builtin/market/v8.go +++ b/chain/actors/builtin/market/v8.go @@ -152,7 +152,7 @@ type dealStates8 struct { adt.Array } -func (s *dealStates8) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s 
*dealStates8) Get(dealID abi.DealID) (DealState, bool, error) { var deal8 market8.DealState found, err := s.Array.Get(uint64(dealID), &deal8) if err != nil { @@ -162,7 +162,7 @@ func (s *dealStates8) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV8DealState(deal8) - return &deal, true, nil + return deal, true, nil } func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -172,28 +172,57 @@ func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates8) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates8) decode(val *cbg.Deferred) (DealState, error) { var ds8 market8.DealState if err := ds8.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV8DealState(ds8) - return &ds, nil + return ds, nil } func (s *dealStates8) array() adt.Array { return s.Array } -func fromV8DealState(v8 market8.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v8.SectorStartEpoch, - LastUpdatedEpoch: v8.LastUpdatedEpoch, - SlashEpoch: v8.SlashEpoch, - VerifiedClaim: 0, +type dealStateV8 struct { + ds8 market8.DealState +} + +func (d dealStateV8) SectorStartEpoch() abi.ChainEpoch { + return d.ds8.SectorStartEpoch +} + +func (d dealStateV8) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds8.LastUpdatedEpoch +} + +func (d dealStateV8) SlashEpoch() abi.ChainEpoch { + return d.ds8.SlashEpoch +} + +func (d dealStateV8) Equals(other DealState) bool { + if ov8, ok := other.(dealStateV8); ok { + return d.ds8 == ov8.ds8 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV8)(nil) + +func fromV8DealState(v8 market8.DealState) DealState { + return dealStateV8{v8} } type dealProposals8 struct { diff --git 
a/chain/actors/builtin/market/v9.go b/chain/actors/builtin/market/v9.go index d692c15ccb7..3b5be4dfa26 100644 --- a/chain/actors/builtin/market/v9.go +++ b/chain/actors/builtin/market/v9.go @@ -153,7 +153,7 @@ type dealStates9 struct { adt.Array } -func (s *dealStates9) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates9) Get(dealID abi.DealID) (DealState, bool, error) { var deal9 market9.DealState found, err := s.Array.Get(uint64(dealID), &deal9) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates9) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV9DealState(deal9) - return &deal, true, nil + return deal, true, nil } func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates9) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates9) decode(val *cbg.Deferred) (DealState, error) { var ds9 market9.DealState if err := ds9.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV9DealState(ds9) - return &ds, nil + return ds, nil } func (s *dealStates9) array() adt.Array { return s.Array } -func fromV9DealState(v9 market9.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v9.SectorStartEpoch, - LastUpdatedEpoch: v9.LastUpdatedEpoch, - SlashEpoch: v9.SlashEpoch, - VerifiedClaim: 0, +type dealStateV9 struct { + ds9 market9.DealState +} + +func (d dealStateV9) SectorStartEpoch() abi.ChainEpoch { + return d.ds9.SectorStartEpoch +} + +func (d dealStateV9) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds9.LastUpdatedEpoch +} + +func (d dealStateV9) SlashEpoch() abi.ChainEpoch { + return d.ds9.SlashEpoch +} + +func (d dealStateV9) Equals(other DealState) bool { + if ov9, ok := other.(dealStateV9); ok { + return d.ds9 == ov9.ds9 + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } 
+ if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} - ret.VerifiedClaim = verifregtypes.AllocationId(v9.VerifiedClaim) +var _ DealState = (*dealStateV9)(nil) - return ret +func fromV9DealState(v9 market9.DealState) DealState { + return dealStateV9{v9} } type dealProposals9 struct { diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index ead254847de..0f7204ec733 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -17,7 +17,8 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" - miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner" + + minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/manifest" @@ -153,7 +154,7 @@ type Partition interface { UnprovenSectors() (bitfield.BitField, error) } -type SectorOnChainInfo = miner12.SectorOnChainInfo +type SectorOnChainInfo = minertypes13.SectorOnChainInfo func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old @@ -240,7 +241,9 @@ type DeclareFaultsParams = minertypes.DeclareFaultsParams type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams type ProveCommitSectorParams = minertypes.ProveCommitSectorParams type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams +type ProveReplicaUpdatesParams2 = minertypes.ProveReplicaUpdatesParams2 type ReplicaUpdate = minertypes.ReplicaUpdate +type ReplicaUpdate2 = minertypes.ReplicaUpdate2 type PreCommitSectorBatchParams = 
minertypes.PreCommitSectorBatchParams type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params @@ -249,6 +252,13 @@ type ExpirationExtension2 = minertypes.ExpirationExtension2 type CompactPartitionsParams = minertypes.CompactPartitionsParams type WithdrawBalanceParams = minertypes.WithdrawBalanceParams +type PieceActivationManifest = minertypes13.PieceActivationManifest +type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params +type SectorActivationManifest = minertypes13.SectorActivationManifest +type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params +type SectorUpdateManifest = minertypes13.SectorUpdateManifest +type SectorOnChainInfoFlags = minertypes13.SectorOnChainInfoFlags + var QAPowerMax = minertypes.QAPowerMax type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 151688d048b..cdf0046f587 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" - miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner" + minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/dline" @@ -52,6 +52,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version12: return make12(store) + case actors.Version13: 
+ return make13(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -213,7 +219,7 @@ type Partition interface { UnprovenSectors() (bitfield.BitField, error) } -type SectorOnChainInfo = miner12.SectorOnChainInfo +type SectorOnChainInfo = minertypes13.SectorOnChainInfo func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old @@ -300,7 +306,9 @@ type DeclareFaultsParams = minertypes.DeclareFaultsParams type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams type ProveCommitSectorParams = minertypes.ProveCommitSectorParams type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams +type ProveReplicaUpdatesParams2 = minertypes.ProveReplicaUpdatesParams2 type ReplicaUpdate = minertypes.ReplicaUpdate +type ReplicaUpdate2 = minertypes.ReplicaUpdate2 type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params @@ -309,6 +317,13 @@ type ExpirationExtension2 = minertypes.ExpirationExtension2 type CompactPartitionsParams = minertypes.CompactPartitionsParams type WithdrawBalanceParams = minertypes.WithdrawBalanceParams +type PieceActivationManifest = minertypes13.PieceActivationManifest +type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params +type SectorActivationManifest = minertypes13.SectorActivationManifest +type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params +type SectorUpdateManifest = minertypes13.SectorUpdateManifest +type SectorOnChainInfoFlags = minertypes13.SectorOnChainInfoFlags + var QAPowerMax = minertypes.QAPowerMax type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo @@ -375,5 +390,6 @@ func AllCodes() []cid.Cid { 
(&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/miner/v13.go b/chain/actors/builtin/miner/v13.go new file mode 100644 index 00000000000..d61c2a9a222 --- /dev/null +++ b/chain/actors/builtin/miner/v13.go @@ -0,0 +1,594 @@ +package miner + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + out.State = miner13.State{} + return &out, nil +} + +type state13 struct { + miner13.State + store adt.Store +} + +type deadline13 struct { + miner13.Deadline + store adt.Store +} + +type partition13 struct { + miner13.Partition + store adt.Store +} + +func (s *state13) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesn't have enough funds to cover their 
locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state13) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state13) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state13) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state13) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state13) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +// Returns nil, nil if sector is not found +func (s *state13) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV13SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state13) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state13) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner13.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. 
+func (s *state13) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner13.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner13.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner13.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner13.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner13.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state13) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := 
s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV13SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state13) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt13.AsMap(s.store, s.State.PreCommittedSectors, builtin13.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner13.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV13SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state13) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner13.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info13 miner13.SectorOnChainInfo + if err := sectors.ForEach(&info13, func(_ int64) error { + info := fromV13SectorOnChainInfo(info13) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos13, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos13)) + for i, info13 := range infos13 { + info := fromV13SectorOnChainInfo(*info13) + infos[i] = &info + } + return infos, nil +} + +func (s *state13) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state13) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state13) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state13) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state13) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state13) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline13{*dl, s.store}, nil +} + +func (s *state13) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner13.Deadline) error { + return cb(i, &deadline13{*dl, s.store}) + }) +} + +func (s *state13) NumDeadlines() (uint64, error) { + return miner13.WPoStPeriodDeadlines, nil +} + +func (s *state13) DeadlinesChanged(other State) (bool, error) { + other13, ok := other.(*state13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other13.Deadlines), nil +} + +func (s *state13) MinerInfoChanged(other State) (bool, error) { + other13, ok := other.(*state13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other13.State.Info), nil +} + +func (s *state13) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + + Beneficiary: info.Beneficiary, + BeneficiaryTerm: BeneficiaryTerm(info.BeneficiaryTerm), + PendingBeneficiaryTerm: (*PendingBeneficiaryChange)(info.PendingBeneficiaryTerm), + } + + return mi, nil +} + +func (s *state13) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state13) DeadlineCronActive()
(bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state13) sectors() (adt.Array, error) { + return adt13.AsArray(s.store, s.Sectors, miner13.SectorsAmtBitwidth) +} + +func (s *state13) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner13.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV13SectorOnChainInfo(si), nil +} + +func (s *state13) precommits() (adt.Map, error) { + return adt13.AsMap(s.store, s.PreCommittedSectors, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner13.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV13SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state13) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner13.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner13.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline13) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition13{*p, d.store}, nil +} + +func (d *deadline13) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + 
return err + } + var part miner13.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition13{part, d.store}) + }) +} + +func (d *deadline13) PartitionsChanged(other Deadline) (bool, error) { + other13, ok := other.(*deadline13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other13.Deadline.Partitions), nil +} + +func (d *deadline13) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline13) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition13) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition13) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition13) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition13) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV13SectorOnChainInfo(v13 miner13.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v13.SectorNumber, + SealProof: v13.SealProof, + SealedCID: v13.SealedCID, + DealIDs: v13.DealIDs, + Activation: v13.Activation, + Expiration: v13.Expiration, + DealWeight: v13.DealWeight, + VerifiedDealWeight: v13.VerifiedDealWeight, + InitialPledge: v13.InitialPledge, + ExpectedDayReward: v13.ExpectedDayReward, + ExpectedStoragePledge: v13.ExpectedStoragePledge, + + SectorKeyCID: v13.SectorKeyCID, + + PowerBaseEpoch: v13.PowerBaseEpoch, + ReplacedDayReward: v13.ReplacedDayReward, + Flags: v13.Flags, + } + return info +} + +func fromV13SectorPreCommitOnChainInfo(v13 miner13.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + ret := SectorPreCommitOnChainInfo{ + Info: SectorPreCommitInfo{ + SealProof:
v13.Info.SealProof, + SectorNumber: v13.Info.SectorNumber, + SealedCID: v13.Info.SealedCID, + SealRandEpoch: v13.Info.SealRandEpoch, + DealIDs: v13.Info.DealIDs, + Expiration: v13.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v13.PreCommitDeposit, + PreCommitEpoch: v13.PreCommitEpoch, + } + + ret.Info.UnsealedCid = v13.Info.UnsealedCid + + return ret +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.MinerKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/multisig/message10.go b/chain/actors/builtin/multisig/message10.go index 8f7bb5a6f2e..59dd4dde016 100644 --- a/chain/actors/builtin/multisig/message10.go +++ b/chain/actors/builtin/multisig/message10.go @@ -8,7 +8,7 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" multisig10 "github.com/filecoin-project/go-state-types/builtin/v10/multisig" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message10) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message11.go b/chain/actors/builtin/multisig/message11.go index 4c7520d5dea..89bee0255af 100644 --- a/chain/actors/builtin/multisig/message11.go 
+++ b/chain/actors/builtin/multisig/message11.go @@ -8,7 +8,7 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" multisig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message11) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message12.go b/chain/actors/builtin/multisig/message12.go index 43658c04b03..326026c93ff 100644 --- a/chain/actors/builtin/multisig/message12.go +++ b/chain/actors/builtin/multisig/message12.go @@ -7,8 +7,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message12) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message13.go b/chain/actors/builtin/multisig/message13.go new file mode 100644 index 00000000000..94a9cbfbf7e --- /dev/null +++ 
b/chain/actors/builtin/multisig/message13.go @@ -0,0 +1,77 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtintypes "github.com/filecoin-project/go-state-types/builtin" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + multisig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message13 struct{ message0 } + +func (m message13) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig13.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + code, ok := actors.GetActorCodeID(actorstypes.Version13, manifest.MultisigKey) + if !ok { + return nil, xerrors.Errorf("failed to get multisig code ID") + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init13.ExecParams{ + CodeCID: code, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil 
{ + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/message8.go b/chain/actors/builtin/multisig/message8.go index 390c94691e4..5d79fe6c5a7 100644 --- a/chain/actors/builtin/multisig/message8.go +++ b/chain/actors/builtin/multisig/message8.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" multisig8 "github.com/filecoin-project/go-state-types/builtin/v8/multisig" "github.com/filecoin-project/go-state-types/manifest" @@ -57,7 +57,7 @@ func (m message8) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message9.go b/chain/actors/builtin/multisig/message9.go index 907bec7d556..9003b7e38d8 100644 --- a/chain/actors/builtin/multisig/message9.go +++ b/chain/actors/builtin/multisig/message9.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" multisig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig" "github.com/filecoin-project/go-state-types/manifest" @@ -57,7 +57,7 @@ func (m message9) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams 
:= &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go index 71a3b7b2237..08da9bd2dca 100644 --- a/chain/actors/builtin/multisig/multisig.go +++ b/chain/actors/builtin/multisig/multisig.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - msig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig" + msig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -51,6 +51,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -121,6 +124,9 @@ func MakeState(store adt.Store, av actorstypes.Version, signers []address.Addres case actorstypes.Version12: return make12(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + case actorstypes.Version13: + return make13(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -147,7 +153,7 @@ type State interface { GetState() interface{} } -type Transaction = msig12.Transaction +type Transaction = msig13.Transaction var Methods = builtintypes.MethodsMultisig @@ -189,6 +195,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder { case actorstypes.Version12: return message12{message0{from}} + + case actorstypes.Version13: + return message13{message0{from}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -212,13 +221,13 @@ type MessageBuilder 
interface { } // this type is the same between v0 and v2 -type ProposalHashData = msig12.ProposalHashData -type ProposeReturn = msig12.ProposeReturn -type ProposeParams = msig12.ProposeParams -type ApproveReturn = msig12.ApproveReturn +type ProposalHashData = msig13.ProposalHashData +type ProposeReturn = msig13.ProposeReturn +type ProposeParams = msig13.ProposeParams +type ApproveReturn = msig13.ApproveReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := msig12.TxnIDParams{ID: msig12.TxnID(id)} + params := msig13.TxnIDParams{ID: msig13.TxnID(id)} if data != nil { if data.Requester.Protocol() != address.ID { return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) @@ -254,5 +263,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/multisig/v13.go b/chain/actors/builtin/multisig/v13.go new file mode 100644 index 00000000000..57dd66976d0 --- /dev/null +++ b/chain/actors/builtin/multisig/v13.go @@ -0,0 +1,138 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + msig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + 
return nil, err + } + return &out, nil +} + +func make13(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state13{store: store} + out.State = msig13.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt13.StoreEmptyMap(store, builtin13.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state13 struct { + msig13.State + store adt.Store +} + +func (s *state13) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state13) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state13) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state13) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state13) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state13) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state13) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt13.AsMap(s.store, s.State.PendingTxns, builtin13.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig13.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state13) PendingTxnChanged(other State) (bool, error) { + other13, ok := other.(*state13) + if !ok { + // treat an upgrade as a change, 
always + return true, nil + } + return !s.State.PendingTxns.Equals(other13.PendingTxns), nil +} + +func (s *state13) transactions() (adt.Map, error) { + return adt13.AsMap(s.store, s.PendingTxns, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig13.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.MultisigKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/paych/message13.go b/chain/actors/builtin/paych/message13.go new file mode 100644 index 00000000000..1614ec60827 --- /dev/null +++ b/chain/actors/builtin/paych/message13.go @@ -0,0 +1,109 @@ +package paych + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message13 struct{ from address.Address } + +func (m message13) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, 
error) { + + actorCodeID, ok := actors.GetActorCodeID(actorstypes.Version13, "paymentchannel") + if !ok { + return nil, xerrors.Errorf("error getting actor paymentchannel code id for actor version %d", 13) + } + + params, aerr := actors.SerializeParams(&paych13.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init13.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin13.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message13) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych13.UpdateChannelStateParams{ + + Sv: toV13SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin13.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func toV13SignedVoucher(sv paychtypes.SignedVoucher) paych13.SignedVoucher { + merges := make([]paych13.Merge, len(sv.Merges)) + for i := range sv.Merges { + merges[i] = paych13.Merge{ + Lane: sv.Merges[i].Lane, + Nonce: sv.Merges[i].Nonce, + } + } + + return paych13.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretHash: sv.SecretHash, + Extra: (*paych13.ModVerifyParams)(sv.Extra), + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: merges, + Signature: sv.Signature, + } +} + +func (m message13) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin13.MethodsPaych.Settle, + }, nil +} + +func (m message13) Collect(paych address.Address) 
(*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin13.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index fc8908cb4e1..2b5c78edfee 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -53,6 +53,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -173,6 +176,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder { case actorstypes.Version12: return message12{from} + case actorstypes.Version13: + return message13{from} + default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -215,5 +221,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/paych/v13.go b/chain/actors/builtin/paych/v13.go new file mode 100644 index 00000000000..c5a10c571eb --- /dev/null +++ b/chain/actors/builtin/paych/v13.go @@ -0,0 +1,135 @@ +package paych + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + 
return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + out.State = paych13.State{} + return &out, nil +} + +type state13 struct { + paych13.State + store adt.Store + lsAmt *adt13.Array +} + +// Channel owner, who has funded the actor +func (s *state13) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state13) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state13) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state13) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state13) getOrLoadLsAmt() (*adt13.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt13.AsArray(s.store, s.State.LaneStates, paych13.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state13) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state13) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych13.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState13{ls}) + }) +} + +type laneState13 struct { + paych13.LaneState +} + +func (ls *laneState13) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState13) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} + +func (s *state13) ActorKey() string { + return manifest.PaychKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index 9b64ded3877..e263e3f8788 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -27,8 +27,8 @@ import ( ) var ( - Address = builtin12.StoragePowerActorAddr - Methods = builtin12.MethodsPower + Address = builtin13.StoragePowerActorAddr + Methods = builtin13.MethodsPower ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -54,6 +54,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -124,6 +127,9 @@ func MakeState(store adt.Store, av 
actorstypes.Version) (State, error) { case actorstypes.Version12: return make12(store) + case actorstypes.Version13: + return make13(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -190,5 +196,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/power/v13.go b/chain/actors/builtin/power/v13.go new file mode 100644 index 00000000000..4cf761b16d5 --- /dev/null +++ b/chain/actors/builtin/power/v13.go @@ -0,0 +1,207 @@ +package power + +import ( + "bytes" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + power13 "github.com/filecoin-project/go-state-types/builtin/v13/power" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + + s, err := power13.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + power13.State + store adt.Store +} + +func (s *state13) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state13) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: 
s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state13) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state13) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power13.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state13) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state13) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state13) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state13) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state13) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power13.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s 
*state13) ClaimsChanged(other State) (bool, error) { + other13, ok := other.(*state13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other13.State.Claims), nil +} + +func (s *state13) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state13) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state13) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state13) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) claims() (adt.Map, error) { + return adt13.AsMap(s.store, s.Claims, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power13.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV13Claim(ci), nil +} + +func fromV13Claim(v13 power13.Claim) Claim { + return Claim{ + RawBytePower: v13.RawBytePower, + QualityAdjPower: v13.QualityAdjPower, + } +} + +func (s *state13) ActorKey() string { + return manifest.PowerKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/registry.go b/chain/actors/builtin/registry.go index 6ba5fef03b2..93768580b08 100644 --- a/chain/actors/builtin/registry.go +++ b/chain/actors/builtin/registry.go @@ -58,6 +58,22 @@ import ( reward12 "github.com/filecoin-project/go-state-types/builtin/v12/reward" system12 
"github.com/filecoin-project/go-state-types/builtin/v12/system" verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg" + account13 "github.com/filecoin-project/go-state-types/builtin/v13/account" + cron13 "github.com/filecoin-project/go-state-types/builtin/v13/cron" + datacap13 "github.com/filecoin-project/go-state-types/builtin/v13/datacap" + eam13 "github.com/filecoin-project/go-state-types/builtin/v13/eam" + ethaccount13 "github.com/filecoin-project/go-state-types/builtin/v13/ethaccount" + evm13 "github.com/filecoin-project/go-state-types/builtin/v13/evm" + _init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + market13 "github.com/filecoin-project/go-state-types/builtin/v13/market" + miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + multisig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" + paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" + placeholder13 "github.com/filecoin-project/go-state-types/builtin/v13/placeholder" + power13 "github.com/filecoin-project/go-state-types/builtin/v13/power" + reward13 "github.com/filecoin-project/go-state-types/builtin/v13/reward" + system13 "github.com/filecoin-project/go-state-types/builtin/v13/system" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" account8 "github.com/filecoin-project/go-state-types/builtin/v8/account" cron8 "github.com/filecoin-project/go-state-types/builtin/v8/cron" _init8 "github.com/filecoin-project/go-state-types/builtin/v8/init" @@ -617,6 +633,110 @@ func MakeRegistry(av actorstypes.Version) []RegistryEntry { } } + case actorstypes.Version13: + for key, codeID := range codeIDs { + switch key { + case manifest.AccountKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: account13.Methods, + state: new(account13.State), + }) + case manifest.CronKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: cron13.Methods, + state: 
new(cron13.State), + }) + case manifest.InitKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: _init13.Methods, + state: new(_init13.State), + }) + case manifest.MarketKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: market13.Methods, + state: new(market13.State), + }) + case manifest.MinerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: miner13.Methods, + state: new(miner13.State), + }) + case manifest.MultisigKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: multisig13.Methods, + state: new(multisig13.State), + }) + case manifest.PaychKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: paych13.Methods, + state: new(paych13.State), + }) + case manifest.PowerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: power13.Methods, + state: new(power13.State), + }) + case manifest.RewardKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: reward13.Methods, + state: new(reward13.State), + }) + case manifest.SystemKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: system13.Methods, + state: new(system13.State), + }) + case manifest.VerifregKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: verifreg13.Methods, + state: new(verifreg13.State), + }) + case manifest.DatacapKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: datacap13.Methods, + state: new(datacap13.State), + }) + + case manifest.EvmKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: evm13.Methods, + state: new(evm13.State), + }) + case manifest.EamKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: eam13.Methods, + state: nil, + }) + case manifest.PlaceholderKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: placeholder13.Methods, + state: nil, + }) + case manifest.EthAccountKey: + 
registry = append(registry, RegistryEntry{ + code: codeID, + methods: ethaccount13.Methods, + state: nil, + }) + + } + } + default: panic("expected version v8 and up only, use specs-actors for v0-7") } diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 3c646364585..a6c8bff5b55 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin12.RewardActorAddr - Methods = builtin12.MethodsReward + Address = builtin13.RewardActorAddr + Methods = builtin13.MethodsReward ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -52,6 +52,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actorstypes.Version, currRealizedPower abi.St case actorstypes.Version12: return make12(store, currRealizedPower) + case actorstypes.Version13: + return make13(store, currRealizedPower) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -166,5 +172,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/reward/v13.go b/chain/actors/builtin/reward/v13.go new file mode 100644 index 00000000000..e8d343cab29 --- /dev/null +++ b/chain/actors/builtin/reward/v13.go @@ -0,0 +1,120 @@ +package reward + +import ( + 
"fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + reward13 "github.com/filecoin-project/go-state-types/builtin/v13/reward" + smoothing13 "github.com/filecoin-project/go-state-types/builtin/v13/util/smoothing" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state13{store: store} + out.State = *reward13.ConstructState(currRealizedPower) + return &out, nil +} + +type state13 struct { + reward13.State + store adt.Store +} + +func (s *state13) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state13) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state13) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state13) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state13) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state13) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state13) 
CumsumBaseline() (reward13.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state13) CumsumRealized() (reward13.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state13) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner13.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing13.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state13) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner13.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing13.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.RewardKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go index 2a2b703bb1c..1526a1fc0a5 100644 --- a/chain/actors/builtin/system/system.go +++ b/chain/actors/builtin/system/system.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" 
"github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -21,7 +21,7 @@ import ( ) var ( - Address = builtin12.SystemActorAddr + Address = builtin13.SystemActorAddr ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -47,6 +47,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -117,6 +120,9 @@ func MakeState(store adt.Store, av actorstypes.Version, builtinActors cid.Cid) ( case actorstypes.Version12: return make12(store, builtinActors) + case actorstypes.Version13: + return make13(store, builtinActors) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -145,5 +151,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/system/v13.go b/chain/actors/builtin/system/v13.go new file mode 100644 index 00000000000..8facf0033cb --- /dev/null +++ b/chain/actors/builtin/system/v13.go @@ -0,0 +1,72 @@ +package system + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + system13 "github.com/filecoin-project/go-state-types/builtin/v13/system" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, builtinActors cid.Cid) (State, error) { + out := state13{store: store} + out.State = system13.State{ + BuiltinActors: builtinActors, + } 
+ return &out, nil +} + +type state13 struct { + system13.State + store adt.Store +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) GetBuiltinActors() cid.Cid { + + return s.State.BuiltinActors + +} + +func (s *state13) SetBuiltinActors(c cid.Cid) error { + + s.State.BuiltinActors = c + return nil + +} + +func (s *state13) ActorKey() string { + return manifest.SystemKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template index 991c6717bd7..f4467d979bf 100644 --- a/chain/actors/builtin/verifreg/actor.go.template +++ b/chain/actors/builtin/verifreg/actor.go.template @@ -81,8 +81,10 @@ type State interface { ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error GetAllocation(clientIdAddr address.Address, allocationId AllocationId) (*Allocation, bool, error) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) + GetAllAllocations() (map[AllocationId]Allocation, error) GetClaim(providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) + GetAllClaims() (map[ClaimId]Claim, error) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) GetState() interface{} } diff --git a/chain/actors/builtin/verifreg/state.go.template b/chain/actors/builtin/verifreg/state.go.template index adcbc22c2e4..7835b16b640 100644 --- a/chain/actors/builtin/verifreg/state.go.template +++ b/chain/actors/builtin/verifreg/state.go.template @@ -145,6 +145,21 @@ func (s *state{{.v}}) 
GetAllocations(clientIdAddr address.Address) (map[Allocati {{end}} } +func (s *state{{.v}}) GetAllAllocations() (map[AllocationId]Allocation, error) { +{{if (le .v 8)}} + return nil, xerrors.Errorf("unsupported in actors v{{.v}}") +{{else}} + v{{.v}}Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v{{.v}}Map)) + for k, v := range v{{.v}}Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err +{{end}} +} + func (s *state{{.v}}) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { {{if (le .v 8)}} return nil, false, xerrors.Errorf("unsupported in actors v{{.v}}") @@ -170,6 +185,22 @@ func (s *state{{.v}}) GetClaims(providerIdAddr address.Address) (map[ClaimId]Cla {{end}} } +func (s *state{{.v}}) GetAllClaims() (map[ClaimId]Claim, error) { +{{if (le .v 8)}} + return nil, xerrors.Errorf("unsupported in actors v{{.v}}") +{{else}} + v{{.v}}Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v{{.v}}Map)) + for k, v := range v{{.v}}Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +{{end}} +} + func (s *state{{.v}}) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { {{if (le .v 8)}} return nil, xerrors.Errorf("unsupported in actors v{{.v}}") diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go index 9913c42c0c6..4129e7a2dae 100644 --- a/chain/actors/builtin/verifreg/v0.go +++ b/chain/actors/builtin/verifreg/v0.go @@ -106,6 +106,12 @@ func (s *state0) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state0) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v0") + +} + func (s *state0) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v0") @@ -118,6 +124,12 @@ 
func (s *state0) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state0) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v0") + +} + func (s *state0) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v0") diff --git a/chain/actors/builtin/verifreg/v10.go b/chain/actors/builtin/verifreg/v10.go index 256f4d2f888..85f85c7f2c9 100644 --- a/chain/actors/builtin/verifreg/v10.go +++ b/chain/actors/builtin/verifreg/v10.go @@ -114,6 +114,19 @@ func (s *state10) GetAllocations(clientIdAddr address.Address) (map[AllocationId } +func (s *state10) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v10Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v10Map)) + for k, v := range v10Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + func (s *state10) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg10.ClaimId(claimId)) @@ -134,6 +147,19 @@ func (s *state10) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, } +func (s *state10) GetAllClaims() (map[ClaimId]Claim, error) { + + v10Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v10Map)) + for k, v := range v10Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + func (s *state10) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { v10Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) diff --git a/chain/actors/builtin/verifreg/v11.go b/chain/actors/builtin/verifreg/v11.go index 7b7b9e4c0a8..5a8e1cb0cc8 100644 --- a/chain/actors/builtin/verifreg/v11.go +++ b/chain/actors/builtin/verifreg/v11.go @@ -114,6 +114,19 @@ func (s *state11) GetAllocations(clientIdAddr 
address.Address) (map[AllocationId } +func (s *state11) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v11Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v11Map)) + for k, v := range v11Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + func (s *state11) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg11.ClaimId(claimId)) @@ -134,6 +147,19 @@ func (s *state11) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, } +func (s *state11) GetAllClaims() (map[ClaimId]Claim, error) { + + v11Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v11Map)) + for k, v := range v11Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + func (s *state11) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { v11Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) diff --git a/chain/actors/builtin/verifreg/v12.go b/chain/actors/builtin/verifreg/v12.go index 77a113fbe86..7c9a493f169 100644 --- a/chain/actors/builtin/verifreg/v12.go +++ b/chain/actors/builtin/verifreg/v12.go @@ -114,6 +114,19 @@ func (s *state12) GetAllocations(clientIdAddr address.Address) (map[AllocationId } +func (s *state12) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v12Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v12Map)) + for k, v := range v12Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + func (s *state12) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg12.ClaimId(claimId)) @@ -134,6 +147,19 @@ func (s *state12) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, } +func (s *state12) 
GetAllClaims() (map[ClaimId]Claim, error) { + + v12Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v12Map)) + for k, v := range v12Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + func (s *state12) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { v12Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) diff --git a/chain/actors/builtin/verifreg/v13.go b/chain/actors/builtin/verifreg/v13.go new file mode 100644 index 00000000000..0c487a2f7e0 --- /dev/null +++ b/chain/actors/builtin/verifreg/v13.go @@ -0,0 +1,196 @@ +package verifreg + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state13{store: store} + + s, err := verifreg13.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + verifreg13.State + store adt.Store +} + +func (s *state13) RootKey() 
(address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state13) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return false, big.Zero(), xerrors.Errorf("unsupported in actors v13") + +} + +func (s *state13) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version13, s.verifiers, addr) +} + +func (s *state13) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version13, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state13) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version13, s.verifiers, cb) +} + +func (s *state13) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return xerrors.Errorf("unsupported in actors v13") + +} + +func (s *state13) verifiedClients() (adt.Map, error) { + + return nil, xerrors.Errorf("unsupported in actors v13") + +} + +func (s *state13) verifiers() (adt.Map, error) { + return adt13.AsMap(s.store, s.Verifiers, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) removeDataCapProposalIDs() (adt.Map, error) { + return adt13.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*Allocation, bool, error) { + + alloc, ok, err := s.FindAllocation(s.store, clientIdAddr, verifreg13.AllocationId(allocationId)) + return (*Allocation)(alloc), ok, err +} + +func (s *state13) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) { + + v13Map, err := s.LoadAllocationsToMap(s.store, clientIdAddr) + + retMap := make(map[AllocationId]Allocation, len(v13Map)) + for k, v := range v13Map { + 
retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + +func (s *state13) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v13Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v13Map)) + for k, v := range v13Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + +func (s *state13) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { + + claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg13.ClaimId(claimId)) + return (*Claim)(claim), ok, err + +} + +func (s *state13) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) { + + v13Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[ClaimId]Claim, len(v13Map)) + for k, v := range v13Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + +func (s *state13) GetAllClaims() (map[ClaimId]Claim, error) { + + v13Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v13Map)) + for k, v := range v13Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + +func (s *state13) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + v13Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[abi.SectorNumber][]ClaimId) + for k, v := range v13Map { + claims, ok := retMap[v.Sector] + if !ok { + retMap[v.Sector] = []ClaimId{ClaimId(k)} + } else { + retMap[v.Sector] = append(claims, ClaimId(k)) + } + } + + return retMap, err + +} + +func (s *state13) ActorKey() string { + return manifest.VerifregKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + 
+ return code +} diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go index 31f7f775df2..7f71639e668 100644 --- a/chain/actors/builtin/verifreg/v2.go +++ b/chain/actors/builtin/verifreg/v2.go @@ -106,6 +106,12 @@ func (s *state2) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state2) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v2") + +} + func (s *state2) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v2") @@ -118,6 +124,12 @@ func (s *state2) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state2) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v2") + +} + func (s *state2) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v2") diff --git a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go index 3ea016fd5c3..3e8ea9a1f7a 100644 --- a/chain/actors/builtin/verifreg/v3.go +++ b/chain/actors/builtin/verifreg/v3.go @@ -107,6 +107,12 @@ func (s *state3) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state3) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v3") + +} + func (s *state3) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v3") @@ -119,6 +125,12 @@ func (s *state3) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state3) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v3") + +} + func (s *state3) GetClaimIdsBySector(providerIdAddr address.Address) 
(map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v3") diff --git a/chain/actors/builtin/verifreg/v4.go b/chain/actors/builtin/verifreg/v4.go index 464cc9fdc20..1dc43886422 100644 --- a/chain/actors/builtin/verifreg/v4.go +++ b/chain/actors/builtin/verifreg/v4.go @@ -107,6 +107,12 @@ func (s *state4) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state4) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v4") + +} + func (s *state4) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v4") @@ -119,6 +125,12 @@ func (s *state4) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state4) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v4") + +} + func (s *state4) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v4") diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go index 17901dd23a9..a7505330c26 100644 --- a/chain/actors/builtin/verifreg/v5.go +++ b/chain/actors/builtin/verifreg/v5.go @@ -107,6 +107,12 @@ func (s *state5) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state5) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v5") + +} + func (s *state5) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v5") @@ -119,6 +125,12 @@ func (s *state5) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state5) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v5") + +} + func (s *state5) 
GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v5") diff --git a/chain/actors/builtin/verifreg/v6.go b/chain/actors/builtin/verifreg/v6.go index 68fac64cb4d..93424152e4b 100644 --- a/chain/actors/builtin/verifreg/v6.go +++ b/chain/actors/builtin/verifreg/v6.go @@ -107,6 +107,12 @@ func (s *state6) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state6) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v6") + +} + func (s *state6) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v6") @@ -119,6 +125,12 @@ func (s *state6) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state6) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v6") + +} + func (s *state6) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v6") diff --git a/chain/actors/builtin/verifreg/v7.go b/chain/actors/builtin/verifreg/v7.go index e8f3ac73984..bd67aee5ff6 100644 --- a/chain/actors/builtin/verifreg/v7.go +++ b/chain/actors/builtin/verifreg/v7.go @@ -106,6 +106,12 @@ func (s *state7) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state7) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v7") + +} + func (s *state7) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v7") @@ -118,6 +124,12 @@ func (s *state7) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state7) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, 
xerrors.Errorf("unsupported in actors v7") + +} + func (s *state7) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v7") diff --git a/chain/actors/builtin/verifreg/v8.go b/chain/actors/builtin/verifreg/v8.go index 89393c4d9cf..1515c1c5bd3 100644 --- a/chain/actors/builtin/verifreg/v8.go +++ b/chain/actors/builtin/verifreg/v8.go @@ -106,6 +106,12 @@ func (s *state8) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state8) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v8") + +} + func (s *state8) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v8") @@ -118,6 +124,12 @@ func (s *state8) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state8) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v8") + +} + func (s *state8) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v8") diff --git a/chain/actors/builtin/verifreg/v9.go b/chain/actors/builtin/verifreg/v9.go index ce63c7f94b4..41422615bcc 100644 --- a/chain/actors/builtin/verifreg/v9.go +++ b/chain/actors/builtin/verifreg/v9.go @@ -113,6 +113,19 @@ func (s *state9) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state9) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v9Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v9Map)) + for k, v := range v9Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + func (s *state9) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { claim, ok, err := s.FindClaim(s.store, 
providerIdAddr, verifreg9.ClaimId(claimId)) @@ -133,6 +146,19 @@ func (s *state9) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state9) GetAllClaims() (map[ClaimId]Claim, error) { + + v9Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v9Map)) + for k, v := range v9Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + func (s *state9) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { v9Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index de906f52127..2d66d90282d 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin12.VerifiedRegistryActorAddr - Methods = builtin12.MethodsVerifiedRegistry + Address = builtin13.VerifiedRegistryActorAddr + Methods = builtin13.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -52,6 +52,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actorstypes.Version, rootKeyAddress address.A case actorstypes.Version12: return make12(store, rootKeyAddress) + case actorstypes.Version13: + return 
make13(store, rootKeyAddress) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -141,8 +147,10 @@ type State interface { ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error GetAllocation(clientIdAddr address.Address, allocationId AllocationId) (*Allocation, bool, error) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) + GetAllAllocations() (map[AllocationId]Allocation, error) GetClaim(providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) + GetAllClaims() (map[ClaimId]Claim, error) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) GetState() interface{} } @@ -161,6 +169,7 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index 6d2b411546a..b8d23903c44 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -9,6 +9,7 @@ import ( builtin10 "github.com/filecoin-project/go-state-types/builtin" builtin11 "github.com/filecoin-project/go-state-types/builtin" builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" builtin8 "github.com/filecoin-project/go-state-types/builtin" builtin9 "github.com/filecoin-project/go-state-types/builtin" market10 "github.com/filecoin-project/go-state-types/builtin/v10/market" @@ -19,8 +20,11 @@ import ( verifreg11 "github.com/filecoin-project/go-state-types/builtin/v11/verifreg" market12 "github.com/filecoin-project/go-state-types/builtin/v12/market" miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner" - paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych" verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg" + market13 
"github.com/filecoin-project/go-state-types/builtin/v13/market" + miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner" verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" @@ -59,14 +63,14 @@ import ( ) const ( - ChainFinality = miner12.ChainFinality + ChainFinality = miner13.ChainFinality SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych12.SettleDelay - MaxPreCommitRandomnessLookback = builtin12.EpochsInDay + SealRandomnessLookback + PaychSettleDelay = paych13.SettleDelay + MaxPreCommitRandomnessLookback = builtin13.EpochsInDay + SealRandomnessLookback ) var ( - MarketDefaultAllocationTermBuffer = market12.MarketDefaultAllocationTermBuffer + MarketDefaultAllocationTermBuffer = market13.MarketDefaultAllocationTermBuffer ) // SetSupportedProofTypes sets supported proof types, across all actor versions. @@ -181,11 +185,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { miner12.PreCommitChallengeDelay = delay + miner13.PreCommitChallengeDelay = delay + } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. func GetPreCommitChallengeDelay() abi.ChainEpoch { - return miner12.PreCommitChallengeDelay + return miner13.PreCommitChallengeDelay } // SetConsensusMinerMinPower sets the minimum power of an individual miner must @@ -239,6 +245,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { policy.ConsensusMinerMinPower = p } + for _, policy := range builtin13.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + } // SetMinVerifiedDealSize sets the minimum size of a verified deal. 
This should @@ -269,6 +279,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { verifreg12.MinVerifiedDealSize = size + verifreg13.MinVerifiedDealSize = size + } func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { @@ -322,6 +334,10 @@ func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProo return miner12.MaxProveCommitDuration[t], nil + case actorstypes.Version13: + + return miner13.MaxProveCommitDuration[t], nil + default: return 0, xerrors.Errorf("unsupported actors version") } @@ -387,6 +403,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) { Denominator: denom, } + market13.ProviderCollateralSupplyTarget = builtin13.BigFrac{ + Numerator: num, + Denominator: denom, + } + } func DealProviderCollateralBounds( @@ -460,13 +481,18 @@ func DealProviderCollateralBounds( min, max := market12.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) return min, max, nil + case actorstypes.Version13: + + min, max := market13.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + default: return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version") } } func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { - return market12.DealDurationBounds(pieceSize) + return market13.DealDurationBounds(pieceSize) } // Sets the challenge window and scales the proving period to match (such that @@ -549,6 +575,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) { // scale it if we're scaling the challenge period. miner12.WPoStDisputeWindow = period * 30 + miner13.WPoStChallengeWindow = period + miner13.WPoStProvingPeriod = period * abi.ChainEpoch(miner13.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner13.WPoStDisputeWindow = period * 30 + } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -603,6 +636,9 @@ func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) case actorstypes.Version12: return miner12.MaxSectorExpirationExtension, nil + case actorstypes.Version13: + return miner13.MaxSectorExpirationExtension, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -610,11 +646,11 @@ func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) } func GetMinSectorExpiration() abi.ChainEpoch { - return miner12.MinSectorExpiration + return miner13.MinSectorExpiration } func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin12.PoStProofWindowPoStPartitionSectors(p) + sectorsPerPart, err := builtin13.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } @@ -623,7 +659,7 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e return 0, err } - return min(miner12.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil + return min(miner13.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil } func GetDefaultAggregationProof() abi.RegisteredAggregationProof { @@ -635,7 +671,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime } - return builtin12.SealProofPoliciesV11[proof].SectorMaxLifetime + return builtin13.SealProofPoliciesV11[proof].SectorMaxLifetime } func GetAddressedSectorsMax(nwVer network.Version) (int, error) { @@ -681,6 +717,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) { case actorstypes.Version12: return miner12.AddressedSectorsMax, nil + case actorstypes.Version13: + return miner13.AddressedSectorsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -742,6 +781,10 @@ func 
GetDeclarationsMax(nwVer network.Version) (int, error) { return miner12.DeclarationsMax, nil + case actorstypes.Version13: + + return miner13.DeclarationsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -802,6 +845,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba return miner12.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + case actorstypes.Version13: + + return miner13.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } @@ -862,6 +909,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base return miner12.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + case actorstypes.Version13: + + return miner13.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } diff --git a/chain/actors/version.go b/chain/actors/version.go index 92c0da00687..8d84bbc1d0c 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -14,9 +14,9 @@ const ({{range .actorVersions}} /* inline-gen start */ -var LatestVersion = 12 +var LatestVersion = 13 -var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} +var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} const ( Version0 Version = 0 @@ -31,6 +31,7 @@ const ( Version10 Version = 10 Version11 Version = 11 Version12 Version = 12 + Version13 Version = 13 ) /* inline-gen end */ diff --git a/chain/beacon/beacon.go b/chain/beacon/beacon.go index ff75b85794c..45baa662438 100644 --- a/chain/beacon/beacon.go +++ b/chain/beacon/beacon.go @@ -43,31 +43,31 @@ type BeaconPoint struct { // been posted on chain. 
type RandomBeacon interface { Entry(context.Context, uint64) <-chan Response - VerifyEntry(types.BeaconEntry, types.BeaconEntry) error + VerifyEntry(entry types.BeaconEntry, prevEntrySig []byte) error MaxBeaconRoundForEpoch(network.Version, abi.ChainEpoch) uint64 + IsChained() bool } func ValidateBlockValues(bSchedule Schedule, nv network.Version, h *types.BlockHeader, parentEpoch abi.ChainEpoch, prevEntry types.BeaconEntry) error { - { - parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) - currBeacon := bSchedule.BeaconForEpoch(h.Height) - if parentBeacon != currBeacon { - if len(h.BeaconEntries) != 2 { - return xerrors.Errorf("expected two beacon entries at beacon fork, got %d", len(h.BeaconEntries)) - } - err := currBeacon.VerifyEntry(h.BeaconEntries[1], h.BeaconEntries[0]) - if err != nil { - return xerrors.Errorf("beacon at fork point invalid: (%v, %v): %w", - h.BeaconEntries[1], h.BeaconEntries[0], err) - } - return nil + + parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) + currBeacon := bSchedule.BeaconForEpoch(h.Height) + // When we have "chained" beacons, two entries at a fork are required. 
+ if parentBeacon != currBeacon && currBeacon.IsChained() { + if len(h.BeaconEntries) != 2 { + return xerrors.Errorf("expected two beacon entries at beacon fork, got %d", len(h.BeaconEntries)) + } + err := currBeacon.VerifyEntry(h.BeaconEntries[1], h.BeaconEntries[0].Data) + if err != nil { + return xerrors.Errorf("beacon at fork point invalid: (%v, %v): %w", + h.BeaconEntries[1], h.BeaconEntries[0], err) } + return nil } - // TODO: fork logic - b := bSchedule.BeaconForEpoch(h.Height) - maxRound := b.MaxBeaconRoundForEpoch(nv, h.Height) + maxRound := currBeacon.MaxBeaconRoundForEpoch(nv, h.Height) + // We don't expect to ever actually meet this condition if maxRound == prevEntry.Round { if len(h.BeaconEntries) != 0 { return xerrors.Errorf("expected not to have any beacon entries in this block, got %d", len(h.BeaconEntries)) @@ -79,23 +79,31 @@ func ValidateBlockValues(bSchedule Schedule, nv network.Version, h *types.BlockH return xerrors.Errorf("expected to have beacon entries in this block, but didn't find any") } - // Verify that the last beacon entry's round corresponds to the round we expect + // We skip verifying the genesis entry when randomness is "chained". 
+ if currBeacon.IsChained() && prevEntry.Round == 0 { + return nil + } + last := h.BeaconEntries[len(h.BeaconEntries)-1] if last.Round != maxRound { return xerrors.Errorf("expected final beacon entry in block to be at round %d, got %d", maxRound, last.Round) } - // Verify that all other entries' rounds are as expected for the epochs in between parentEpoch and h.Height - for i, e := range h.BeaconEntries { - correctRound := b.MaxBeaconRoundForEpoch(nv, parentEpoch+abi.ChainEpoch(i)+1) - if e.Round != correctRound { - return xerrors.Errorf("unexpected beacon round %d, expected %d for epoch %d", e.Round, correctRound, parentEpoch+abi.ChainEpoch(i)) + // If the beacon is UNchained, verify that the block only includes the rounds we want for the epochs in between parentEpoch and h.Height + // For chained beacons, you must have all the rounds forming a valid chain with prevEntry, so we can skip this step + if !currBeacon.IsChained() { + // Verify that all other entries' rounds are as expected for the epochs in between parentEpoch and h.Height + for i, e := range h.BeaconEntries { + correctRound := currBeacon.MaxBeaconRoundForEpoch(nv, parentEpoch+abi.ChainEpoch(i)+1) + if e.Round != correctRound { + return xerrors.Errorf("unexpected beacon round %d, expected %d for epoch %d", e.Round, correctRound, parentEpoch+abi.ChainEpoch(i)) + } } } // Verify the beacon entries themselves for i, e := range h.BeaconEntries { - if err := b.VerifyEntry(e, prevEntry); err != nil { + if err := currBeacon.VerifyEntry(e, prevEntry.Data); err != nil { return xerrors.Errorf("beacon entry %d (%d - %x (%d)) was invalid: %w", i, e.Round, e.Data, len(e.Data), err) } prevEntry = e @@ -105,34 +113,32 @@ func ValidateBlockValues(bSchedule Schedule, nv network.Version, h *types.BlockH } func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, nv network.Version, epoch abi.ChainEpoch, parentEpoch abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) { - { - parentBeacon := 
bSchedule.BeaconForEpoch(parentEpoch) - currBeacon := bSchedule.BeaconForEpoch(epoch) - if parentBeacon != currBeacon { - // Fork logic - round := currBeacon.MaxBeaconRoundForEpoch(nv, epoch) - out := make([]types.BeaconEntry, 2) - rch := currBeacon.Entry(ctx, round-1) - res := <-rch - if res.Err != nil { - return nil, xerrors.Errorf("getting entry %d returned error: %w", round-1, res.Err) - } - out[0] = res.Entry - rch = currBeacon.Entry(ctx, round) - res = <-rch - if res.Err != nil { - return nil, xerrors.Errorf("getting entry %d returned error: %w", round, res.Err) - } - out[1] = res.Entry - return out, nil + // When we have "chained" beacons, two entries at a fork are required. + parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) + currBeacon := bSchedule.BeaconForEpoch(epoch) + if parentBeacon != currBeacon && currBeacon.IsChained() { + // Fork logic + round := currBeacon.MaxBeaconRoundForEpoch(nv, epoch) + out := make([]types.BeaconEntry, 2) + rch := currBeacon.Entry(ctx, round-1) + res := <-rch + if res.Err != nil { + return nil, xerrors.Errorf("getting entry %d returned error: %w", round-1, res.Err) + } + out[0] = res.Entry + rch = currBeacon.Entry(ctx, round) + res = <-rch + if res.Err != nil { + return nil, xerrors.Errorf("getting entry %d returned error: %w", round, res.Err) } + out[1] = res.Entry + return out, nil } - beacon := bSchedule.BeaconForEpoch(epoch) - start := build.Clock.Now() - maxRound := beacon.MaxBeaconRoundForEpoch(nv, epoch) + maxRound := currBeacon.MaxBeaconRoundForEpoch(nv, epoch) + // We don't expect this to ever be the case if maxRound == prev.Round { return nil, nil } @@ -144,8 +150,8 @@ func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, nv network.V var out []types.BeaconEntry for currEpoch := epoch; currEpoch > parentEpoch; currEpoch-- { - currRound := beacon.MaxBeaconRoundForEpoch(nv, currEpoch) - rch := beacon.Entry(ctx, currRound) + currRound := currBeacon.MaxBeaconRoundForEpoch(nv, currEpoch) + rch := 
currBeacon.Entry(ctx, currRound) select { case resp := <-rch: if resp.Err != nil { diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go index 1b78daaddb1..852a1a31ce5 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -8,7 +8,7 @@ import ( dchain "github.com/drand/drand/chain" dclient "github.com/drand/drand/client" hclient "github.com/drand/drand/client/http" - "github.com/drand/drand/common/scheme" + dcrypto "github.com/drand/drand/crypto" dlog "github.com/drand/drand/log" gclient "github.com/drand/drand/lp2p/client" "github.com/drand/kyber" @@ -37,7 +37,8 @@ var log = logging.Logger("drand") // // The root trust for the Drand chain is configured from build.DrandChain. type DrandBeacon struct { - client dclient.Client + isChained bool + client dclient.Client pubkey kyber.Point @@ -47,10 +48,18 @@ type DrandBeacon struct { drandGenTime uint64 filGenTime uint64 filRoundTime uint64 + scheme *dcrypto.Scheme localCache *lru.Cache[uint64, *types.BeaconEntry] } +// IsChained tells us whether this particular beacon operates in "chained mode". Prior to Drand +// quicknet, beacons form a chain. After the introduction of quicknet, they do not, so we need to +// change how we interact with beacon entries. 
(See FIP-0063) +func (db *DrandBeacon) IsChained() bool { + return db.isChained +} + // DrandHTTPClient interface overrides the user agent used by drand type DrandHTTPClient interface { SetUserAgent(string) @@ -68,6 +77,10 @@ func (l *logger) Named(s string) dlog.Logger { return &logger{l.SugaredLogger.Named(s)} } +func (l *logger) AddCallerSkip(skip int) dlog.Logger { + return &logger{l.SugaredLogger.With(zap.AddCallerSkip(skip))} +} + func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) { if genesisTs == 0 { panic("what are you doing this cant be zero") @@ -112,10 +125,16 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes } db := &DrandBeacon{ + isChained: config.IsChained, client: client, localCache: lc, } + sch, err := dcrypto.GetSchemeByIDWithDefault(drandChain.Scheme) + if err != nil { + return nil, err + } + db.scheme = sch db.pubkey = drandChain.PublicKey db.interval = drandChain.Period db.drandGenTime = uint64(drandChain.GenesisTime) @@ -164,29 +183,26 @@ func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry { return v } -func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error { - if prev.Round == 0 { - // TODO handle genesis better - return nil - } - - if be := db.getCachedValue(curr.Round); be != nil { - if !bytes.Equal(curr.Data, be.Data) { +func (db *DrandBeacon) VerifyEntry(entry types.BeaconEntry, prevEntrySig []byte) error { + if be := db.getCachedValue(entry.Round); be != nil { + if !bytes.Equal(entry.Data, be.Data) { return xerrors.New("invalid beacon value, does not match cached good value") } // return no error if the value is in the cache already return nil } b := &dchain.Beacon{ - PreviousSig: prev.Data, - Round: curr.Round, - Signature: curr.Data, + PreviousSig: prevEntrySig, + Round: entry.Round, + Signature: entry.Data, } - err := dchain.NewVerifier(scheme.GetSchemeFromEnv()).VerifyBeacon(*b, 
db.pubkey) - if err == nil { - db.cacheValue(curr) + + err := db.scheme.VerifyBeacon(b, db.pubkey) + if err != nil { + return xerrors.Errorf("failed to verify beacon: %w", err) } + db.cacheValue(entry) return nil } diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go index 355d6937f8e..c35c0da18f5 100644 --- a/chain/beacon/drand/drand_test.go +++ b/chain/beacon/drand/drand_test.go @@ -39,7 +39,15 @@ func TestMaxBeaconRoundForEpoch(t *testing.T) { todayTs := uint64(1652222222) db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandTestnet]) assert.NoError(t, err) + assert.True(t, db.IsChained()) mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100) mbr16 := db.MaxBeaconRoundForEpoch(network.Version16, 100) assert.Equal(t, mbr15+1, mbr16) } + +func TestQuicknetIsChained(t *testing.T) { + todayTs := uint64(1652222222) + db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandQuicknet]) + assert.NoError(t, err) + assert.False(t, db.IsChained()) +} diff --git a/chain/beacon/mock.go b/chain/beacon/mock.go index 3f26da1097d..ab6a98ebfe9 100644 --- a/chain/beacon/mock.go +++ b/chain/beacon/mock.go @@ -20,6 +20,10 @@ type mockBeacon struct { interval time.Duration } +func (mb *mockBeacon) IsChained() bool { + return true +} + func NewMockBeacon(interval time.Duration) RandomBeacon { mb := &mockBeacon{interval: interval} @@ -47,7 +51,7 @@ func (mb *mockBeacon) Entry(ctx context.Context, index uint64) <-chan Response { return out } -func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, to types.BeaconEntry) error { +func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, _prevEntrySig []byte) error { // TODO: cache this, especially for bls oe := mb.entryForIndex(from.Round) if !bytes.Equal(from.Data, oe.Data) { diff --git a/chain/consensus/compute_state.go b/chain/consensus/compute_state.go index 4b993b3e72d..78369ec20b4 100644 --- a/chain/consensus/compute_state.go 
+++ b/chain/consensus/compute_state.go @@ -53,6 +53,7 @@ func NewActorRegistry() *vm.ActorRegistry { inv.Register(actorstypes.Version10, vm.ActorsVersionPredicate(actorstypes.Version10), builtin.MakeRegistry(actorstypes.Version10)) inv.Register(actorstypes.Version11, vm.ActorsVersionPredicate(actorstypes.Version11), builtin.MakeRegistry(actorstypes.Version11)) inv.Register(actorstypes.Version12, vm.ActorsVersionPredicate(actorstypes.Version12), builtin.MakeRegistry(actorstypes.Version12)) + inv.Register(actorstypes.Version13, vm.ActorsVersionPredicate(actorstypes.Version13), builtin.MakeRegistry(actorstypes.Version13)) return inv } diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index a6883a41afe..8565f361358 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -129,6 +129,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) return xerrors.Errorf("failed to get lookback tipset for block: %w", err) } + // TODO: Optimization: See https://github.com/filecoin-project/lotus/issues/11597 prevBeacon, err := filec.store.GetLatestBeaconEntry(ctx, baseTs) if err != nil { return xerrors.Errorf("failed to get latest beacon entry: %w", err) diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index f411c40b9d3..6478bf1d417 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -24,6 +24,7 @@ import ( nv19 "github.com/filecoin-project/go-state-types/builtin/v11/migration" system11 "github.com/filecoin-project/go-state-types/builtin/v11/system" nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration" + nv22 "github.com/filecoin-project/go-state-types/builtin/v13/migration" nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/migration" @@ -280,12 +281,22 @@ func 
DefaultUpgradeSchedule() stmgr.UpgradeSchedule { Height: build.UpgradeWatermelonFixHeight, Network: network.Version21, Migration: buildUpgradeActorsV12MinerFix(calibnetv12BuggyMinerCID1, calibnetv12BuggyManifestCID2), + }, { + Height: build.UpgradeWatermelonFix2Height, + Network: network.Version21, + Migration: buildUpgradeActorsV12MinerFix(calibnetv12BuggyMinerCID2, calibnetv12CorrectManifestCID1), + }, { + Height: build.UpgradeDragonHeight, + Network: network.Version22, + Migration: UpgradeActorsV13, + PreMigrations: []stmgr.PreMigration{{ + PreMigration: PreUpgradeActorsV13, + StartWithin: 120, + DontStartWithin: 15, + StopWithin: 10, + }}, + Expensive: true, }, - { - Height: build.UpgradeWatermelonFix2Height, - Network: network.Version21, - Migration: buildUpgradeActorsV12MinerFix(calibnetv12BuggyMinerCID2, calibnetv12CorrectManifestCID1), - }, } for _, u := range updates { @@ -2146,6 +2157,110 @@ func buildUpgradeActorsV12MinerFix(oldBuggyMinerCID, newManifestCID cid.Cid) fun } } +func PreUpgradeActorsV13(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch) + if err != nil { + return xerrors.Errorf("error getting lookback ts for premigration: %w", err) + } + + config := migration.Config{ + MaxWorkers: uint(workerCount), + ProgressLogPeriod: time.Minute * 5, + UpgradeEpoch: build.UpgradeDragonHeight, + } + + _, err = upgradeActorsV13Common(ctx, sm, cache, lbRoot, epoch, lbts, config) + return err +} + +func UpgradeActorsV13(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 2. 
+ workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + config := migration.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + UpgradeEpoch: build.UpgradeDragonHeight, + } + newRoot, err := upgradeActorsV13Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v11 state: %w", err) + } + return newRoot, nil +} + +func upgradeActorsV13Common( + ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config migration.Config, +) (cid.Cid, error) { + writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4) + adtStore := store.ActorStore(ctx, writeStore) + // ensure that the manifest is loaded in the blockstore + if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version13); err != nil { + return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err) + } + + // Load the state root. + var stateRoot types.StateRoot + if err := adtStore.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion5 { + return cid.Undef, xerrors.Errorf( + "expected state root version 5 for actors v13 upgrade, got %d", + stateRoot.Version, + ) + } + + manifest, ok := actors.GetManifest(actorstypes.Version13) + if !ok { + return cid.Undef, xerrors.Errorf("no manifest CID for v13 upgrade") + } + + // Perform the migration + newHamtRoot, err := nv22.MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config, + migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v11: %w", err) + } + + // Persist the result. 
+ newRoot, err := adtStore.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion5, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persists the new tree and shuts down the flush worker + if err := writeStore.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err) + } + + if err := writeStore.Shutdown(ctx); err != nil { + return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err) + } + + return newRoot, nil +} + //////////////////// // Example upgrade function if upgrade requires only code changes diff --git a/chain/events/cache.go b/chain/events/cache.go index 2eba1f085b7..67c59cb5052 100644 --- a/chain/events/cache.go +++ b/chain/events/cache.go @@ -26,7 +26,7 @@ type cache struct { uncachedAPI } -func newCache(api EventAPI, gcConfidence abi.ChainEpoch) *cache { +func newCache(api EventHelperAPI, gcConfidence abi.ChainEpoch) *cache { return &cache{ newTSCache(api, gcConfidence), newMessageCache(api), diff --git a/chain/events/events.go b/chain/events/events.go index c68b62a64e0..a9da32cbba2 100644 --- a/chain/events/events.go +++ b/chain/events/events.go @@ -28,7 +28,7 @@ type TipSetObserver interface { Revert(ctx context.Context, from, to *types.TipSet) error } -type EventAPI interface { +type EventHelperAPI interface { ChainNotify(context.Context) (<-chan []*api.HeadChange, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) @@ -47,7 +47,7 @@ type Events struct { *hcEvents } -func newEventsWithGCConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) (*Events, error) { +func newEventsWithGCConfidence(ctx context.Context, api EventHelperAPI, gcConfidence abi.ChainEpoch) (*Events, error) { cache := newCache(api, gcConfidence) ob := newObserver(cache, 
gcConfidence) @@ -61,7 +61,7 @@ func newEventsWithGCConfidence(ctx context.Context, api EventAPI, gcConfidence a return &Events{ob, he, headChange}, nil } -func NewEvents(ctx context.Context, api EventAPI) (*Events, error) { +func NewEvents(ctx context.Context, api EventHelperAPI) (*Events, error) { gcConfidence := 2 * build.ForkLengthThreshold return newEventsWithGCConfidence(ctx, api, gcConfidence) } diff --git a/chain/events/events_called.go b/chain/events/events_called.go index 3ac02b2f7fd..98e594c476b 100644 --- a/chain/events/events_called.go +++ b/chain/events/events_called.go @@ -69,7 +69,7 @@ type queuedEvent struct { // Manages chain head change events, which may be forward (new tipset added to // chain) or backward (chain branch discarded in favour of heavier branch) type hcEvents struct { - cs EventAPI + cs EventHelperAPI lk sync.Mutex lastTs *types.TipSet @@ -94,7 +94,7 @@ type hcEvents struct { watcherEvents } -func newHCEvents(api EventAPI, obs *observer) *hcEvents { +func newHCEvents(api EventHelperAPI, obs *observer) *hcEvents { e := &hcEvents{ cs: api, confQueue: map[triggerH]map[msgH][]*queuedEvent{}, @@ -326,14 +326,14 @@ type headChangeAPI interface { // watcherEvents watches for a state change type watcherEvents struct { - cs EventAPI + cs EventHelperAPI hcAPI headChangeAPI lk sync.RWMutex matchers map[triggerID]StateMatchFunc } -func newWatcherEvents(hcAPI headChangeAPI, cs EventAPI) watcherEvents { +func newWatcherEvents(hcAPI headChangeAPI, cs EventHelperAPI) watcherEvents { return watcherEvents{ cs: cs, hcAPI: hcAPI, @@ -426,14 +426,14 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, // messageEvents watches for message calls to actors type messageEvents struct { - cs EventAPI + cs EventHelperAPI hcAPI headChangeAPI lk sync.RWMutex matchers map[triggerID]MsgMatchFunc } -func newMessageEvents(hcAPI headChangeAPI, cs EventAPI) messageEvents { +func newMessageEvents(hcAPI headChangeAPI, cs EventHelperAPI) 
messageEvents { return messageEvents{ cs: cs, hcAPI: hcAPI, diff --git a/chain/events/events_height.go b/chain/events/events_height.go index 5789be753a9..1296a7f2527 100644 --- a/chain/events/events_height.go +++ b/chain/events/events_height.go @@ -22,7 +22,7 @@ type heightHandler struct { } type heightEvents struct { - api EventAPI + api EventHelperAPI gcConfidence abi.ChainEpoch lk sync.Mutex @@ -31,7 +31,7 @@ type heightEvents struct { lastGc abi.ChainEpoch //nolint:structcheck } -func newHeightEvents(api EventAPI, obs *observer, gcConfidence abi.ChainEpoch) *heightEvents { +func newHeightEvents(api EventHelperAPI, obs *observer, gcConfidence abi.ChainEpoch) *heightEvents { he := &heightEvents{ api: api, gcConfidence: gcConfidence, diff --git a/chain/events/events_test.go b/chain/events/events_test.go index f16434355ee..a1309b90ab5 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -358,7 +358,7 @@ func (fcs *fakeCS) advance(rev, app, drop int, msgs map[int]cid.Cid, nulls ...in fcs.sub(nil, nil) } -var _ EventAPI = &fakeCS{} +var _ EventHelperAPI = &fakeCS{} func TestAt(t *testing.T) { //stm: @EVENTS_HEIGHT_CHAIN_AT_001, @EVENTS_HEIGHT_REVERT_001 diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index 24192a53e72..1669d840eec 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -27,14 +27,24 @@ func isIndexedValue(b uint8) bool { return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0 } -type EventFilter struct { - id types.FilterID - minHeight abi.ChainEpoch // minimum epoch to apply filter or -1 if no minimum - maxHeight abi.ChainEpoch // maximum epoch to apply filter or -1 if no maximum - tipsetCid cid.Cid - addresses []address.Address // list of f4 actor addresses that are extpected to emit the event - keys map[string][][]byte // map of key names to a list of alternate values that may match - maxResults int // maximum number of results to collect, 0 is unlimited +type 
AddressResolver func(context.Context, abi.ActorID, *types.TipSet) (address.Address, bool) + +type EventFilter interface { + Filter + + TakeCollectedEvents(context.Context) []*CollectedEvent + CollectEvents(context.Context, *TipSetEvents, bool, AddressResolver) error +} + +type eventFilter struct { + id types.FilterID + minHeight abi.ChainEpoch // minimum epoch to apply filter or -1 if no minimum + maxHeight abi.ChainEpoch // maximum epoch to apply filter or -1 if no maximum + tipsetCid cid.Cid + addresses []address.Address // list of actor addresses that are extpected to emit the event + + keysWithCodec map[string][]types.ActorEventBlock // map of key names to a list of alternate values that may match + maxResults int // maximum number of results to collect, 0 is unlimited mu sync.Mutex collected []*CollectedEvent @@ -42,11 +52,11 @@ type EventFilter struct { ch chan<- interface{} } -var _ Filter = (*EventFilter)(nil) +var _ Filter = (*eventFilter)(nil) type CollectedEvent struct { Entries []types.EventEntry - EmitterAddr address.Address // f4 address of emitter + EmitterAddr address.Address // address of emitter EventIdx int // index of the event within the list of emitted events Reverted bool Height abi.ChainEpoch @@ -55,24 +65,24 @@ type CollectedEvent struct { MsgCid cid.Cid // cid of message that produced event } -func (f *EventFilter) ID() types.FilterID { +func (f *eventFilter) ID() types.FilterID { return f.id } -func (f *EventFilter) SetSubChannel(ch chan<- interface{}) { +func (f *eventFilter) SetSubChannel(ch chan<- interface{}) { f.mu.Lock() defer f.mu.Unlock() f.ch = ch f.collected = nil } -func (f *EventFilter) ClearSubChannel() { +func (f *eventFilter) ClearSubChannel() { f.mu.Lock() defer f.mu.Unlock() f.ch = nil } -func (f *EventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error { +func (f *eventFilter) CollectEvents(ctx 
context.Context, te *TipSetEvents, revert bool, resolver AddressResolver) error { if !f.matchTipset(te) { return nil } @@ -137,13 +147,13 @@ func (f *EventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever return nil } -func (f *EventFilter) setCollectedEvents(ces []*CollectedEvent) { +func (f *eventFilter) setCollectedEvents(ces []*CollectedEvent) { f.mu.Lock() f.collected = ces f.mu.Unlock() } -func (f *EventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent { +func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent { f.mu.Lock() collected := f.collected f.collected = nil @@ -153,14 +163,14 @@ func (f *EventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent return collected } -func (f *EventFilter) LastTaken() time.Time { +func (f *eventFilter) LastTaken() time.Time { f.mu.Lock() defer f.mu.Unlock() return f.lastTaken } // matchTipset reports whether this filter matches the given tipset -func (f *EventFilter) matchTipset(te *TipSetEvents) bool { +func (f *eventFilter) matchTipset(te *TipSetEvents) bool { if f.tipsetCid != cid.Undef { tsCid, err := te.Cid() if err != nil { @@ -178,7 +188,7 @@ func (f *EventFilter) matchTipset(te *TipSetEvents) bool { return true } -func (f *EventFilter) matchAddress(o address.Address) bool { +func (f *eventFilter) matchAddress(o address.Address) bool { if len(f.addresses) == 0 { return true } @@ -193,8 +203,8 @@ func (f *EventFilter) matchAddress(o address.Address) bool { return false } -func (f *EventFilter) matchKeys(ees []types.EventEntry) bool { - if len(f.keys) == 0 { +func (f *eventFilter) matchKeys(ees []types.EventEntry) bool { + if len(f.keysWithCodec) == 0 { return true } // TODO: optimize this naive algorithm @@ -216,19 +226,19 @@ func (f *EventFilter) matchKeys(ees []types.EventEntry) bool { continue } - wantlist, ok := f.keys[keyname] + wantlist, ok := f.keysWithCodec[keyname] if !ok || len(wantlist) == 0 { continue } for _, w := range wantlist { 
- if bytes.Equal(w, ee.Value) { + if bytes.Equal(w.Value, ee.Value) && w.Codec == ee.Codec { matched[keyname] = true break } } - if len(matched) == len(f.keys) { + if len(matched) == len(f.keysWithCodec) { // all keys have been matched return true } @@ -296,7 +306,7 @@ type EventFilterManager struct { EventIndex *EventIndex mu sync.Mutex // guards mutations to filters - filters map[types.FilterID]*EventFilter + filters map[types.FilterID]EventFilter currentHeight abi.ChainEpoch } @@ -362,7 +372,8 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet) return nil } -func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight abi.ChainEpoch, tipsetCid cid.Cid, addresses []address.Address, keys map[string][][]byte) (*EventFilter, error) { +func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight abi.ChainEpoch, tipsetCid cid.Cid, addresses []address.Address, + keysWithCodec map[string][]types.ActorEventBlock, excludeReverted bool) (EventFilter, error) { m.mu.Lock() currentHeight := m.currentHeight m.mu.Unlock() @@ -376,26 +387,26 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a return nil, xerrors.Errorf("new filter id: %w", err) } - f := &EventFilter{ - id: id, - minHeight: minHeight, - maxHeight: maxHeight, - tipsetCid: tipsetCid, - addresses: addresses, - keys: keys, - maxResults: m.MaxFilterResults, + f := &eventFilter{ + id: id, + minHeight: minHeight, + maxHeight: maxHeight, + tipsetCid: tipsetCid, + addresses: addresses, + keysWithCodec: keysWithCodec, + maxResults: m.MaxFilterResults, } if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight { // Filter needs historic events - if err := m.EventIndex.PrefillFilter(ctx, f, true); err != nil { + if err := m.EventIndex.prefillFilter(ctx, f, excludeReverted); err != nil { return nil, err } } m.mu.Lock() if m.filters == nil { - m.filters = make(map[types.FilterID]*EventFilter) + m.filters = 
make(map[types.FilterID]EventFilter) } m.filters[id] = f m.mu.Unlock() diff --git a/chain/events/filter/event_test.go b/chain/events/filter/event_test.go index 329573bc13d..c650b71eb6f 100644 --- a/chain/events/filter/event_test.go +++ b/chain/events/filter/event_test.go @@ -22,6 +22,19 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +func keysToKeysWithCodec(keys map[string][][]byte) map[string][]types.ActorEventBlock { + keysWithCodec := make(map[string][]types.ActorEventBlock) + for k, v := range keys { + for _, vv := range v { + keysWithCodec[k] = append(keysWithCodec[k], types.ActorEventBlock{ + Codec: cid.Raw, + Value: vv, + }) + } + } + return keysWithCodec +} + func TestEventFilterCollectEvents(t *testing.T) { rng := pseudo.New(pseudo.NewSource(299792458)) a1 := randomF4Addr(t, rng) @@ -73,13 +86,13 @@ func TestEventFilterCollectEvents(t *testing.T) { testCases := []struct { name string - filter *EventFilter + filter *eventFilter te *TipSetEvents want []*CollectedEvent }{ { name: "nomatch tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14001, maxHeight: -1, }, @@ -88,7 +101,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "nomatch tipset max height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: 13999, }, @@ -97,7 +110,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "match tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14000, maxHeight: -1, }, @@ -106,7 +119,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: cid14000, @@ -116,7 +129,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "nomatch address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a2}, @@ -126,7 +139,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, 
{ name: "match address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a1}, @@ -136,124 +149,124 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "match one entry", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "match one entry with alternate values", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry by missing value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry by missing key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "method": { []byte("approval"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr1"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry with one mismatching key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: 
keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "approver": { []byte("addr1"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one mismatching value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr2"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988181"), }, - }, + }), }, te: events14000, want: noCollectedEvents, diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index 0f4a54c5b95..49be57c7964 100644 --- a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -501,7 +501,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever } // PrefillFilter fills a filter's collection of events from the historic index -func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, excludeReverted bool) error { +func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, excludeReverted bool) error { clauses := []string{} values := []any{} joins := []string{} @@ -534,9 +534,9 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, exclude clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") } - if len(f.keys) > 0 { + if len(f.keysWithCodec) > 0 { join := 0 - for key, vals := range f.keys { + for key, vals := range f.keysWithCodec { if len(vals) > 0 { join++ joinAlias := fmt.Sprintf("ee%d", join) @@ -545,8 +545,8 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, exclude values = append(values, key) subclauses := []string{} for _, val := 
range vals { - subclauses = append(subclauses, fmt.Sprintf("%s.value=?", joinAlias)) - values = append(values, val) + subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias)) + values = append(values, val.Value, val.Codec) } clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") } diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go index f9b1b14adae..ce3f7b78a03 100644 --- a/chain/events/filter/index_test.go +++ b/chain/events/filter/index_test.go @@ -82,13 +82,13 @@ func TestEventIndexPrefillFilter(t *testing.T) { testCases := []struct { name string - filter *EventFilter + filter *eventFilter te *TipSetEvents want []*CollectedEvent }{ { name: "nomatch tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14001, maxHeight: -1, }, @@ -97,7 +97,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "nomatch tipset max height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: 13999, }, @@ -106,7 +106,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "match tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14000, maxHeight: -1, }, @@ -115,7 +115,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: cid14000, @@ -125,7 +125,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "nomatch address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a2}, @@ -135,7 +135,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "match address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a1}, @@ -145,124 +145,124 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "match one entry", - filter: &EventFilter{ + filter: &eventFilter{ 
minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "match one entry with alternate values", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry by missing value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry by missing key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "method": { []byte("approval"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr1"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry with one mismatching key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "approver": { []byte("addr1"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one mismatching value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: 
map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr2"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988181"), }, - }, + }), }, te: events14000, want: noCollectedEvents, @@ -272,7 +272,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { for _, tc := range testCases { tc := tc // appease lint t.Run(tc.name, func(t *testing.T) { - if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil { + if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { require.NoError(t, err, "prefill filter events") } @@ -409,13 +409,13 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { inclusiveTestCases := []struct { name string - filter *EventFilter + filter *eventFilter te *TipSetEvents want []*CollectedEvent }{ { name: "nomatch tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14001, maxHeight: -1, }, @@ -424,7 +424,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch tipset max height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: 13999, }, @@ -433,7 +433,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14000, maxHeight: -1, }, @@ -442,7 +442,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: cid14000, @@ -452,7 +452,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + 
filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: reveredCID14000, @@ -462,7 +462,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a3}, @@ -472,7 +472,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match address 2", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a2}, @@ -482,7 +482,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match address 1", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a1}, @@ -492,155 +492,155 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match one entry", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, - }, + }), }, te: events14000, want: twoCollectedEvent, }, { name: "match one entry with alternate values", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), []byte("approval"), }, - }, + }), }, te: events14000, want: twoCollectedEvent, }, { name: "nomatch one entry by missing value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry by missing key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: 
keysToKeysWithCodec(map[string][][]byte{ "method": { []byte("approval"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr1"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr2"), }, - }, + }), }, te: revertedEvents14000, want: oneCollectedRevertedEvent, }, { name: "nomatch one entry with one mismatching key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "approver": { []byte("addr1"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one mismatching value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr3"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988181"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: 
keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988182"), }, - }, + }), }, te: events14000, want: noCollectedEvents, @@ -649,13 +649,13 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { exclusiveTestCases := []struct { name string - filter *EventFilter + filter *eventFilter te *TipSetEvents want []*CollectedEvent }{ { name: "nomatch tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14001, maxHeight: -1, }, @@ -664,7 +664,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch tipset max height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: 13999, }, @@ -673,7 +673,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14000, maxHeight: -1, }, @@ -682,7 +682,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: cid14000, @@ -692,7 +692,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset cid but reverted", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: reveredCID14000, @@ -702,7 +702,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a3}, @@ -712,7 +712,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch address 2 but reverted", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a2}, @@ -722,7 +722,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: 
[]address.Address{a1}, @@ -732,141 +732,141 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match one entry", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "match one entry with alternate values", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry by missing value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry by missing key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "method": { []byte("approval"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr1"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry with one mismatching key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "approver": { []byte("addr1"), }, - }, + }), }, 
te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with matching reverted value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr2"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one mismatching value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr3"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988181"), }, - }, + }), }, te: events14000, want: noCollectedEvents, @@ -876,7 +876,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { for _, tc := range inclusiveTestCases { tc := tc // appease lint t.Run(tc.name, func(t *testing.T) { - if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil { + if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { require.NoError(t, err, "prefill filter events") } @@ -888,7 +888,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { for _, tc := range exclusiveTestCases { tc := tc // appease lint t.Run(tc.name, func(t *testing.T) { - if err := ei.PrefillFilter(context.Background(), tc.filter, true); err != nil { + if err := ei.prefillFilter(context.Background(), tc.filter, true); err != nil { require.NoError(t, err, "prefill filter events") } diff --git a/chain/events/message_cache.go b/chain/events/message_cache.go index 96f6bcbd761..24b3c934aaa 100644 --- a/chain/events/message_cache.go +++ 
b/chain/events/message_cache.go @@ -11,13 +11,13 @@ import ( ) type messageCache struct { - api EventAPI + api EventHelperAPI blockMsgLk sync.Mutex blockMsgCache *arc.ARCCache[cid.Cid, *api.BlockMessages] } -func newMessageCache(a EventAPI) *messageCache { +func newMessageCache(a EventHelperAPI) *messageCache { blsMsgCache, _ := arc.NewARC[cid.Cid, *api.BlockMessages](500) return &messageCache{ diff --git a/chain/events/observer.go b/chain/events/observer.go index 4462185858f..0b021f9965b 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -17,7 +17,7 @@ import ( ) type observer struct { - api EventAPI + api EventHelperAPI gcConfidence abi.ChainEpoch diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go index ff05156a6f9..e4e8b8f7e41 100644 --- a/chain/events/state/predicates.go +++ b/chain/events/state/predicates.go @@ -242,7 +242,7 @@ func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffDeal } existenceChanged := oldFound != newFound - valueChanged := (oldFound && newFound) && *oldDeal != *newDeal + valueChanged := (oldFound && newFound) && !oldDeal.Equals(newDeal) if existenceChanged || valueChanged { changedDeals[dealID] = market.DealStateChange{ID: dealID, From: oldDeal, To: newDeal} } diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index 52fc2668a2b..79c1d2e0eae 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -177,11 +177,11 @@ func TestMarketPredicates(t *testing.T) { require.Contains(t, changedDealIDs, abi.DealID(1)) require.Contains(t, changedDealIDs, abi.DealID(2)) deal1 := changedDealIDs[abi.DealID(1)] - if deal1.From.LastUpdatedEpoch != 2 || deal1.To.LastUpdatedEpoch != 3 { + if deal1.From.LastUpdatedEpoch() != 2 || deal1.To.LastUpdatedEpoch() != 3 { t.Fatal("Unexpected change to LastUpdatedEpoch") } deal2 := changedDealIDs[abi.DealID(2)] - if deal2.From.LastUpdatedEpoch != 5 || deal2.To != 
nil { + if deal2.From.LastUpdatedEpoch() != 5 || deal2.To != nil { t.Fatal("Expected To to be nil") } @@ -243,8 +243,8 @@ func TestMarketPredicates(t *testing.T) { require.Len(t, changedDeals.Modified, 1) require.Equal(t, abi.DealID(1), changedDeals.Modified[0].ID) - require.True(t, dealEquality(*newDeal1, *changedDeals.Modified[0].To)) - require.True(t, dealEquality(*oldDeal1, *changedDeals.Modified[0].From)) + require.True(t, dealEquality(*newDeal1, changedDeals.Modified[0].To)) + require.True(t, dealEquality(*oldDeal1, changedDeals.Modified[0].From)) require.Equal(t, abi.DealID(2), changedDeals.Removed[0].ID) }) @@ -579,7 +579,7 @@ func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiratio } func dealEquality(expected market2.DealState, actual market.DealState) bool { - return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch && - expected.SectorStartEpoch == actual.SectorStartEpoch && - expected.SlashEpoch == actual.SlashEpoch + return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch() && + expected.SectorStartEpoch == actual.SectorStartEpoch() && + expected.SlashEpoch == actual.SlashEpoch() } diff --git a/chain/exchange/cbor_gen.go b/chain/exchange/cbor_gen.go index 71c75869dba..4aa74f0c0f5 100644 --- a/chain/exchange/cbor_gen.go +++ b/chain/exchange/cbor_gen.go @@ -35,7 +35,7 @@ func (t *Request) MarshalCBOR(w io.Writer) error { } // t.Head ([]cid.Cid) (slice) - if len(t.Head) > cbg.MaxLength { + if len(t.Head) > 8192 { return xerrors.Errorf("Slice value in field t.Head was too long") } @@ -95,7 +95,7 @@ func (t *Request) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Head: array too large (%d)", extra) } @@ -126,9 +126,9 @@ func (t *Request) UnmarshalCBOR(r io.Reader) (err error) { t.Head[i] = c } + } } - // t.Length (uint64) (uint64) { @@ -181,7 +181,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error { } // t.ErrorMessage (string) (string) - if 
len(t.ErrorMessage) > cbg.MaxLength { + if len(t.ErrorMessage) > 8192 { return xerrors.Errorf("Value in field t.ErrorMessage was too long") } @@ -193,7 +193,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error { } // t.Chain ([]*exchange.BSTipSet) (slice) - if len(t.Chain) > cbg.MaxLength { + if len(t.Chain) > 8192 { return xerrors.Errorf("Slice value in field t.Chain was too long") } @@ -204,6 +204,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -248,7 +249,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { // t.ErrorMessage (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -262,7 +263,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Chain: array too large (%d)", extra) } @@ -300,9 +301,9 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -332,10 +333,11 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.BlsIncludes ([]exchange.messageIndices) (slice) - if len(t.BlsIncludes) > cbg.MaxLength { + if len(t.BlsIncludes) > 8192 { return xerrors.Errorf("Slice value in field t.BlsIncludes was too long") } @@ -346,6 +348,7 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Secpk ([]*types.SignedMessage) (slice) @@ -360,10 +363,11 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.SecpkIncludes ([]exchange.messageIndices) (slice) - if len(t.SecpkIncludes) > cbg.MaxLength { + if len(t.SecpkIncludes) > 8192 { return xerrors.Errorf("Slice value in field t.SecpkIncludes was too long") } @@ -374,6 +378,7 @@ func (t 
*CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -446,9 +451,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.BlsIncludes ([]exchange.messageIndices) (slice) maj, extra, err = cr.ReadHeader() @@ -456,7 +461,7 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.BlsIncludes: array too large (%d)", extra) } @@ -484,9 +489,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Secpk ([]*types.SignedMessage) (slice) maj, extra, err = cr.ReadHeader() @@ -532,9 +537,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.SecpkIncludes ([]exchange.messageIndices) (slice) maj, extra, err = cr.ReadHeader() @@ -542,7 +547,7 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.SecpkIncludes: array too large (%d)", extra) } @@ -570,9 +575,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -591,7 +596,7 @@ func (t *BSTipSet) MarshalCBOR(w io.Writer) error { } // t.Blocks ([]*types.BlockHeader) (slice) - if len(t.Blocks) > cbg.MaxLength { + if len(t.Blocks) > 8192 { return xerrors.Errorf("Slice value in field t.Blocks was too long") } @@ -602,6 +607,7 @@ func (t *BSTipSet) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Messages (exchange.CompactedMessages) (struct) @@ -641,7 +647,7 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Blocks: array too large (%d)", extra) } @@ -679,9 +685,9 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // 
t.Messages (exchange.CompactedMessages) (struct) { diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index df8900cab8c..9ae39cf35cb 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -374,13 +374,33 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal // Commit sectors { for pi, preseal := range m.Sectors { - params := &minertypes.SectorPreCommitInfo{ - SealProof: preseal.ProofType, - SectorNumber: preseal.SectorID, - SealedCID: preseal.CommR, - SealRandEpoch: -1, - DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, - Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + var paramEnc []byte + var preCommitMethodNum abi.MethodNum + if nv >= network.Version22 { + paramEnc = mustEnc(&miner.PreCommitSectorBatchParams2{ + Sectors: []miner.SectorPreCommitInfo{ + { + SealProof: preseal.ProofType, + SectorNumber: preseal.SectorID, + SealedCID: preseal.CommR, + SealRandEpoch: -1, + DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, + Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + UnsealedCid: &preseal.CommD, + }, + }, + }) + preCommitMethodNum = builtintypes.MethodsMiner.PreCommitSectorBatch2 + } else { + paramEnc = mustEnc(&minertypes.SectorPreCommitInfo{ + SealProof: preseal.ProofType, + SectorNumber: preseal.SectorID, + SealedCID: preseal.CommR, + SealRandEpoch: -1, + DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, + Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! 
+ }) + preCommitMethodNum = builtintypes.MethodsMiner.PreCommitSector } sectorWeight := minerInfos[i].sectorWeight[pi] @@ -463,7 +483,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal pledge = big.Add(pcd, pledge) - _, err = doExecValue(ctx, genesisVm, minerInfos[i].maddr, m.Worker, pledge, builtintypes.MethodsMiner.PreCommitSector, mustEnc(params)) + _, err = doExecValue(ctx, genesisVm, minerInfos[i].maddr, m.Worker, pledge, preCommitMethodNum, paramEnc) if err != nil { return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) } diff --git a/chain/state/statetree.go b/chain/state/statetree.go index 61d7d500a87..1a6497d04b9 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -156,7 +156,7 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { case network.Version13, network.Version14, network.Version15, network.Version16, network.Version17: return types.StateTreeVersion4, nil - case network.Version18, network.Version19, network.Version20, network.Version21: + case network.Version18, network.Version19, network.Version20, network.Version21, network.Version22: return types.StateTreeVersion5, nil default: diff --git a/chain/stmgr/actors.go b/chain/stmgr/actors.go index 56744fa7489..f1d615e8d4f 100644 --- a/chain/stmgr/actors.go +++ b/chain/stmgr/actors.go @@ -284,7 +284,7 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts return &api.MarketDeal{ Proposal: *proposal, - State: *st, + State: api.MakeDealState(st), }, nil } diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 2f18bde8271..6d6f9ef65f3 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -200,10 +200,6 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig log.Errorw("FAILED migration", "height", height, "from", root, "error", err) return cid.Undef, err } - // Yes, we update the cache, even for the final upgrade epoch. Why? 
Reverts. This - // can save us a _lot_ of time because very few actors will have changed if we - // do a small revert then need to re-run the migration. - u.cache.Update(tmpCache) log.Warnw("COMPLETED migration", "height", height, "from", root, diff --git a/chain/types/actor_event.go b/chain/types/actor_event.go new file mode 100644 index 00000000000..bf95189e19c --- /dev/null +++ b/chain/types/actor_event.go @@ -0,0 +1,67 @@ +package types + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" +) + +type ActorEventBlock struct { + // The value codec to match when filtering event values. + Codec uint64 `json:"codec"` + + // The value we want to match on associated with the corresponding "event key" + // when filtering events. + // Should be a byte array encoded with the specified codec. + // Assumes base64 encoding when converting to/from JSON strings. + Value []byte `json:"value"` +} + +type ActorEventFilter struct { + // Matches events from one of these actors, or any actor if empty. + // For now, this MUST be a Filecoin address. + Addresses []address.Address `json:"addresses,omitempty"` + + // Matches events with the specified key/values, or all events if empty. + // If the value is an empty slice, the filter will match on the key only, accepting any value. + Fields map[string][]ActorEventBlock `json:"fields,omitempty"` + + // The height of the earliest tipset to include in the query. If empty, the query starts at the + // last finalized tipset. + // NOTE: In a future upgrade, this will be strict when set and will result in an error if a filter + // cannot be fulfilled by the depth of history available in the node. Currently, the node will + // not return an error, but will return starting from the epoch it has data for. + FromHeight *abi.ChainEpoch `json:"fromHeight,omitempty"` + + // The height of the latest tipset to include in the query. 
If empty, the query ends at the + // latest tipset. + ToHeight *abi.ChainEpoch `json:"toHeight,omitempty"` + + // Restricts events returned to those emitted from messages contained in this tipset. + // If `TipSetKey` is left empty in the filter criteria, then neither `FromHeight` nor `ToHeight` are allowed. + TipSetKey *TipSetKey `json:"tipsetKey,omitempty"` +} + +type ActorEvent struct { + // Event entries in log form. + Entries []EventEntry `json:"entries"` + + // Filecoin address of the actor that emitted this event. + // NOTE: In a future upgrade, this will change to always be an ID address. Currently this will be + // either the f4 address, or ID address if an f4 is not available for this actor. + Emitter address.Address `json:"emitter"` + + // Reverted is set to true if the message that produced this event was reverted because of a network re-org + // in that case, the event should be considered as reverted as well. + Reverted bool `json:"reverted"` + + // Height of the tipset that contained the message that produced this event. + Height abi.ChainEpoch `json:"height"` + + // The tipset that contained the message that produced this event. + TipSetKey TipSetKey `json:"tipsetKey"` + + // CID of message that produced this event. 
+ MsgCid cid.Cid `json:"msgCid"` +} diff --git a/chain/types/actor_event_test.go b/chain/types/actor_event_test.go new file mode 100644 index 00000000000..8c50b171754 --- /dev/null +++ b/chain/types/actor_event_test.go @@ -0,0 +1,125 @@ +package types + +import ( + "encoding/json" + pseudo "math/rand" + "testing" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + builtintypes "github.com/filecoin-project/go-state-types/builtin" +) + +func TestJSONMarshalling(t *testing.T) { + rng := pseudo.New(pseudo.NewSource(0)) + t.Run("actor event with entries", + testJsonMarshalling( + ActorEvent{ + Entries: []EventEntry{ + { + Key: "key1", + Codec: 0x51, + Value: []byte("value1"), + }, + { + Key: "key2", + Codec: 0x52, + Value: []byte("value2"), + }, + }, + Emitter: randomF4Addr(t, rng), + Reverted: false, + Height: 1001, + TipSetKey: NewTipSetKey(randomCid(t, rng)), + MsgCid: randomCid(t, rng), + }, + `{"entries":[{"Flags":0,"Key":"key1","Codec":81,"Value":"dmFsdWUx"},{"Flags":0,"Key":"key2","Codec":82,"Value":"dmFsdWUy"}],"emitter":"f410fagkp3qx2f76maqot74jaiw3tzbxe76k76zrkl3xifk67isrnbn2sll3yua","reverted":false,"height":1001,"tipsetKey":[{"/":"bafkqacx3dag26sfht3qlcdi"}],"msgCid":{"/":"bafkqacrziziykd6uuf4islq"}}`, + ), + ) + + t.Run("actor event filter", + testJsonMarshalling( + ActorEventFilter{ + Addresses: []address.Address{ + randomF4Addr(t, pseudo.New(pseudo.NewSource(0))), + randomF4Addr(t, pseudo.New(pseudo.NewSource(0))), + }, + Fields: map[string][]ActorEventBlock{ + "key1": { + { + Codec: 0x51, + Value: []byte("value1"), + }, + }, + "key2": { + { + Codec: 0x52, + Value: []byte("value2"), + }, + }, + }, + FromHeight: heightOf(0), + ToHeight: heightOf(100), + TipSetKey: randomTipSetKey(t, rng), + }, + 
`{"addresses":["f410fagkp3qx2f76maqot74jaiw3tzbxe76k76zrkl3xifk67isrnbn2sll3yua","f410fagkp3qx2f76maqot74jaiw3tzbxe76k76zrkl3xifk67isrnbn2sll3yua"],"fields":{"key1":[{"codec":81,"value":"dmFsdWUx"}],"key2":[{"codec":82,"value":"dmFsdWUy"}]},"fromHeight":0,"toHeight":100,"tipsetKey":[{"/":"bafkqacxcqxwocuiukv4aq5i"}]}`, + ), + ) + t.Run("actor event block", + testJsonMarshalling( + ActorEventBlock{ + Codec: 1, + Value: []byte("test"), + }, + `{"codec":1,"value":"dGVzdA=="}`, + ), + ) +} + +func testJsonMarshalling[V ActorEvent | ActorEventBlock | ActorEventFilter](subject V, expect string) func(t *testing.T) { + return func(t *testing.T) { + gotMarshalled, err := json.Marshal(subject) + require.NoError(t, err) + require.JSONEqf(t, expect, string(gotMarshalled), "serialization mismatch") + var gotUnmarshalled V + require.NoError(t, json.Unmarshal([]byte(expect), &gotUnmarshalled)) + require.Equal(t, subject, gotUnmarshalled) + } +} + +func heightOf(h int64) *abi.ChainEpoch { + hp := abi.ChainEpoch(h) + return &hp +} + +func randomTipSetKey(tb testing.TB, rng *pseudo.Rand) *TipSetKey { + tb.Helper() + tk := NewTipSetKey(randomCid(tb, rng)) + return &tk +} + +func randomF4Addr(tb testing.TB, rng *pseudo.Rand) address.Address { + tb.Helper() + addr, err := address.NewDelegatedAddress(builtintypes.EthereumAddressManagerActorID, randomBytes(32, rng)) + require.NoError(tb, err) + + return addr +} + +func randomCid(tb testing.TB, rng *pseudo.Rand) cid.Cid { + tb.Helper() + cb := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY} + c, err := cb.Sum(randomBytes(10, rng)) + require.NoError(tb, err) + return c +} + +func randomBytes(n int, rng *pseudo.Rand) []byte { + buf := make([]byte, n) + rng.Read(buf) + return buf +} diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go index 74a278ce49b..dde703ceed5 100644 --- a/chain/types/cbor_gen.go +++ b/chain/types/cbor_gen.go @@ -55,7 +55,7 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { } // t.BeaconEntries 
([]types.BeaconEntry) (slice) - if len(t.BeaconEntries) > cbg.MaxLength { + if len(t.BeaconEntries) > 8192 { return xerrors.Errorf("Slice value in field t.BeaconEntries was too long") } @@ -66,10 +66,11 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.WinPoStProof ([]proof.PoStProof) (slice) - if len(t.WinPoStProof) > cbg.MaxLength { + if len(t.WinPoStProof) > 8192 { return xerrors.Errorf("Slice value in field t.WinPoStProof was too long") } @@ -80,10 +81,11 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Parents ([]cid.Cid) (slice) - if len(t.Parents) > cbg.MaxLength { + if len(t.Parents) > 8192 { return xerrors.Errorf("Slice value in field t.Parents was too long") } @@ -238,7 +240,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.BeaconEntries: array too large (%d)", extra) } @@ -266,9 +268,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.WinPoStProof ([]proof.PoStProof) (slice) maj, extra, err = cr.ReadHeader() @@ -276,7 +278,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.WinPoStProof: array too large (%d)", extra) } @@ -304,9 +306,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Parents ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -314,7 +316,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Parents: array too large (%d)", extra) } @@ -345,9 +347,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { t.Parents[i] = c } + } } - // t.ParentWeight (big.Int) (struct) { @@ -360,10 +362,10 @@ func (t *BlockHeader) 
UnmarshalCBOR(r io.Reader) (err error) { // t.Height (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -511,7 +513,7 @@ func (t *Ticket) MarshalCBOR(w io.Writer) error { } // t.VRFProof ([]uint8) (slice) - if len(t.VRFProof) > cbg.ByteArrayMaxLen { + if len(t.VRFProof) > 2097152 { return xerrors.Errorf("Byte array in field t.VRFProof was too long") } @@ -519,9 +521,10 @@ func (t *Ticket) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.VRFProof[:]); err != nil { + if _, err := cw.Write(t.VRFProof); err != nil { return err } + return nil } @@ -555,7 +558,7 @@ func (t *Ticket) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -566,9 +569,10 @@ func (t *Ticket) UnmarshalCBOR(r io.Reader) (err error) { t.VRFProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.VRFProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.VRFProof); err != nil { return err } + return nil } @@ -598,7 +602,7 @@ func (t *ElectionProof) MarshalCBOR(w io.Writer) error { } // t.VRFProof ([]uint8) (slice) - if len(t.VRFProof) > cbg.ByteArrayMaxLen { + if len(t.VRFProof) > 2097152 { return xerrors.Errorf("Byte array in field t.VRFProof was too long") } @@ -606,9 +610,10 @@ func (t *ElectionProof) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.VRFProof[:]); err != nil { + if _, err := cw.Write(t.VRFProof); err != nil { return err } + return nil } @@ -638,10 +643,10 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { // t.WinCount (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -667,7 +672,7 @@ 
func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -678,9 +683,10 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { t.VRFProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.VRFProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.VRFProof); err != nil { return err } + return nil } @@ -753,7 +759,7 @@ func (t *Message) MarshalCBOR(w io.Writer) error { } // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { + if len(t.Params) > 2097152 { return xerrors.Errorf("Byte array in field t.Params was too long") } @@ -761,9 +767,10 @@ func (t *Message) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err != nil { + if _, err := cw.Write(t.Params); err != nil { return err } + return nil } @@ -848,10 +855,10 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { // t.GasLimit (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -909,7 +916,7 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Params: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -920,9 +927,10 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + return nil } @@ -1343,7 +1351,7 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { } // t.BlsMessages ([]cid.Cid) (slice) - if len(t.BlsMessages) > cbg.MaxLength { + if len(t.BlsMessages) > 8192 { return xerrors.Errorf("Slice value in field t.BlsMessages was too long") } 
@@ -1359,7 +1367,7 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { } // t.SecpkMessages ([]cid.Cid) (slice) - if len(t.SecpkMessages) > cbg.MaxLength { + if len(t.SecpkMessages) > 8192 { return xerrors.Errorf("Slice value in field t.SecpkMessages was too long") } @@ -1425,7 +1433,7 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.BlsMessages: array too large (%d)", extra) } @@ -1456,9 +1464,9 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { t.BlsMessages[i] = c } + } } - // t.SecpkMessages ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -1466,7 +1474,7 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.SecpkMessages: array too large (%d)", extra) } @@ -1497,9 +1505,9 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { t.SecpkMessages[i] = c } + } } - return nil } @@ -1518,7 +1526,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { } // t.Cids ([]cid.Cid) (slice) - if len(t.Cids) > cbg.MaxLength { + if len(t.Cids) > 8192 { return xerrors.Errorf("Slice value in field t.Cids was too long") } @@ -1534,7 +1542,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { } // t.Blocks ([]*types.BlockHeader) (slice) - if len(t.Blocks) > cbg.MaxLength { + if len(t.Blocks) > 8192 { return xerrors.Errorf("Slice value in field t.Blocks was too long") } @@ -1545,6 +1553,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Height (abi.ChainEpoch) (int64) @@ -1557,6 +1566,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -1590,7 +1600,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Cids: array too large (%d)", extra) } @@ -1621,9 +1631,9 @@ 
func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { t.Cids[i] = c } + } } - // t.Blocks ([]*types.BlockHeader) (slice) maj, extra, err = cr.ReadHeader() @@ -1631,7 +1641,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Blocks: array too large (%d)", extra) } @@ -1669,16 +1679,16 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Height (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1721,7 +1731,7 @@ func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { } // t.Data ([]uint8) (slice) - if len(t.Data) > cbg.ByteArrayMaxLen { + if len(t.Data) > 2097152 { return xerrors.Errorf("Byte array in field t.Data was too long") } @@ -1729,9 +1739,10 @@ func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Data[:]); err != nil { + if _, err := cw.Write(t.Data); err != nil { return err } + return nil } @@ -1779,7 +1790,7 @@ func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Data: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1790,9 +1801,10 @@ func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) (err error) { t.Data = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[:]); err != nil { + if _, err := io.ReadFull(cr, t.Data); err != nil { return err } + return nil } @@ -1908,6 +1920,7 @@ func (t *StateInfo0) MarshalCBOR(w io.Writer) error { if _, err := cw.Write(lengthBufStateInfo0); err != nil { return err } + return nil } @@ -1958,7 +1971,7 @@ func (t *Event) MarshalCBOR(w io.Writer) error { } // t.Entries ([]types.EventEntry) (slice) - if len(t.Entries) > cbg.MaxLength { + if len(t.Entries) > 8192 { return 
xerrors.Errorf("Slice value in field t.Entries was too long") } @@ -1969,6 +1982,7 @@ func (t *Event) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -2017,7 +2031,7 @@ func (t *Event) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Entries: array too large (%d)", extra) } @@ -2045,9 +2059,9 @@ func (t *Event) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -2071,7 +2085,7 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error { } // t.Key (string) (string) - if len(t.Key) > cbg.MaxLength { + if len(t.Key) > 8192 { return xerrors.Errorf("Value in field t.Key was too long") } @@ -2089,7 +2103,7 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error { } // t.Value ([]uint8) (slice) - if len(t.Value) > cbg.ByteArrayMaxLen { + if len(t.Value) > 2097152 { return xerrors.Errorf("Byte array in field t.Value was too long") } @@ -2097,9 +2111,10 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -2142,7 +2157,7 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) { // t.Key (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -2170,7 +2185,7 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Value: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -2181,9 +2196,10 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } + return nil } @@ -2202,7 +2218,7 @@ func (t *GasTrace) MarshalCBOR(w io.Writer) 
error { } // t.Name (string) (string) - if len(t.Name) > cbg.MaxLength { + if len(t.Name) > 8192 { return xerrors.Errorf("Value in field t.Name was too long") } @@ -2256,6 +2272,7 @@ func (t *GasTrace) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -2285,7 +2302,7 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.Name (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -2295,10 +2312,10 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.TotalGas (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2320,10 +2337,10 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.ComputeGas (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2345,10 +2362,10 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.StorageGas (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2370,10 +2387,10 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.TimeTaken (time.Duration) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2507,7 +2524,7 @@ func (t *MessageTrace) MarshalCBOR(w io.Writer) error { } // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { + if len(t.Params) > 2097152 { return xerrors.Errorf("Byte array in field t.Params was too long") } @@ -2515,7 +2532,7 @@ func (t *MessageTrace) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err 
!= nil { + if _, err := cw.Write(t.Params); err != nil { return err } @@ -2609,7 +2626,7 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Params: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -2620,9 +2637,10 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) { t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + // t.ParamsCodec (uint64) (uint64) { @@ -2697,7 +2715,7 @@ func (t *ReturnTrace) MarshalCBOR(w io.Writer) error { } // t.Return ([]uint8) (slice) - if len(t.Return) > cbg.ByteArrayMaxLen { + if len(t.Return) > 2097152 { return xerrors.Errorf("Byte array in field t.Return was too long") } @@ -2705,7 +2723,7 @@ func (t *ReturnTrace) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Return[:]); err != nil { + if _, err := cw.Write(t.Return); err != nil { return err } @@ -2744,10 +2762,10 @@ func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.ExitCode (exitcode.ExitCode) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2773,7 +2791,7 @@ func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Return: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -2784,9 +2802,10 @@ func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) { t.Return = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Return[:]); err != nil { + if _, err := io.ReadFull(cr, t.Return); err != nil { return err } + // t.ReturnCodec (uint64) (uint64) { @@ -2845,6 +2864,7 @@ func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { if err := 
v.MarshalCBOR(cw); err != nil { return err } + } // t.Subcalls ([]types.ExecutionTrace) (slice) @@ -2859,6 +2879,7 @@ func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -2968,9 +2989,9 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Subcalls ([]types.ExecutionTrace) (slice) maj, extra, err = cr.ReadHeader() @@ -3006,8 +3027,8 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index acc0d5c9454..35fed87d802 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -610,7 +610,7 @@ type EthFilterSpec struct { Topics EthTopicSpec `json:"topics"` // Restricts event logs returned to those emitted from messages contained in this tipset. - // If BlockHash is present in in the filter criteria, then neither FromBlock nor ToBlock are allowed. + // If BlockHash is present in the filter criteria, then neither FromBlock nor ToBlock are allowed. // Added in EIP-234 BlockHash *EthHash `json:"blockHash,omitempty"` } diff --git a/chain/types/event.go b/chain/types/event.go index 106a120e211..5f6415d49e1 100644 --- a/chain/types/event.go +++ b/chain/types/event.go @@ -28,7 +28,7 @@ type EventEntry struct { // The event value's codec Codec uint64 - // The event value + // The event value. 
It is encoded using the codec specified above Value []byte } diff --git a/cli/client.go b/cli/client.go index 88f7ed2087b..81299b8fb3e 100644 --- a/cli/client.go +++ b/cli/client.go @@ -1770,7 +1770,7 @@ func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS if v.DealID == 0 { return deal{ LocalDeal: v, - OnChainDealState: *market.EmptyDealState(), + OnChainDealState: market.EmptyDealState(), } } @@ -1781,7 +1781,7 @@ func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS return deal{ LocalDeal: v, - OnChainDealState: onChain.State, + OnChainDealState: onChain.State.Iface(), } } @@ -1807,13 +1807,13 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tTransferChannelID\tTransferStatus\tVerified\tMessage\n") for _, d := range deals { onChain := "N" - if d.OnChainDealState.SectorStartEpoch != -1 { - onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch) + if d.OnChainDealState.SectorStartEpoch() != -1 { + onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch()) } slashed := "N" - if d.OnChainDealState.SlashEpoch != -1 { - slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch) + if d.OnChainDealState.SlashEpoch() != -1 { + slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch()) } price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration))) @@ -1869,13 +1869,13 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8) onChain := "N" - if d.OnChainDealState.SectorStartEpoch != -1 { - onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch) + if d.OnChainDealState.SectorStartEpoch() != -1 { + onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch()) } slashed 
:= "N" - if d.OnChainDealState.SlashEpoch != -1 { - slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch) + if d.OnChainDealState.SlashEpoch() != -1 { + slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch()) } piece := ellipsis(d.LocalDeal.PieceCID.String(), 8) diff --git a/cli/filplus.go b/cli/filplus.go index 56922943960..a4982cecbec 100644 --- a/cli/filplus.go +++ b/cli/filplus.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "strconv" + "strings" cbor "github.com/ipfs/go-ipld-cbor" "github.com/urfave/cli/v2" @@ -233,16 +234,21 @@ var filplusListClientsCmd = &cli.Command{ var filplusListAllocationsCmd = &cli.Command{ Name: "list-allocations", - Usage: "List allocations made by client", + Usage: "List allocations available in verified registry actor or made by a client if specified", ArgsUsage: "clientAddress", Flags: []cli.Flag{ &cli.BoolFlag{ Name: "expired", Usage: "list only expired allocations", }, + &cli.BoolFlag{ + Name: "json", + Usage: "output results in json format", + Value: false, + }, }, Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { + if cctx.NArg() > 1 { return IncorrectNumArgs(cctx) } @@ -253,14 +259,76 @@ var filplusListAllocationsCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - clientAddr, err := address.NewFromString(cctx.Args().Get(0)) - if err != nil { - return err - } + writeOut := func(tsHeight abi.ChainEpoch, allocations map[verifreg.AllocationId]verifreg.Allocation, json, expired bool) error { + // Map Keys. 
Corresponds to the standard tablewriter output + allocationID := "AllocationID" + client := "Client" + provider := "Miner" + pieceCid := "PieceCid" + pieceSize := "PieceSize" + tMin := "TermMin" + tMax := "TermMax" + expr := "Expiration" + + // One-to-one mapping between tablewriter keys and JSON keys + tableKeysToJsonKeys := map[string]string{ + allocationID: strings.ToLower(allocationID), + client: strings.ToLower(client), + provider: strings.ToLower(provider), + pieceCid: strings.ToLower(pieceCid), + pieceSize: strings.ToLower(pieceSize), + tMin: strings.ToLower(tMin), + tMax: strings.ToLower(tMax), + expr: strings.ToLower(expr), + } - clientIdAddr, err := api.StateLookupID(ctx, clientAddr, types.EmptyTSK) - if err != nil { - return err + var allocs []map[string]interface{} + + for key, val := range allocations { + if tsHeight > val.Expiration || !expired { + alloc := map[string]interface{}{ + allocationID: key, + client: val.Client, + provider: val.Provider, + pieceCid: val.Data, + pieceSize: val.Size, + tMin: val.TermMin, + tMax: val.TermMax, + expr: val.Expiration, + } + allocs = append(allocs, alloc) + } + } + + if json { + // get a new list of allocations with json keys instead of tablewriter keys + var jsonAllocs []map[string]interface{} + for _, alloc := range allocs { + jsonAlloc := make(map[string]interface{}) + for k, v := range alloc { + jsonAlloc[tableKeysToJsonKeys[k]] = v + } + jsonAllocs = append(jsonAllocs, jsonAlloc) + } + // then return this! 
+ return PrintJson(jsonAllocs) + } + // Init the tablewriter's columns + tw := tablewriter.New( + tablewriter.Col(allocationID), + tablewriter.Col(client), + tablewriter.Col(provider), + tablewriter.Col(pieceCid), + tablewriter.Col(pieceSize), + tablewriter.Col(tMin), + tablewriter.Col(tMax), + tablewriter.NewLineCol(expr)) + // populate it with content + for _, alloc := range allocs { + tw.Write(alloc) + } + // return the corresponding string + return tw.Flush(os.Stdout) } store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) @@ -280,41 +348,38 @@ var filplusListAllocationsCmd = &cli.Command{ return err } - allocationsMap, err := verifregState.GetAllocations(clientIdAddr) + if cctx.NArg() == 1 { + clientAddr, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + clientIdAddr, err := api.StateLookupID(ctx, clientAddr, types.EmptyTSK) + if err != nil { + return err + } + + allocationsMap, err := verifregState.GetAllocations(clientIdAddr) + if err != nil { + return err + } + + return writeOut(ts.Height(), allocationsMap, cctx.Bool("json"), cctx.Bool("expired")) + } + + allocationsMap, err := verifregState.GetAllAllocations() if err != nil { return err } - tw := tablewriter.New( - tablewriter.Col("ID"), - tablewriter.Col("Provider"), - tablewriter.Col("Data"), - tablewriter.Col("Size"), - tablewriter.Col("TermMin"), - tablewriter.Col("TermMax"), - tablewriter.Col("Expiration"), - ) + return writeOut(ts.Height(), allocationsMap, cctx.Bool("json"), cctx.Bool("expired")) - for allocationId, allocation := range allocationsMap { - if ts.Height() > allocation.Expiration || !cctx.IsSet("expired") { - tw.Write(map[string]interface{}{ - "ID": allocationId, - "Provider": allocation.Provider, - "Data": allocation.Data, - "Size": allocation.Size, - "TermMin": allocation.TermMin, - "TermMax": allocation.TermMax, - "Expiration": allocation.Expiration, - }) - } - } - return tw.Flush(os.Stdout) }, } var filplusListClaimsCmd 
= &cli.Command{ Name: "list-claims", - Usage: "List claims made by provider", + Usage: "List claims available in verified registry actor or made by provider if specified", ArgsUsage: "providerAddress", Flags: []cli.Flag{ &cli.BoolFlag{ @@ -323,7 +388,7 @@ var filplusListClaimsCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { + if cctx.NArg() > 1 { return IncorrectNumArgs(cctx) } @@ -334,14 +399,81 @@ var filplusListClaimsCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - providerAddr, err := address.NewFromString(cctx.Args().Get(0)) - if err != nil { - return err - } + writeOut := func(tsHeight abi.ChainEpoch, claims map[verifreg.ClaimId]verifreg.Claim, json, expired bool) error { + // Map Keys. Corresponds to the standard tablewriter output + claimID := "ClaimID" + provider := "Provider" + client := "Client" + data := "Data" + size := "Size" + tMin := "TermMin" + tMax := "TermMax" + tStart := "TermStart" + sector := "Sector" + + // One-to-one mapping between tablewriter keys and JSON keys + tableKeysToJsonKeys := map[string]string{ + claimID: strings.ToLower(claimID), + provider: strings.ToLower(provider), + client: strings.ToLower(client), + data: strings.ToLower(data), + size: strings.ToLower(size), + tMin: strings.ToLower(tMin), + tMax: strings.ToLower(tMax), + tStart: strings.ToLower(tStart), + sector: strings.ToLower(sector), + } - providerIdAddr, err := api.StateLookupID(ctx, providerAddr, types.EmptyTSK) - if err != nil { - return err + var claimList []map[string]interface{} + + for key, val := range claims { + if tsHeight > val.TermStart+val.TermMax || !expired { + claim := map[string]interface{}{ + claimID: key, + provider: val.Provider, + client: val.Client, + data: val.Data, + size: val.Size, + tMin: val.TermMin, + tMax: val.TermMax, + tStart: val.TermStart, + sector: val.Sector, + } + claimList = append(claimList, claim) + } + } + + if json { + // get a new list of claims with json keys instead of 
tablewriter keys + var jsonClaims []map[string]interface{} + for _, claim := range claimList { + jsonClaim := make(map[string]interface{}) + for k, v := range claim { + jsonClaim[tableKeysToJsonKeys[k]] = v + } + jsonClaims = append(jsonClaims, jsonClaim) + } + // then return this! + return PrintJson(jsonClaims) + } + // Init the tablewriter's columns + tw := tablewriter.New( + tablewriter.Col(claimID), + tablewriter.Col(client), + tablewriter.Col(provider), + tablewriter.Col(data), + tablewriter.Col(size), + tablewriter.Col(tMin), + tablewriter.Col(tMax), + tablewriter.Col(tStart), + tablewriter.NewLineCol(sector)) + // populate it with content + for _, alloc := range claimList { + + tw.Write(alloc) + } + // return the corresponding string + return tw.Flush(os.Stdout) } store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) @@ -361,39 +493,31 @@ var filplusListClaimsCmd = &cli.Command{ return err } - claimsMap, err := verifregState.GetClaims(providerIdAddr) - if err != nil { - return err - } + if cctx.NArg() == 1 { + providerAddr, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } - tw := tablewriter.New( - tablewriter.Col("ID"), - tablewriter.Col("Provider"), - tablewriter.Col("Client"), - tablewriter.Col("Data"), - tablewriter.Col("Size"), - tablewriter.Col("TermMin"), - tablewriter.Col("TermMax"), - tablewriter.Col("TermStart"), - tablewriter.Col("Sector"), - ) + providerIdAddr, err := api.StateLookupID(ctx, providerAddr, types.EmptyTSK) + if err != nil { + return err + } - for claimId, claim := range claimsMap { - if ts.Height() > claim.TermMax+claim.TermStart || !cctx.IsSet("expired") { - tw.Write(map[string]interface{}{ - "ID": claimId, - "Provider": claim.Provider, - "Client": claim.Client, - "Data": claim.Data, - "Size": claim.Size, - "TermMin": claim.TermMin, - "TermMax": claim.TermMax, - "TermStart": claim.TermStart, - "Sector": claim.Sector, - }) + claimsMap, err := 
verifregState.GetClaims(providerIdAddr) + if err != nil { + return err } + + return writeOut(ts.Height(), claimsMap, cctx.Bool("json"), cctx.Bool("expired")) } - return tw.Flush(os.Stdout) + + claimsMap, err := verifregState.GetAllClaims() + if err != nil { + return err + } + + return writeOut(ts.Height(), claimsMap, cctx.Bool("json"), cctx.Bool("expired")) }, } diff --git a/cli/util.go b/cli/util.go index 03de817f9b1..de161f59085 100644 --- a/cli/util.go +++ b/cli/util.go @@ -2,6 +2,8 @@ package cli import ( "context" + "encoding/json" + "fmt" "os" "github.com/fatih/color" @@ -37,3 +39,13 @@ func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types return types.NewTipSet(headers) } + +func PrintJson(obj interface{}) error { + resJson, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return fmt.Errorf("marshalling json: %w", err) + } + + fmt.Println(string(resJson)) + return nil +} diff --git a/cmd/lotus-bench/amt_internal.go b/cmd/lotus-bench/amt_internal.go deleted file mode 100644 index f0e3035b710..00000000000 --- a/cmd/lotus-bench/amt_internal.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copied from go-amt-ipld https://github.com/filecoin-project/go-amt-ipld/tree/master/internal -// which for some reason is a go internal package and therefore cannot be imported - -package main - -import ( - "fmt" - "io" - "math" - "sort" - - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -type AMTRoot struct { - BitWidth uint64 - Height uint64 - Count uint64 - AMTNode AMTNode -} - -type AMTNode struct { - Bmap []byte - Links []cid.Cid - Values []*cbg.Deferred -} - -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = math.E -var _ = sort.Sort - -var lengthBufAMTRoot = []byte{132} - -func (t *AMTRoot) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write(lengthBufAMTRoot); err != nil { - return err - } - - // t.BitWidth (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.BitWidth); err != nil { - return err - } - - // t.Height (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Height); err != nil { - return err - } - - // t.Count (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Count); err != nil { - return err - } - - // t.AMTNode (internal.AMTNode) (struct) - if err := t.AMTNode.MarshalCBOR(cw); err != nil { - return err - } - return nil -} - -func (t *AMTRoot) UnmarshalCBOR(r io.Reader) (err error) { - *t = AMTRoot{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BitWidth (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.BitWidth = extra - - } - // t.Height (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Height = extra - - } - // t.Count (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Count = extra - - } - // 
t.AMTNode (internal.AMTNode) (struct) - - { - - if err := t.AMTNode.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.AMTNode: %w", err) - } - - } - return nil -} - -var lengthBufAMTNode = []byte{131} - -func (t *AMTNode) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write(lengthBufAMTNode); err != nil { - return err - } - - // t.Bmap ([]uint8) (slice) - if len(t.Bmap) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Bmap was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Bmap))); err != nil { - return err - } - - if _, err := cw.Write(t.Bmap[:]); err != nil { - return err - } - - // t.Links ([]cid.Cid) (slice) - if len(t.Links) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Links was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Links))); err != nil { - return err - } - for _, v := range t.Links { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Links: %w", err) - } - } - - // t.Values ([]*typegen.Deferred) (slice) - if len(t.Values) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Values was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Values))); err != nil { - return err - } - for _, v := range t.Values { - if err := v.MarshalCBOR(cw); err != nil { - return err - } - } - return nil -} - -func (t *AMTNode) UnmarshalCBOR(r io.Reader) (err error) { - *t = AMTNode{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Bmap 
([]uint8) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Bmap: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Bmap = make([]uint8, extra) - } - - if _, err := io.ReadFull(cr, t.Bmap[:]); err != nil { - return err - } - // t.Links ([]cid.Cid) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Links: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Links = make([]cid.Cid, extra) - } - - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Links failed: %w", err) - } - t.Links[i] = c - } - - // t.Values ([]*typegen.Deferred) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Values: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Values = make([]*cbg.Deferred, extra) - } - - for i := 0; i < int(extra); i++ { - - var v cbg.Deferred - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } - - t.Values[i] = &v - } - - return nil -} diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 7d3c0cde084..545ed1eb90b 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "context" "crypto/rand" "encoding/json" @@ -9,16 +8,9 @@ import ( "math/big" "os" "path/filepath" - "sync" "time" "github.com/docker/go-units" - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - offline "github.com/ipfs/go-ipfs-exchange-offline" - cbor "github.com/ipfs/go-ipld-cbor" - format 
"github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" "github.com/minio/blake2b-simd" "github.com/mitchellh/go-homedir" @@ -28,14 +20,10 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" prooftypes "github.com/filecoin-project/go-state-types/proof" - adt "github.com/filecoin-project/specs-actors/v6/actors/util/adt" lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" @@ -116,7 +104,6 @@ func main() { DisableSliceFlagSeparator: true, Commands: []*cli.Command{ proveCmd, - amtBenchCmd, sealBenchCmd, simpleCmd, importBenchCmd, @@ -131,211 +118,6 @@ func main() { } } -type amtStatCollector struct { - ds format.NodeGetter - walk func(format.Node) ([]*format.Link, error) - - statsLk sync.Mutex - totalAMTLinks int - totalAMTValues int - totalAMTLinkNodes int - totalAMTValueNodes int - totalAMTLinkNodeSize int - totalAMTValueNodeSize int -} - -func (asc *amtStatCollector) String() string { - asc.statsLk.Lock() - defer asc.statsLk.Unlock() - - str := "\n------------\n" - str += fmt.Sprintf("Link Count: %d\n", asc.totalAMTLinks) - str += fmt.Sprintf("Value Count: %d\n", asc.totalAMTValues) - str += fmt.Sprintf("%d link nodes %d bytes\n", asc.totalAMTLinkNodes, asc.totalAMTLinkNodeSize) - str += fmt.Sprintf("%d value nodes %d bytes\n", asc.totalAMTValueNodes, asc.totalAMTValueNodeSize) - str += fmt.Sprintf("Total bytes: %d\n------------\n", asc.totalAMTLinkNodeSize+asc.totalAMTValueNodeSize) - return str -} - -func (asc *amtStatCollector) record(ctx context.Context, nd format.Node) error { - size, err 
:= nd.Size() - if err != nil { - return err - } - - var node AMTNode - if err := node.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil { - // try to deserialize root - var root AMTRoot - if err := root.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil { - return err - } - node = root.AMTNode - } - - asc.statsLk.Lock() - defer asc.statsLk.Unlock() - - link := len(node.Links) > 0 - value := len(node.Values) > 0 - - if link { - asc.totalAMTLinks += len(node.Links) - asc.totalAMTLinkNodes++ - asc.totalAMTLinkNodeSize += int(size) - } else if value { - asc.totalAMTValues += len(node.Values) - asc.totalAMTValueNodes++ - asc.totalAMTValueNodeSize += int(size) - } else { - return xerrors.Errorf("unexpected AMT node %x: neither link nor value", nd.RawData()) - } - - return nil -} - -func (asc *amtStatCollector) walkLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) { - nd, err := asc.ds.Get(ctx, c) - if err != nil { - return nil, err - } - - if err := asc.record(ctx, nd); err != nil { - return nil, err - } - - return asc.walk(nd) -} - -func carWalkFunc(nd format.Node) (out []*format.Link, err error) { - for _, link := range nd.Links() { - if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed { - continue - } - out = append(out, link) - } - return out, nil -} - -var amtBenchCmd = &cli.Command{ - Name: "amt", - Usage: "Benchmark AMT churn", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "rounds", - Usage: "rounds of churn to measure", - Value: 1, - }, - &cli.IntFlag{ - Name: "interval", - Usage: "AMT idx interval for churning values", - Value: 2880, - }, - &cli.IntFlag{ - Name: "bitwidth", - Usage: "AMT bitwidth", - Value: 6, - }, - }, - Action: func(c *cli.Context) error { - bs := blockstore.NewMemory() - ctx := c.Context - store := adt.WrapStore(ctx, cbor.NewCborStore(bs)) - - // Setup in memory blockstore - bitwidth := c.Int("bitwidth") - array, err := adt.MakeEmptyArray(store, bitwidth) - if 
err != nil { - return err - } - - // Using motivating empirical example: market actor states AMT - // Create 40,000,000 states for realistic workload - fmt.Printf("Populating AMT\n") - for i := 0; i < 40000000; i++ { - if err := array.Set(uint64(i), &market.DealState{ - SectorStartEpoch: abi.ChainEpoch(2000000 + i), - LastUpdatedEpoch: abi.ChainEpoch(-1), - SlashEpoch: -1, - VerifiedClaim: verifreg.AllocationId(i), - }); err != nil { - return err - } - } - - r, err := array.Root() - if err != nil { - return err - } - - // Measure ratio of internal / leaf nodes / sizes - dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - asc := &amtStatCollector{ - ds: dag, - walk: carWalkFunc, - } - - fmt.Printf("Measuring AMT\n") - seen := cid.NewSet() - if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil { - return err - } - - fmt.Printf("%s\n", asc) - - // Overwrite ids with idx % interval: one epoch of market cron - rounds := c.Int("rounds") - interval := c.Int("interval") - - fmt.Printf("Overwrite 1 out of %d values for %d rounds\n", interval, rounds) - array, err = adt.AsArray(store, r, bitwidth) - if err != nil { - return err - } - roots := make([]cid.Cid, rounds) - for j := 0; j < rounds; j++ { - if j%10 == 0 { - fmt.Printf("round: %d\n", j) - } - for i := j; i < 40000000; i += interval { - if i%interval == j { - if err := array.Set(uint64(i), &market.DealState{ - SectorStartEpoch: abi.ChainEpoch(2000000 + i), - LastUpdatedEpoch: abi.ChainEpoch(1), - SlashEpoch: -1, - VerifiedClaim: verifreg.AllocationId(i), - }); err != nil { - return err - } - } - } - roots[j], err = array.Root() - if err != nil { - return err - } - - } - - // Measure churn - dag = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - asc = &amtStatCollector{ - ds: dag, - walk: carWalkFunc, - } - - fmt.Printf("Measuring %d rounds of churn\n", rounds) - - for _, r := range roots { - if err := merkledag.Walk(ctx, 
asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil { - return err - } - } - - fmt.Printf("%s\n", asc) - return nil - }, -} - var sealBenchCmd = &cli.Command{ Name: "sealing", Usage: "Benchmark seal and winning post and window post", diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index 1a4a98fc4f0..9ab4e8b05b0 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -55,6 +55,7 @@ import ( "github.com/filecoin-project/lotus/storage" "github.com/filecoin-project/lotus/storage/paths" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -327,21 +328,21 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string info := &pipeline.SectorInfo{ State: pipeline.Proving, SectorNumber: sector.SectorID, - Pieces: []lapi.SectorPiece{ - { + Pieces: []pipeline.SafeSectorPiece{ + pipeline.SafePiece(lapi.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, - DealInfo: &lapi.PieceDealInfo{ + DealInfo: &piece.PieceDealInfo{ DealID: dealID, DealProposal: §or.Deal, - DealSchedule: lapi.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: sector.Deal.StartEpoch, EndEpoch: sector.Deal.EndEpoch, }, }, - }, + }), }, CommD: &commD, CommR: &commR, diff --git a/cmd/lotus-shed/fip-0036.go b/cmd/lotus-shed/fip-0036.go deleted file mode 100644 index 4c8456c04ce..00000000000 --- a/cmd/lotus-shed/fip-0036.go +++ /dev/null @@ -1,554 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "sort" - "strconv" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" 
- "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" - "github.com/filecoin-project/lotus/chain/actors/builtin/power" - "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/repo" -) - -type Option uint64 - -const ( - Approve Option = 49 - Reject Option = 50 -) - -type Vote struct { - ID uint64 - OptionID Option - SignerAddress address.Address -} - -type msigVote struct { - Multisig msigBriefInfo - ApproveCount uint64 - RejectCount uint64 -} - -// https://filpoll.io/poll/16 -// snapshot height: 2162760 -// state root: bafy2bzacebdnzh43hw66bmvguk65wiwr5ssaejlq44fpdei2ysfh3eefpdlqs -var fip36PollCmd = &cli.Command{ - Name: "fip36poll", - Usage: "Process the FIP0036 FilPoll result", - ArgsUsage: "[state root, votes]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - Value: "~/.lotus", - }, - }, - Subcommands: []*cli.Command{ - finalResultCmd, - }, -} - -var finalResultCmd = &cli.Command{ - Name: "results", - Usage: "get poll results", - ArgsUsage: "[state root] [height] [votes json]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - Value: "~/.lotus", - }, - }, - - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 3 { - return xerrors.New("filpoll0036 results [state root] [height] [votes.json]") - } - - ctx := context.TODO() - if !cctx.Args().Present() { - return fmt.Errorf("must pass state root") - } - - sroot, err := cid.Decode(cctx.Args().First()) - if err != nil { - return 
fmt.Errorf("failed to parse input: %w", err) - } - - fsrepo, err := repo.NewFS(cctx.String("repo")) - if err != nil { - return err - } - - lkrepo, err := fsrepo.Lock(repo.FullNode) - if err != nil { - return err - } - - defer lkrepo.Close() //nolint:errcheck - - bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) - if err != nil { - return fmt.Errorf("failed to open blockstore: %w", err) - } - - defer func() { - if c, ok := bs.(io.Closer); ok { - if err := c.Close(); err != nil { - log.Warnf("failed to close blockstore: %s", err) - } - } - }() - - mds, err := lkrepo.Datastore(context.Background(), "/metadata") - if err != nil { - return err - } - - cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) - defer cs.Close() //nolint:errcheck - - cst := cbor.NewCborStore(bs) - store := adt.WrapStore(ctx, cst) - - st, err := state.LoadStateTree(cst, sroot) - if err != nil { - return err - } - - height, err := strconv.Atoi(cctx.Args().Get(1)) - if err != nil { - return err - } - - //get all the votes' signer ID address && their vote - vj, err := homedir.Expand(cctx.Args().Get(2)) - if err != nil { - return xerrors.Errorf("fail to get votes json") - } - votes, err := getVotesMap(vj) - if err != nil { - return xerrors.Errorf("failed to get voters: %w\n", err) - } - - type minerBriefInfo struct { - rawBytePower abi.StoragePower - dealPower abi.StoragePower - balance abi.TokenAmount - } - - // power actor - pa, err := st.GetActor(power.Address) - if err != nil { - return xerrors.Errorf("failed to get power actor: %w\n", err) - } - - powerState, err := power.Load(store, pa) - if err != nil { - return xerrors.Errorf("failed to get power state: %w\n", err) - } - - //market actor - ma, err := st.GetActor(market.Address) - if err != nil { - return xerrors.Errorf("fail to get market actor: %w\n", err) - } - - marketState, err := market.Load(store, ma) - if err != nil { - return xerrors.Errorf("fail to load market state: %w\n", err) - } - - lookupId := func(addr 
address.Address) address.Address { - ret, err := st.LookupID(addr) - if err != nil { - panic(err) - } - - return ret - } - - // we need to build several pieces of information, as we traverse the state tree: - // a map of accounts to every msig that they are a signer of - accountsToMultisigs := make(map[address.Address][]address.Address) - // a map of multisigs to some info about them for quick lookup - msigActorsInfo := make(map[address.Address]msigBriefInfo) - // a map of actors (accounts+multisigs) to every miner that they are an owner of - ownerMap := make(map[address.Address][]address.Address) - // a map of accounts to every miner that they are a worker of - workerMap := make(map[address.Address][]address.Address) - // a map of miners to some info about them for quick lookup - minerActorsInfo := make(map[address.Address]minerBriefInfo) - // a map of client addresses to deal data stored in proposals - clientToDealStorage := make(map[address.Address]abi.StoragePower) - - fmt.Println("iterating over all actors") - count := 0 - err = st.ForEach(func(addr address.Address, act *types.Actor) error { - if count%200000 == 0 { - fmt.Println("processed ", count, " actors building maps") - } - count++ - if builtin.IsMultisigActor(act.Code) { - ms, err := multisig.Load(store, act) - if err != nil { - return fmt.Errorf("load msig failed %v", err) - - } - - // TODO: Confirm that these are always ID addresses - signers, err := ms.Signers() - if err != nil { - return xerrors.Errorf("fail to get msig signers: %w", err) - } - for _, s := range signers { - signerId := lookupId(s) - accountsToMultisigs[signerId] = append(accountsToMultisigs[signerId], addr) - } - - locked, err := ms.LockedBalance(abi.ChainEpoch(height)) - if err != nil { - return xerrors.Errorf("failed to compute locked multisig balance: %w", err) - } - - threshold, _ := ms.Threshold() - info := msigBriefInfo{ - ID: addr, - Signer: signers, - Balance: big.Max(big.Zero(), types.BigSub(act.Balance, locked)), - 
Threshold: threshold, - } - msigActorsInfo[addr] = info - } - - if builtin.IsStorageMinerActor(act.Code) { - m, err := miner.Load(store, act) - if err != nil { - return xerrors.Errorf("fail to load miner actor: %w", err) - } - - info, err := m.Info() - if err != nil { - return xerrors.Errorf("fail to get miner info: %w\n", err) - } - - ownerId := lookupId(info.Owner) - ownerMap[ownerId] = append(ownerMap[ownerId], addr) - - workerId := lookupId(info.Worker) - workerMap[workerId] = append(workerMap[workerId], addr) - - lockedFunds, err := m.LockedFunds() - if err != nil { - return err - } - - bal := big.Sub(act.Balance, lockedFunds.TotalLockedFunds()) - bal = big.Max(big.Zero(), bal) - - pow, ok, err := powerState.MinerPower(addr) - if err != nil { - return err - } - - if !ok { - pow.RawBytePower = big.Zero() - } - - minerActorsInfo[addr] = minerBriefInfo{ - rawBytePower: pow.RawBytePower, - // gets added up outside this loop - dealPower: big.Zero(), - balance: bal, - } - } - - return nil - }) - - if err != nil { - return err - } - - fmt.Println("iterating over proposals") - dealProposals, err := marketState.Proposals() - if err != nil { - return err - } - - dealStates, err := marketState.States() - if err != nil { - return err - } - - if err := dealProposals.ForEach(func(dealID abi.DealID, d market.DealProposal) error { - - dealState, ok, err := dealStates.Get(dealID) - if err != nil { - return err - } - if !ok || dealState.SectorStartEpoch == -1 { - // effectively a continue - return nil - } - - clientId := lookupId(d.Client) - if cd, found := clientToDealStorage[clientId]; found { - clientToDealStorage[clientId] = big.Add(cd, big.NewInt(int64(d.PieceSize))) - } else { - clientToDealStorage[clientId] = big.NewInt(int64(d.PieceSize)) - } - - providerId := lookupId(d.Provider) - mai, found := minerActorsInfo[providerId] - - if !found { - return xerrors.Errorf("didn't find miner %s", providerId) - } - - mai.dealPower = big.Add(mai.dealPower, 
big.NewInt(int64(d.PieceSize))) - minerActorsInfo[providerId] = mai - return nil - }); err != nil { - return xerrors.Errorf("fail to get deals") - } - - // now tabulate votes - - approveBalance := abi.NewTokenAmount(0) - rejectionBalance := abi.NewTokenAmount(0) - clientApproveBytes := big.Zero() - clientRejectBytes := big.Zero() - msigPendingVotes := make(map[address.Address]msigVote) //map[msig ID]msigVote - msigVotes := make(map[address.Address]Option) - minerVotes := make(map[address.Address]Option) - fmt.Println("counting account and multisig votes") - for _, vote := range votes { - signerId, err := st.LookupID(vote.SignerAddress) - if err != nil { - fmt.Println("voter ", vote.SignerAddress, " not found in state tree, skipping") - continue - } - - //process votes for regular accounts - accountActor, err := st.GetActor(signerId) - if err != nil { - return xerrors.Errorf("fail to get account account for signer: %w\n", err) - } - - clientBytes, ok := clientToDealStorage[signerId] - if !ok { - clientBytes = big.Zero() - } - - if vote.OptionID == Approve { - approveBalance = types.BigAdd(approveBalance, accountActor.Balance) - clientApproveBytes = big.Add(clientApproveBytes, clientBytes) - } else { - rejectionBalance = types.BigAdd(rejectionBalance, accountActor.Balance) - clientRejectBytes = big.Add(clientRejectBytes, clientBytes) - } - - if minerInfos, found := ownerMap[signerId]; found { - for _, minerInfo := range minerInfos { - minerVotes[minerInfo] = vote.OptionID - } - } - if minerInfos, found := workerMap[signerId]; found { - for _, minerInfo := range minerInfos { - if _, ok := minerVotes[minerInfo]; !ok { - minerVotes[minerInfo] = vote.OptionID - } - } - } - - //process msigs - // There is a possibility that enough signers have voted for BOTH options in the poll to be above the threshold - // Because we are iterating over votes in order they arrived, the first option to go over the threshold will win - // This is in line with onchain behaviour (consider a 
case where signers are competing to withdraw all the funds - // in an msig into 2 different accounts) - if mss, found := accountsToMultisigs[signerId]; found { - for _, ms := range mss { //get all the msig signer has - if _, ok := msigVotes[ms]; ok { - // msig has already voted, skip - continue - } - if mpv, found := msigPendingVotes[ms]; found { //other signers of the multisig have voted, yet the threshold has not met - if vote.OptionID == Approve { - if mpv.ApproveCount+1 == mpv.Multisig.Threshold { //met threshold - approveBalance = types.BigAdd(approveBalance, mpv.Multisig.Balance) - delete(msigPendingVotes, ms) //threshold, can skip later signer votes - msigVotes[ms] = vote.OptionID - - } else { - mpv.ApproveCount++ - msigPendingVotes[ms] = mpv - } - } else { - if mpv.RejectCount+1 == mpv.Multisig.Threshold { //met threshold - rejectionBalance = types.BigAdd(rejectionBalance, mpv.Multisig.Balance) - delete(msigPendingVotes, ms) //threshold, can skip later signer votes - msigVotes[ms] = vote.OptionID - - } else { - mpv.RejectCount++ - msigPendingVotes[ms] = mpv - } - } - } else { //first vote received from one of the signers of the msig - msi, ok := msigActorsInfo[ms] - if !ok { - return xerrors.Errorf("didn't find msig %s in msig map", ms) - } - - if msi.Threshold == 1 { //met threshold with this signer's single vote - if vote.OptionID == Approve { - approveBalance = types.BigAdd(approveBalance, msi.Balance) - msigVotes[ms] = Approve - - } else { - rejectionBalance = types.BigAdd(rejectionBalance, msi.Balance) - msigVotes[ms] = Reject - } - } else { //threshold not met, add to pending vote - if vote.OptionID == Approve { - msigPendingVotes[ms] = msigVote{ - Multisig: msi, - ApproveCount: 1, - } - } else { - msigPendingVotes[ms] = msigVote{ - Multisig: msi, - RejectCount: 1, - } - } - } - } - } - } - } - - for s, v := range msigVotes { - if minerInfos, found := ownerMap[s]; found { - for _, minerInfo := range minerInfos { - minerVotes[minerInfo] = v - } - } - 
if minerInfos, found := workerMap[s]; found { - for _, minerInfo := range minerInfos { - if _, ok := minerVotes[minerInfo]; !ok { - minerVotes[minerInfo] = v - } - } - } - } - - approveRBP := big.Zero() - approveDealPower := big.Zero() - rejectionRBP := big.Zero() - rejectionDealPower := big.Zero() - fmt.Println("adding up miner votes") - for minerAddr, vote := range minerVotes { - mbi, ok := minerActorsInfo[minerAddr] - if !ok { - return xerrors.Errorf("failed to find miner info for %s", minerAddr) - } - - if vote == Approve { - approveBalance = big.Add(approveBalance, mbi.balance) - approveRBP = big.Add(approveRBP, mbi.rawBytePower) - approveDealPower = big.Add(approveDealPower, mbi.dealPower) - } else { - rejectionBalance = big.Add(rejectionBalance, mbi.balance) - rejectionRBP = big.Add(rejectionRBP, mbi.rawBytePower) - rejectionDealPower = big.Add(rejectionDealPower, mbi.dealPower) - } - } - - fmt.Println("Total acceptance token: ", approveBalance) - fmt.Println("Total rejection token: ", rejectionBalance) - - fmt.Println("Total acceptance SP deal power: ", approveDealPower) - fmt.Println("Total rejection SP deal power: ", rejectionDealPower) - - fmt.Println("Total acceptance SP rb power: ", approveRBP) - fmt.Println("Total rejection SP rb power: ", rejectionRBP) - - fmt.Println("Total acceptance Client rb power: ", clientApproveBytes) - fmt.Println("Total rejection Client rb power: ", clientRejectBytes) - - fmt.Println("\n\nFinal results **drumroll**") - if rejectionBalance.GreaterThanEqual(big.Mul(approveBalance, big.NewInt(3))) { - fmt.Println("token holders VETO FIP-0036!") - } else if approveBalance.LessThanEqual(rejectionBalance) { - fmt.Println("token holders REJECT FIP-0036") - } else { - fmt.Println("token holders ACCEPT FIP-0036") - } - - if rejectionDealPower.GreaterThanEqual(big.Mul(approveDealPower, big.NewInt(3))) { - fmt.Println("SPs by deal data stored VETO FIP-0036!") - } else if approveDealPower.LessThanEqual(rejectionDealPower) { - 
fmt.Println("SPs by deal data stored REJECT FIP-0036") - } else { - fmt.Println("SPs by deal data stored ACCEPT FIP-0036") - } - - if rejectionRBP.GreaterThanEqual(big.Mul(approveRBP, big.NewInt(3))) { - fmt.Println("SPs by total raw byte power VETO FIP-0036!") - } else if approveRBP.LessThanEqual(rejectionRBP) { - fmt.Println("SPs by total raw byte power REJECT FIP-0036") - } else { - fmt.Println("SPs by total raw byte power ACCEPT FIP-0036") - } - - if clientRejectBytes.GreaterThanEqual(big.Mul(clientApproveBytes, big.NewInt(3))) { - fmt.Println("Storage Clients VETO FIP-0036!") - } else if clientApproveBytes.LessThanEqual(clientRejectBytes) { - fmt.Println("Storage Clients REJECT FIP-0036") - } else { - fmt.Println("Storage Clients ACCEPT FIP-0036") - } - - return nil - }, -} - -// Returns voted sorted by votes from earliest to latest -func getVotesMap(file string) ([]Vote, error) { - var votes []Vote - vb, err := os.ReadFile(file) - if err != nil { - return nil, xerrors.Errorf("read vote: %w", err) - } - - if err := json.Unmarshal(vb, &votes); err != nil { - return nil, xerrors.Errorf("unmarshal vote: %w", err) - } - - sort.SliceStable(votes, func(i, j int) bool { - return votes[i].ID < votes[j].ID - }) - - return votes, nil -} diff --git a/cmd/lotus-shed/indexes.go b/cmd/lotus-shed/indexes.go index be7d43e0513..620933e25f8 100644 --- a/cmd/lotus-shed/indexes.go +++ b/cmd/lotus-shed/indexes.go @@ -9,13 +9,11 @@ import ( "strings" "github.com/mitchellh/go-homedir" - "github.com/multiformats/go-varint" "github.com/urfave/cli/v2" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - builtintypes "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" @@ -109,6 +107,7 @@ var backfillEventsCmd = &cli.Command{ addressLookups := make(map[abi.ActorID]address.Address) + // TODO: We don't need this address 
resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands resolveFn := func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { // we only want to match using f4 addresses idAddr, err := address.NewIDAddress(uint64(emitter)) @@ -118,18 +117,9 @@ var backfillEventsCmd = &cli.Command{ actor, err := api.StateGetActor(ctx, idAddr, ts.Key()) if err != nil || actor.Address == nil { - return address.Undef, false - } - - // if robust address is not f4 then we won't match against it so bail early - if actor.Address.Protocol() != address.Delegated { - return address.Undef, false + return idAddr, true } - // we have an f4 address, make sure it's assigned by the EAM - if namespace, _, err := varint.FromUvarint(actor.Address.Payload()); err != nil || namespace != builtintypes.EthereumAddressManagerActorID { - return address.Undef, false - } return *actor.Address, true } diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index d5a2ad2cdfd..e9f9f3b6bd1 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -84,7 +84,6 @@ func main() { diffCmd, itestdCmd, msigCmd, - fip36PollCmd, invariantsCmd, gasTraceCmd, replayOfflineCmd, diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go index 4436e3c404a..6fb1566b671 100644 --- a/cmd/lotus-shed/market.go +++ b/cmd/lotus-shed/market.go @@ -387,7 +387,7 @@ var marketDealsTotalStorageCmd = &cli.Command{ count := 0 for _, deal := range deals { - if market.IsDealActive(deal.State) { + if market.IsDealActive(deal.State.Iface()) { dealStorage := big.NewIntUnsigned(uint64(deal.Proposal.PieceSize)) total = big.Add(total, dealStorage) count++ diff --git a/cmd/lotus-shed/shedgen/cbor_gen.go b/cmd/lotus-shed/shedgen/cbor_gen.go index f2a79fe7dce..10b41827ffd 100644 --- a/cmd/lotus-shed/shedgen/cbor_gen.go +++ b/cmd/lotus-shed/shedgen/cbor_gen.go @@ -31,7 +31,7 @@ func (t *CarbNode) MarshalCBOR(w io.Writer) error { } // t.Sub ([]cid.Cid) (slice) - if 
len("Sub") > cbg.MaxLength { + if len("Sub") > 8192 { return xerrors.Errorf("Value in field \"Sub\" was too long") } @@ -42,7 +42,7 @@ func (t *CarbNode) MarshalCBOR(w io.Writer) error { return err } - if len(t.Sub) > cbg.MaxLength { + if len(t.Sub) > 8192 { return xerrors.Errorf("Slice value in field t.Sub was too long") } @@ -88,7 +88,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -105,7 +105,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Sub: array too large (%d)", extra) } @@ -136,6 +136,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) { t.Sub[i] = c } + } } diff --git a/conformance/chaos/cbor_gen.go b/conformance/chaos/cbor_gen.go index d74ae0946a1..09d48ad103d 100644 --- a/conformance/chaos/cbor_gen.go +++ b/conformance/chaos/cbor_gen.go @@ -37,7 +37,7 @@ func (t *State) MarshalCBOR(w io.Writer) error { } // t.Value (string) (string) - if len(t.Value) > cbg.MaxLength { + if len(t.Value) > 8192 { return xerrors.Errorf("Value in field t.Value was too long") } @@ -49,7 +49,7 @@ func (t *State) MarshalCBOR(w io.Writer) error { } // t.Unmarshallable ([]*chaos.UnmarshallableCBOR) (slice) - if len(t.Unmarshallable) > cbg.MaxLength { + if len(t.Unmarshallable) > 8192 { return xerrors.Errorf("Slice value in field t.Unmarshallable was too long") } @@ -60,6 +60,7 @@ func (t *State) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -90,7 +91,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) (err error) { // t.Value (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -104,7 +105,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) (err error) { return err } - if 
extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Unmarshallable: array too large (%d)", extra) } @@ -142,9 +143,9 @@ func (t *State) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -174,7 +175,7 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error { } // t.Addrs ([]address.Address) (slice) - if len(t.Addrs) > cbg.MaxLength { + if len(t.Addrs) > 8192 { return xerrors.Errorf("Slice value in field t.Addrs was too long") } @@ -185,10 +186,11 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Types ([]cid.Cid) (slice) - if len(t.Types) > cbg.MaxLength { + if len(t.Types) > 8192 { return xerrors.Errorf("Slice value in field t.Types was too long") } @@ -231,10 +233,10 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Branch (chaos.CallerValidationBranch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -260,7 +262,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Addrs: array too large (%d)", extra) } @@ -288,9 +290,9 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Types ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -298,7 +300,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Types: array too large (%d)", extra) } @@ -329,9 +331,9 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { t.Types[i] = c } + } } - return nil } @@ -562,7 +564,7 @@ func (t *SendArgs) MarshalCBOR(w io.Writer) error { } // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { + if len(t.Params) > 2097152 { 
return xerrors.Errorf("Byte array in field t.Params was too long") } @@ -570,9 +572,10 @@ func (t *SendArgs) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err != nil { + if _, err := cw.Write(t.Params); err != nil { return err } + return nil } @@ -638,7 +641,7 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Params: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -649,9 +652,10 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) (err error) { t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + return nil } @@ -670,7 +674,7 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error { } // t.Return (builtin.CBORBytes) (slice) - if len(t.Return) > cbg.ByteArrayMaxLen { + if len(t.Return) > 2097152 { return xerrors.Errorf("Byte array in field t.Return was too long") } @@ -678,7 +682,7 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Return[:]); err != nil { + if _, err := cw.Write(t.Return); err != nil { return err } @@ -692,6 +696,7 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -725,7 +730,7 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Return: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -736,16 +741,17 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) (err error) { t.Return = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Return[:]); err != nil { + if _, err := io.ReadFull(cr, t.Return); err != nil { return err } + // t.Code (exitcode.ExitCode) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { 
case cbg.MajUnsignedInt: extraI = int64(extra) @@ -782,7 +788,7 @@ func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error { } // t.Value (string) (string) - if len(t.Value) > cbg.MaxLength { + if len(t.Value) > 8192 { return xerrors.Errorf("Value in field t.Value was too long") } @@ -803,6 +809,7 @@ func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -832,7 +839,7 @@ func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Value (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -842,10 +849,10 @@ func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Branch (chaos.MutateStateBranch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -893,7 +900,7 @@ func (t *AbortWithArgs) MarshalCBOR(w io.Writer) error { } // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { + if len(t.Message) > 8192 { return xerrors.Errorf("Value in field t.Message was too long") } @@ -937,10 +944,10 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Code (exitcode.ExitCode) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -962,7 +969,7 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Message (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1094,10 +1101,10 @@ func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) (err error) { // t.CurrEpoch (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) diff --git 
a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 57070caedf5..b133930bc7c 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -475,7 +475,7 @@ Inputs: ], "Bw==", 10101, - 21 + 22 ] ``` @@ -826,8 +826,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ] @@ -1426,8 +1425,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ] @@ -2910,6 +2908,14 @@ Inputs: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } ] @@ -3212,6 +3218,14 @@ Inputs: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } } @@ -3556,6 +3570,14 @@ Response: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } } diff --git a/documentation/en/api-v0-methods-provider.md b/documentation/en/api-v0-methods-provider.md index 43846035a0d..a1ccc17c24b 100644 --- a/documentation/en/api-v0-methods-provider.md +++ b/documentation/en/api-v0-methods-provider.md @@ -69,6 +69,14 @@ Inputs: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": 
true }, 9, diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 41119033925..eedfd227e93 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -170,6 +170,8 @@ * [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds) * [StateDecodeParams](#StateDecodeParams) * [StateGetActor](#StateGetActor) + * [StateGetAllAllocations](#StateGetAllAllocations) + * [StateGetAllClaims](#StateGetAllClaims) * [StateGetAllocation](#StateGetAllocation) * [StateGetAllocationForPendingDeal](#StateGetAllocationForPendingDeal) * [StateGetAllocations](#StateGetAllocations) @@ -4732,7 +4734,7 @@ Perms: read Inputs: ```json [ - 21 + 22 ] ``` @@ -4747,7 +4749,7 @@ Perms: read Inputs: ```json [ - 21 + 22 ] ``` @@ -5313,6 +5315,50 @@ Response: } ``` +### StateGetAllAllocations +StateGetAllAllocations returns the all the allocations available in verified registry actor. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateGetAllClaims +StateGetAllClaims returns the all the claims available in verified registry actor. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + ### StateGetAllocation StateGetAllocation returns the allocation for a given address and allocation ID. 
@@ -5513,7 +5559,9 @@ Response: "UpgradeHyggeHeight": 10101, "UpgradeLightningHeight": 10101, "UpgradeThunderHeight": 10101, - "UpgradeWatermelonHeight": 10101 + "UpgradeWatermelonHeight": 10101, + "UpgradeDragonHeight": 10101, + "UpgradePhoenixHeight": 10101 }, "Eip155ChainID": 123 } @@ -5794,8 +5842,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } } @@ -5873,8 +5920,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ``` @@ -6439,7 +6485,7 @@ Inputs: ] ``` -Response: `21` +Response: `22` ### StateReadState StateReadState returns the indicated actor's state. diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index c65c102d39b..287cfce5374 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -115,6 +115,8 @@ * [GasEstimateGasLimit](#GasEstimateGasLimit) * [GasEstimateGasPremium](#GasEstimateGasPremium) * [GasEstimateMessageGas](#GasEstimateMessageGas) +* [Get](#Get) + * [GetActorEvents](#GetActorEvents) * [I](#I) * [ID](#ID) * [Log](#Log) @@ -227,8 +229,11 @@ * [StateDecodeParams](#StateDecodeParams) * [StateEncodeParams](#StateEncodeParams) * [StateGetActor](#StateGetActor) + * [StateGetAllAllocations](#StateGetAllAllocations) + * [StateGetAllClaims](#StateGetAllClaims) * [StateGetAllocation](#StateGetAllocation) * [StateGetAllocationForPendingDeal](#StateGetAllocationForPendingDeal) + * [StateGetAllocationIdForPendingDeal](#StateGetAllocationIdForPendingDeal) * [StateGetAllocations](#StateGetAllocations) * [StateGetBeaconEntry](#StateGetBeaconEntry) * [StateGetClaim](#StateGetClaim) @@ -276,6 +281,8 @@ * [StateVerifiedRegistryRootKey](#StateVerifiedRegistryRootKey) * [StateVerifierStatus](#StateVerifierStatus) * [StateWaitMsg](#StateWaitMsg) +* 
[Subscribe](#Subscribe) + * [SubscribeActorEvents](#SubscribeActorEvents) * [Sync](#Sync) * [SyncCheckBad](#SyncCheckBad) * [SyncCheckpoint](#SyncCheckpoint) @@ -3376,6 +3383,72 @@ Response: } ``` +## Get + + +### GetActorEvents +GetActorEvents returns all user-programmed and built-in actor events that match the given +filter. +This is a request/response API. +Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange +configuration options and also the amount of historical data available in the node. + +This is an EXPERIMENTAL API and may be subject to change. + + +Perms: read + +Inputs: +```json +[ + { + "addresses": [ + "f01234" + ], + "fields": { + "abc": [ + { + "codec": 81, + "value": "ZGRhdGE=" + } + ] + }, + "fromHeight": 1010, + "toHeight": 1020 + } +] +``` + +Response: +```json +[ + { + "entries": [ + { + "Flags": 7, + "Key": "string value", + "Codec": 42, + "Value": "Ynl0ZSBhcnJheQ==" + } + ], + "emitter": "f01234", + "reverted": true, + "height": 10101, + "tipsetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "msgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } +] +``` + ## I @@ -6225,7 +6298,7 @@ Perms: read Inputs: ```json [ - 21 + 22 ] ``` @@ -6240,7 +6313,7 @@ Perms: read Inputs: ```json [ - 21 + 22 ] ``` @@ -6857,6 +6930,50 @@ Response: } ``` +### StateGetAllAllocations +StateGetAllAllocations returns the all the allocations available in verified registry actor. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateGetAllClaims +StateGetAllClaims returns the all the claims available in verified registry actor. 
+ + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + ### StateGetAllocation StateGetAllocation returns the allocation for a given address and allocation ID. @@ -6931,6 +7048,29 @@ Response: } ``` +### StateGetAllocationIdForPendingDeal +StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID + + +Perms: read + +Inputs: +```json +[ + 5432, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `0` + ### StateGetAllocations StateGetAllocations returns the all the allocations for a given client. @@ -7081,7 +7221,9 @@ Response: "UpgradeHyggeHeight": 10101, "UpgradeLightningHeight": 10101, "UpgradeThunderHeight": 10101, - "UpgradeWatermelonHeight": 10101 + "UpgradeWatermelonHeight": 10101, + "UpgradeDragonHeight": 10101, + "UpgradePhoenixHeight": 10101 }, "Eip155ChainID": 123 } @@ -7388,8 +7530,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } } @@ -7467,8 +7608,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ``` @@ -8061,7 +8201,7 @@ Inputs: ] ``` -Response: `21` +Response: `22` ### StateReadState StateReadState returns the indicated actor's state. @@ -8658,6 +8798,77 @@ Response: } ``` +## Subscribe + + +### SubscribeActorEvents +SubscribeActorEvents returns a long-lived stream of all user-programmed and built-in actor +events that match the given filter. +Events that match the given filter are written to the stream in real-time as they are emitted +from the FVM. 
+The response stream is closed when the client disconnects, when a ToHeight is specified and is +reached, or if there is an error while writing an event to the stream. +This API also allows clients to read all historical events matching the given filter before any +real-time events are written to the response stream if the filter specifies an earlier +FromHeight. +Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange +configuration options and also the amount of historical data available in the node. + +Note: this API is only available via websocket connections. +This is an EXPERIMENTAL API and may be subject to change. + + +Perms: read + +Inputs: +```json +[ + { + "addresses": [ + "f01234" + ], + "fields": { + "abc": [ + { + "codec": 81, + "value": "ZGRhdGE=" + } + ] + }, + "fromHeight": 1010, + "toHeight": 1020 + } +] +``` + +Response: +```json +{ + "entries": [ + { + "Flags": 7, + "Key": "string value", + "Codec": 42, + "Value": "Ynl0ZSBhcnJheQ==" + } + ], + "emitter": "f01234", + "reverted": true, + "height": 10101, + "tipsetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "msgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + ## Sync The Sync method group contains methods for interacting with and observing the lotus sync service. diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 2ea89e6ce2d..0f670427546 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] 
VERSION: - 1.25.3-dev + 1.27.0-dev COMMANDS: init Initialize a lotus miner repo diff --git a/documentation/en/cli-lotus-provider.md b/documentation/en/cli-lotus-provider.md index 984d1a2d5ab..be3b5ec6ec0 100644 --- a/documentation/en/cli-lotus-provider.md +++ b/documentation/en/cli-lotus-provider.md @@ -7,7 +7,7 @@ USAGE: lotus-provider [global options] command [command options] [arguments...] VERSION: - 1.25.3-dev + 1.27.0-dev COMMANDS: cli Execute cli commands diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index 0e0fee1570e..043c16202cd 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.25.3-dev + 1.27.0-dev COMMANDS: run Start lotus worker diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index ff62980dc31..65dd92f0125 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] 
VERSION: - 1.25.3-dev + 1.27.0-dev COMMANDS: daemon Start a lotus daemon process @@ -1188,8 +1188,8 @@ COMMANDS: check-client-datacap check verified client remaining bytes check-notary-datacap check a notary's remaining bytes sign-remove-data-cap-proposal allows a notary to sign a Remove Data Cap Proposal - list-allocations List allocations made by client - list-claims List claims made by provider + list-allocations List allocations available in verified registry actor or made by a client if specified + list-claims List claims available in verified registry actor or made by provider if specified remove-expired-allocations remove expired allocations (if no allocations are specified all eligible allocations are removed) remove-expired-claims remove expired claims (if no claims are specified all eligible claims are removed) help, h Shows a list of commands or help for one command @@ -1275,20 +1275,21 @@ OPTIONS: ### lotus filplus list-allocations ``` NAME: - lotus filplus list-allocations - List allocations made by client + lotus filplus list-allocations - List allocations available in verified registry actor or made by a client if specified USAGE: lotus filplus list-allocations [command options] clientAddress OPTIONS: --expired list only expired allocations (default: false) + --json output results in json format (default: false) --help, -h show help ``` ### lotus filplus list-claims ``` NAME: - lotus filplus list-claims - List claims made by provider + lotus filplus list-claims - List claims available in verified registry actor or made by provider if specified USAGE: lotus filplus list-claims [command options] providerAddress diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 9f9836bc0cd..420c192bd4f 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -276,9 +276,8 @@ #EthTxHashMappingLifetimeDays = 0 [Fevm.Events] - # EnableEthRPC enables APIs that # 
DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - # The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag. + # The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag. # # type: bool # env var: LOTUS_FEVM_EVENTS_DISABLEREALTIMEFILTERAPI @@ -286,7 +285,7 @@ # DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events # that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - # The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag. + # The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag. # # type: bool # env var: LOTUS_FEVM_EVENTS_DISABLEHISTORICFILTERAPI @@ -328,6 +327,17 @@ #DatabasePath = "" +[Events] + # EnableActorEventsAPI enables the Actor events API that enables clients to consume events + # emitted by (smart contracts + built-in Actors). + # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be + # disabled by setting their respective Disable* options in Fevm.Events. + # + # type: bool + # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI + #EnableActorEventsAPI = false + + [Index] # EXPERIMENTAL FEATURE. USE WITH CAUTION # EnableMsgIndex enables indexing of messages on chain. diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index a65e82e9504..17fd24fa370 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -702,6 +702,30 @@ # env var: LOTUS_SEALING_USESYNTHETICPOREP #UseSyntheticPoRep = false + # Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3). 
+ # + # type: bool + # env var: LOTUS_SEALING_REQUIREACTIVATIONSUCCESS + #RequireActivationSuccess = false + + # Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIREACTIVATIONSUCCESSUPDATE + #RequireActivationSuccessUpdate = false + + # Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIRENOTIFICATIONSUCCESS + #RequireNotificationSuccess = false + + # Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIRENOTIFICATIONSUCCESSUPDATE + #RequireNotificationSuccessUpdate = false + [Storage] # type: int diff --git a/gateway/node.go b/gateway/node.go index f2464d27496..f3ecb764092 100644 --- a/gateway/node.go +++ b/gateway/node.go @@ -146,6 +146,9 @@ type TargetAPI interface { Web3ClientVersion(ctx context.Context) (string, error) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) + + GetActorEvents(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) + SubscribeActorEvents(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) } var _ TargetAPI = *new(api.FullNode) // gateway depends on latest diff --git a/gateway/proxy_fil.go b/gateway/proxy_fil.go index eb8a354edc5..e7ad3bdb49e 100644 --- a/gateway/proxy_fil.go +++ b/gateway/proxy_fil.go @@ -437,6 +437,20 @@ func (gw *Node) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64 return gw.target.StateWaitMsg(ctx, msg, confidence, limit, allowReplaced) } +func (gw *Node) GetActorEvents(ctx 
context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) { + if err := gw.limit(ctx, stateRateLimitTokens); err != nil { + return nil, err + } + return gw.target.GetActorEvents(ctx, filter) +} + +func (gw *Node) SubscribeActorEvents(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + if err := gw.limit(ctx, stateRateLimitTokens); err != nil { + return nil, err + } + return gw.target.SubscribeActorEvents(ctx, filter) +} + func (gw *Node) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { if err := gw.limit(ctx, stateRateLimitTokens); err != nil { return nil, err diff --git a/gen/inlinegen-data.json b/gen/inlinegen-data.json index cf72d24fa9c..70c8fff61f4 100644 --- a/gen/inlinegen-data.json +++ b/gen/inlinegen-data.json @@ -1,7 +1,7 @@ { - "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], - "latestActorsVersion": 12, + "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + "latestActorsVersion": 13, - "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], - "latestNetworkVersion": 21 + "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], + "latestNetworkVersion": 22 } diff --git a/gen/main.go b/gen/main.go index d84343739ea..f1fe3876ac0 100644 --- a/gen/main.go +++ b/gen/main.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/cmd/lotus-shed/shedgen" "github.com/filecoin-project/lotus/node/hello" "github.com/filecoin-project/lotus/paychmgr" + "github.com/filecoin-project/lotus/storage/pipeline/piece" sectorstorage "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -64,9 +65,7 @@ func main() { api.SealedRefs{}, api.SealTicket{}, api.SealSeed{}, - api.PieceDealInfo{}, api.SectorPiece{}, - api.DealSchedule{}, ) if err != nil { fmt.Println(err) @@ -111,6 +110,15 @@ func 
main() { os.Exit(1) } + err = gen.WriteMapEncodersToFile("./storage/pipeline/piece/cbor_gen.go", "piece", + piece.PieceDealInfo{}, + piece.DealSchedule{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = gen.WriteMapEncodersToFile("./storage/sealer/cbor_gen.go", "sealer", sectorstorage.Call{}, sectorstorage.WorkState{}, diff --git a/go.mod b/go.mod index 583a1760e78..b8f6c38f2e3 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ retract v1.20.2 // Wrongfully cherry picked PR, use v1.20.2+ instead. require ( contrib.go.opencensus.io/exporter/prometheus v0.4.2 - github.com/BurntSushi/toml v1.2.1 + github.com/BurntSushi/toml v1.3.0 github.com/DataDog/zstd v1.4.5 github.com/GeertJohan/go.rice v1.0.3 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee @@ -23,14 +23,14 @@ require ( github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/dgraph-io/badger/v2 v2.2007.4 github.com/docker/go-units v0.5.0 - github.com/drand/drand v1.4.9 - github.com/drand/kyber v1.1.15 + github.com/drand/drand v1.5.7 + github.com/drand/kyber v1.2.0 github.com/dustin/go-humanize v1.0.1 github.com/elastic/go-elasticsearch/v7 v7.14.0 github.com/elastic/go-sysinfo v1.7.0 github.com/elastic/gosigar v0.14.2 github.com/etclabscore/go-openrpc-reflect v0.0.36 - github.com/fatih/color v1.13.0 + github.com/fatih/color v1.15.0 github.com/filecoin-project/dagstore v0.5.2 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20220519234331-bfd1f5f9fe38 github.com/filecoin-project/go-address v1.1.0 @@ -47,7 +47,7 @@ require ( github.com/filecoin-project/go-jsonrpc v0.3.1 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-paramfetch v0.0.4 - github.com/filecoin-project/go-state-types v0.12.8 + github.com/filecoin-project/go-state-types v0.13.0-rc.2 github.com/filecoin-project/go-statemachine v1.0.3 github.com/filecoin-project/go-statestore v0.2.0 github.com/filecoin-project/go-storedcounter v0.1.0 @@ -90,7 +90,6 @@ require ( 
github.com/ipfs/go-fs-lock v0.0.7 github.com/ipfs/go-graphsync v0.14.6 github.com/ipfs/go-ipfs-blocksutil v0.0.1 - github.com/ipfs/go-ipfs-exchange-offline v0.3.0 github.com/ipfs/go-ipld-cbor v0.0.6 github.com/ipfs/go-ipld-format v0.5.0 github.com/ipfs/go-log/v2 v2.5.1 @@ -122,7 +121,7 @@ require ( github.com/minio/sha256-simd v1.0.1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 - github.com/multiformats/go-multiaddr v0.12.1 + github.com/multiformats/go-multiaddr v0.12.2 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 @@ -141,7 +140,7 @@ require ( github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed github.com/urfave/cli/v2 v2.25.5 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba - github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f + github.com/whyrusleeping/cbor-gen v0.1.0 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/xeipuuv/gojsonschema v1.2.0 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 @@ -156,11 +155,11 @@ require ( go.uber.org/fx v1.20.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.17.0 + golang.org/x/crypto v0.18.0 golang.org/x/net v0.14.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.15.0 - golang.org/x/term v0.15.0 + golang.org/x/sys v0.16.0 + golang.org/x/term v0.16.0 golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 @@ -188,9 +187,9 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgryski/go-farm 
v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/drand/kyber-bls12381 v0.2.3 // indirect + github.com/drand/kyber-bls12381 v0.3.1 // indirect github.com/elastic/go-windows v1.0.0 // indirect github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect @@ -201,10 +200,9 @@ require ( github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gdamore/encoding v1.0.0 // indirect github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.5 // indirect @@ -214,7 +212,7 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.1.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect @@ -256,7 +254,7 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/kilic/bls12-381 v0.1.0 // indirect github.com/klauspost/compress v1.16.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect @@ -290,7 +288,7 @@ require ( github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.42.0 // 
indirect + github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/quic-go/qpack v0.4.0 // indirect @@ -301,7 +299,7 @@ require ( github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v2.18.12+incompatible // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.2 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/tidwall/gjson v1.14.4 // indirect github.com/twmb/murmur3 v1.1.6 // indirect @@ -324,7 +322,7 @@ require ( golang.org/x/mod v0.12.0 // indirect golang.org/x/text v0.14.0 // indirect gonum.org/v1/gonum v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect google.golang.org/grpc v1.55.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 566ce37c634..3efa6b8e2c9 100644 --- a/go.sum +++ b/go.sum @@ -46,8 +46,8 @@ github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOv github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.0 h1:Ws8e5YmnrGEHzZEzg0YvK/7COGYtTC5PbaH9oSSbgfA= +github.com/BurntSushi/toml v1.3.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= @@ -96,6 +96,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8V github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/ardanlabs/darwin/v2 v2.0.0 h1:XCisQMgQ5EG+ZvSEcADEo+pyfIMKyWAGnn5o2TgriYE= +github.com/ardanlabs/darwin/v2 v2.0.0/go.mod h1:MubZ2e9DAYGaym0mClSOi183NYahrrfKxvSy1HMhoes= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -232,8 +234,8 @@ github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdw github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -242,19 +244,12 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUn github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.4.9 h1:WE8Jf/l+7B/rheCMCLZTp5xk0/a05t+ciwBvORq9jXM= -github.com/drand/drand v1.4.9/go.mod h1:vsmJ/kDoVLv1NC0nFihzBPmIFvMGmYtgJewzRBBRVSc= -github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= -github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= -github.com/drand/kyber v1.1.10/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= -github.com/drand/kyber v1.1.15 h1:YNL02FPOA98GmlIhh5FuEJWhz1ZCp6tOUVFN7ujBJPE= -github.com/drand/kyber v1.1.15/go.mod h1:tw0l70U6aWCkc4vDr8u/canpOOOiUNJlzsmeElhBfe0= -github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= -github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= -github.com/drand/kyber-bls12381 v0.2.3 h1:wueWtqjj71wnwm6fYR8MAQk4q8bKVK9WukrGGcaVxzk= -github.com/drand/kyber-bls12381 v0.2.3/go.mod h1:FsudUZf6Xu61u/gYrDHEHf6lKIKluJdnX7WJe4hkMh4= +github.com/drand/drand v1.5.7 h1:5f2D5aH1nEfVI9S6tl2p9bgIDMZ92oltmiY12Kh+eYU= +github.com/drand/drand v1.5.7/go.mod h1:jrJ0244yOHNL5V04vazk3mFatjAWm3i6dg6URWwgbXk= 
+github.com/drand/kyber v1.2.0 h1:22SbBxsKbgQnJUoyYKIfG909PhBsj0vtANeu4BX5xgE= +github.com/drand/kyber v1.2.0/go.mod h1:6TqFlCc7NGOiNVTF9pF2KcDRfllPd9XOkExuG5Xtwfo= +github.com/drand/kyber-bls12381 v0.3.1 h1:KWb8l/zYTP5yrvKTgvhOrk2eNPscbMiUOIeWBnmUxGo= +github.com/drand/kyber-bls12381 v0.3.1/go.mod h1:H4y9bLPu7KZA/1efDg+jtJ7emKx+ro3PU7/jWUVt140= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -283,8 +278,8 @@ github.com/etclabscore/go-openrpc-reflect v0.0.36 h1:kSqNB2U8RVoW4si+4fsv13NGNkR github.com/etclabscore/go-openrpc-reflect v0.0.36/go.mod h1:0404Ky3igAasAOpyj1eESjstTyneBAIk5PgJFbK4s5E= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/filecoin-project/dagstore v0.5.2 h1:Nd6oXdnolbbVhpMpkYT5PJHOjQp4OBSntHpMV5pxj3c= @@ -349,8 +344,8 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types 
v0.11.2-0.20230712101859-8f37624fa540/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= -github.com/filecoin-project/go-state-types v0.12.8 h1:W/UObdAsv+LbB9EfyLg92DSYoatzUWmlfV8FGyh30VA= -github.com/filecoin-project/go-state-types v0.12.8/go.mod h1:gR2NV0CSGSQwopxF+3In9nDh1sqvoYukLcs5vK0AHCA= +github.com/filecoin-project/go-state-types v0.13.0-rc.2 h1:JHyDDx/nV8sbQNgjUfhumiGWh8Dedc8psbiVtD0YOh0= +github.com/filecoin-project/go-state-types v0.13.0-rc.2/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.3 h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk= github.com/filecoin-project/go-statemachine v1.0.3/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= @@ -426,8 +421,9 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= @@ -480,8 +476,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/gogo/status v1.1.1 
h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= +github.com/golang/glog v1.1.1/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -589,8 +585,8 @@ github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487 h1:NyaWOSkqFK github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487/go.mod h1:a1eRkbhd3DYpRH2lnuUsVG+QMTI+v0hGnsis8C9hMrA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -900,6 +896,8 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= @@ -929,9 +927,6 @@ github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2vi github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= -github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= -github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.1.0/go.mod 
h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -946,9 +941,8 @@ github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koalacxr/quantile v0.0.1 h1:wAW+SQ286Erny9wOjVww96t8ws+x5Zj6AKHDULUK+o0= github.com/koalacxr/quantile v0.0.1/go.mod h1:bGN/mCZLZ4lrSDHRQ6Lglj9chowGux8sGUIND+DQeD0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -973,8 +967,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod 
h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= @@ -1207,14 +1201,12 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -1291,8 +1283,8 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.12.1 
h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk= -github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE= +github.com/multiformats/go-multiaddr v0.12.2 h1:9G9sTY/wCYajKa9lyfWPmpZAwe6oV+Wb1zcmMS1HG24= +github.com/multiformats/go-multiaddr v0.12.2/go.mod h1:GKyaTYjZRdcUhyOetrxTk9z0cW+jA/YrnqTOvKgi44M= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1358,9 +1350,8 @@ github.com/nikkolasg/hexjson v0.1.0 h1:Cgi1MSZVQFoJKYeRpBNEcdF3LB+Zo4fYKsDz7h8uJ github.com/nikkolasg/hexjson v0.1.0/go.mod h1:fbGbWFZ0FmJMFbpCMtJpwb0tudVxSSZ+Es2TsCg57cA= github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -1462,8 +1453,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod 
h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1500,8 +1491,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -1516,8 +1507,8 @@ github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/samuel/go-zookeeper 
v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= -github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sercand/kuberesolver/v4 v4.0.0 h1:frL7laPDG/lFm5n98ODmWnn+cvPpzlkf3LhzuPhcHP4= +github.com/sercand/kuberesolver/v4 v4.0.0/go.mod h1:F4RGyuRmMAjeXHKL+w4P7AwUnPceEAPAhxUgXZjKgvM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM= github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -1549,8 +1540,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= +github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= @@ -1599,7 +1590,6 @@ github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= @@ -1652,8 +1642,8 @@ github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvS github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/weaveworks/common v0.0.0-20220810113439-c65105d60b18 h1:JN4YR/TNWiZEAHHImrVA2u4DPI+aqPOar23ICUnYZTQ= -github.com/weaveworks/common v0.0.0-20220810113439-c65105d60b18/go.mod h1:YfOOLoW1Q/jIIu0WLeSwgStmrKjuJEZSKTAUc+0KFvE= +github.com/weaveworks/common v0.0.0-20230531151736-e2613bee6b73 h1:CMM9+/AgM77vaMXMQedzqPRMuNwjbI0EcdofPqxc9F8= +github.com/weaveworks/common v0.0.0-20230531151736-e2613bee6b73/go.mod h1:rgbeLfJUtEr+G74cwFPR1k/4N0kDeaeSv/qhUNE4hm8= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= @@ -1674,8 +1664,8 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:f 
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f h1:SBuSxXJL0/ZJMtTxbXZgHZkThl9dNrzyaNhlyaqscRo= -github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.1.0 h1:Jneeq3V5enErVcuL0NKEbD1Gi+iOvEeFhXOV1S1Fc6g= +github.com/whyrusleeping/cbor-gen v0.1.0/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -1718,15 +1708,11 @@ github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XD github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis= go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= -go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= -go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= -go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= -go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= go.dedis.ch/protobuf v1.0.11/go.mod 
h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1801,7 +1787,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1818,13 +1803,10 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1834,8 +1816,9 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1982,7 +1965,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2011,7 +1993,6 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2037,9 +2018,7 @@ golang.org/x/sys 
v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2063,19 +2042,18 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2083,8 +2061,9 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2236,8 +2215,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go index 
fb28f450974..70da4be5a51 100644 --- a/itests/deadlines_test.go +++ b/itests/deadlines_test.go @@ -4,6 +4,7 @@ package itests import ( "bytes" "context" + "strings" "testing" "time" @@ -16,7 +17,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner" "github.com/filecoin-project/go-state-types/exitcode" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" @@ -183,13 +183,17 @@ func TestDeadlineToggling(t *testing.T) { cr, err := cid.Parse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz") require.NoError(t, err) - params := &minertypes.SectorPreCommitInfo{ - Expiration: 2880 * 300, - SectorNumber: 22, - SealProof: kit.TestSpt, - - SealedCID: cr, - SealRandEpoch: head.Height() - 200, + params := &miner.PreCommitSectorBatchParams2{ + Sectors: []miner.SectorPreCommitInfo{ + { + Expiration: 2880 * 300, + SectorNumber: 22, + SealProof: kit.TestSpt, + + SealedCID: cr, + SealRandEpoch: head.Height() - 200, + }, + }, } enc := new(bytes.Buffer) @@ -199,7 +203,7 @@ func TestDeadlineToggling(t *testing.T) { To: maddrE, From: defaultFrom, Value: types.FromFil(1), - Method: builtin.MethodsMiner.PreCommitSector, + Method: builtin.MethodsMiner.PreCommitSectorBatch2, Params: enc.Bytes(), }, nil) require.NoError(t, err) @@ -286,14 +290,18 @@ func TestDeadlineToggling(t *testing.T) { sp, aerr := actors.SerializeParams(terminateSectorParams) require.NoError(t, aerr) - smsg, err := client.MpoolPushMessage(ctx, &types.Message{ - From: defaultFrom, - To: maddrD, - Method: builtin.MethodsMiner.TerminateSectors, - - Value: big.Zero(), - Params: sp, - }, nil) + var smsg *types.SignedMessage + require.Eventually(t, func() bool { + smsg, err = client.MpoolPushMessage(ctx, &types.Message{ + From: defaultFrom, + To: maddrD, + Method: builtin.MethodsMiner.TerminateSectors, + + 
Value: big.Zero(), + Params: sp, + }, nil) + return err == nil || !strings.Contains(err.Error(), "cannot terminate sectors in immutable deadline") + }, 60*time.Second, 100*time.Millisecond) require.NoError(t, err) t.Log("sent termination message:", smsg.Cid()) diff --git a/itests/direct_data_onboard_test.go b/itests/direct_data_onboard_test.go new file mode 100644 index 00000000000..703419ad123 --- /dev/null +++ b/itests/direct_data_onboard_test.go @@ -0,0 +1,408 @@ +package itests + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/multiformats/go-multicodec" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-commp-utils/nonffi" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + market2 "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + minertypes "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/lib/must" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage/pipeline/piece" +) + +func TestActors13Migration(t *testing.T) { + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + client, _, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.UpgradeSchedule(stmgr.Upgrade{ + Network: 
network.Version21, + Height: -1, + }, stmgr.Upgrade{ + Network: network.Version22, + Height: 10, + Migration: filcns.UpgradeActorsV13, + })) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + // mine until 15 + client.WaitTillChain(ctx, kit.HeightAtLeast(15)) +} + +func TestOnboardRawPiece(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC()) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + pieceSize := abi.PaddedPieceSize(2048).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + KeepUnsealed: true, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + // wait for sector to commit + + // wait for sector to commit and enter proving state + toCheck := map[abi.SectorNumber]struct{}{ + so.Sector: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) + + si, err := miner.SectorsStatus(ctx, so.Sector, false) + require.NoError(t, err) + require.Equal(t, dc.PieceCID, *si.CommD) +} + +func TestOnboardMixedMarketDDO(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) { + sc.RequireActivationSuccess = true + sc.RequireNotificationSuccess = true + })) 
+ ens.InterconnectAll().BeginMiningMustPost(blocktime) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + var pieces []abi.PieceInfo + var dealID abi.DealID + + { + // market piece + pieceSize := abi.PaddedPieceSize(2048 / 2).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + pieces = append(pieces, dc) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + // PSD + + psdParams := market2.PublishStorageDealsParams{ + Deals: []market2.ClientDealProposal{ + makeMarketDealProposal(t, client, miner, dc.PieceCID, pieceSize.Padded(), head.Height()+2880*2, head.Height()+2880*400), + }, + } + + psdMsg := &types.Message{ + To: market.Address, + From: mi.Worker, + + Method: market.Methods.PublishStorageDeals, + Params: must.One(cborutil.Dump(&psdParams)), + } + + smsg, err := client.MpoolPushMessage(ctx, psdMsg, nil) + require.NoError(t, err) + + r, err := client.StateWaitMsg(ctx, smsg.Cid(), 1, stmgr.LookbackNoLimit, true) + require.NoError(t, err) + + require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) + + nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK) + require.NoError(t, err) + + res, err := market.DecodePublishStorageDealsReturn(r.Receipt.Return, nv) + require.NoError(t, err) + dealID = must.One(res.DealIDs())[0] + + mcid := smsg.Cid() + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: &mcid, + DealID: dealID, + DealProposal: &psdParams.Deals[0].Proposal, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + PieceActivationManifest: nil, + KeepUnsealed: true, + }) + require.NoError(t, err) + + require.Equal(t, abi.PaddedPieceSize(0), so.Offset) + require.Equal(t, 
abi.SectorNumber(2), so.Sector) + } + + { + // raw ddo piece + + pieceSize := abi.PaddedPieceSize(2048 / 2).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + pieces = append(pieces, dc) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + KeepUnsealed: false, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + require.Equal(t, abi.PaddedPieceSize(1024), so.Offset) + require.Equal(t, abi.SectorNumber(2), so.Sector) + } + + toCheck := map[abi.SectorNumber]struct{}{ + 2: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) + + expectCommD, err := nonffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg2KiBV1_1, pieces) + require.NoError(t, err) + + si, err := miner.SectorsStatus(ctx, 2, false) + require.NoError(t, err) + require.Equal(t, expectCommD, *si.CommD) + + ds, err := client.StateMarketStorageDeal(ctx, dealID, types.EmptyTSK) + require.NoError(t, err) + + require.NotEqual(t, -1, ds.State.SectorStartEpoch) + + { + deals, err := client.StateMarketDeals(ctx, types.EmptyTSK) + require.NoError(t, err) + for id, deal := range deals { + fmt.Println("Deal", id, deal.Proposal.PieceCID, deal.Proposal.PieceSize, deal.Proposal.Client, deal.Proposal.Provider) + } + + // check actor events, verify deal-published is as expected + minerIdAddr, err := client.StateLookupID(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + minerId, err := address.IDFromAddress(minerIdAddr) + require.NoError(t, err) + caddr, err := 
client.WalletDefaultAddress(context.Background()) + require.NoError(t, err) + clientIdAddr, err := client.StateLookupID(ctx, caddr, types.EmptyTSK) + require.NoError(t, err) + clientId, err := address.IDFromAddress(clientIdAddr) + require.NoError(t, err) + + fmt.Println("minerId", minerId, "clientId", clientId) + for _, piece := range pieces { + fmt.Println("piece", piece.PieceCID, piece.Size) + } + + // check "deal-published" actor event + var epochZero abi.ChainEpoch + allEvents, err := miner.FullNode.GetActorEvents(ctx, &types.ActorEventFilter{ + FromHeight: &epochZero, + }) + require.NoError(t, err) + for _, key := range []string{"deal-published", "deal-activated", "sector-precommitted", "sector-activated"} { + var found bool + keyBytes := must.One(ipld.Encode(basicnode.NewString(key), dagcbor.Encode)) + for _, event := range allEvents { + for _, e := range event.Entries { + if e.Key == "$type" && bytes.Equal(e.Value, keyBytes) { + found = true + switch key { + case "deal-published", "deal-activated": + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: keyBytes}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(2), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, + } + require.ElementsMatch(t, expectedEntries, event.Entries) + } + break + } + } + } + require.True(t, found, "expected to find event %s", key) + } + } +} + +func TestOnboardRawPieceSnap(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) { + 
sc.PreferNewSectorsForDeals = false + sc.MakeNewSectorForDeals = false + sc.MakeCCSectorsAvailable = true + sc.AggregateCommits = false + })) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + miner.PledgeSectors(ctx, 1, 0, nil) + sl, err := miner.SectorsListNonGenesis(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + + snum := sl[0] + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + client.WaitForSectorActive(ctx, t, snum, maddr) + + pieceSize := abi.PaddedPieceSize(2048).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, // todo set so that it works with the sector + }, + KeepUnsealed: false, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + // wait for sector to commit + + // wait for sector to commit and enter proving state + toCheck := map[abi.SectorNumber]struct{}{ + so.Sector: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) +} + +func makeMarketDealProposal(t *testing.T, client *kit.TestFullNode, miner *kit.TestMiner, data cid.Cid, ps abi.PaddedPieceSize, start, end abi.ChainEpoch) market2.ClientDealProposal { + ca, err := client.WalletDefaultAddress(context.Background()) + require.NoError(t, err) + + ma, err := miner.ActorAddress(context.Background()) + require.NoError(t, err) + + dp := market2.DealProposal{ + PieceCID: data, + PieceSize: ps, + VerifiedDeal: false, + Client: ca, + Provider: ma, + Label: 
must.One(market2.NewLabelFromString("wat")), + StartEpoch: start, + EndEpoch: end, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: abi.TokenAmount{}, // below + ClientCollateral: big.Zero(), + } + + cb, err := client.StateDealProviderCollateralBounds(context.Background(), dp.PieceSize, dp.VerifiedDeal, types.EmptyTSK) + require.NoError(t, err) + dp.ProviderCollateral = big.Div(big.Mul(cb.Min, big.NewInt(2)), big.NewInt(2)) + + buf, err := cborutil.Dump(&dp) + require.NoError(t, err) + sig, err := client.WalletSign(context.Background(), ca, buf) + require.NoError(t, err) + + return market2.ClientDealProposal{ + Proposal: dp, + ClientSignature: *sig, + } + +} diff --git a/itests/direct_data_onboard_verified_test.go b/itests/direct_data_onboard_verified_test.go new file mode 100644 index 00000000000..0c3de2448d2 --- /dev/null +++ b/itests/direct_data_onboard_verified_test.go @@ -0,0 +1,713 @@ +package itests + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "strings" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/codec/dagjson" + "github.com/ipld/go-ipld-prime/datamodel" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/node/bindnode" + "github.com/multiformats/go-multicodec" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + verifregtypes13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + datacap2 "github.com/filecoin-project/go-state-types/builtin/v9/datacap" + verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + lapi 
"github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/datacap" + minertypes "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet/key" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/lib/must" + "github.com/filecoin-project/lotus/storage/pipeline/piece" +) + +func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + rootKey, err := key.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifierKey, err := key.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifiedClientKey, err := key.GenerateKey(types.KTBLS) + require.NoError(t, err) + + bal, err := types.ParseFIL("100fil") + require.NoError(t, err) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), + kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifierKey, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifiedClientKey, abi.NewTokenAmount(bal.Int64())), + ) + + /* --- Setup subscription channels for ActorEvents --- */ + + // subscribe only to miner's actor events + minerEvtsChan, err := miner.FullNode.SubscribeActorEvents(ctx, &types.ActorEventFilter{ + Addresses: []address.Address{miner.ActorAddr}, + }) + require.NoError(t, err) + + // subscribe only to sector-activated events + sectorActivatedCbor := must.One(ipld.Encode(basicnode.NewString("sector-activated"), dagcbor.Encode)) + sectorActivatedEvtsChan, err := miner.FullNode.SubscribeActorEvents(ctx, &types.ActorEventFilter{ + Fields: map[string][]types.ActorEventBlock{ + "$type": { + {Codec: uint64(multicodec.Cbor), Value: 
sectorActivatedCbor}, + }, + }, + }) + require.NoError(t, err) + + /* --- Start mining --- */ + + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + minerId, err := address.IDFromAddress(miner.ActorAddr) + require.NoError(t, err) + + miner.PledgeSectors(ctx, 1, 0, nil) + sl, err := miner.SectorsListNonGenesis(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + + snum := sl[0] + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + client.WaitForSectorActive(ctx, t, snum, maddr) + + /* --- Setup verified registry and client and allocate datacap to client */ + + verifierAddr, verifiedClientAddr := ddoVerifiedSetupVerifiedClient(ctx, t, client, rootKey, verifierKey, verifiedClientKey) + + /* --- Prepare piece for onboarding --- */ + + pieceSize := abi.PaddedPieceSize(2048).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + + /* --- Allocate datacap for the piece by the verified client --- */ + + clientId, allocationId := ddoVerifiedSetupAllocations(ctx, t, client, minerId, dc, verifiedClientAddr) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + // subscribe to actor events up until the current head + initialEventsChan, err := miner.FullNode.SubscribeActorEvents(ctx, &types.ActorEventFilter{ + FromHeight: epochPtr(0), + ToHeight: epochPtr(int64(head.Height())), + }) + require.NoError(t, err) + + /* --- Onboard the piece --- */ + + so, si := ddoVerifiedOnboardPiece(ctx, t, miner, clientId, allocationId, dc, pieceData) + + // check that we have one allocation because the real allocation has been claimed by the miner for the piece + allocations, err := client.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + require.Len(t, allocations, 1) // allocation has been claimed, leaving the bogus one + + ddoVerifiedRemoveAllocations(ctx, t, 
client, verifiedClientAddr, clientId) + + // check that we have no more allocations + allocations, err = client.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + require.Len(t, allocations, 0) + + /* --- Tests for ActorEvents --- */ + + t.Logf("Inspecting events as they appear in message receipts") + + // construct ActorEvents from messages and receipts + eventsFromMessages := ddoVerifiedBuildActorEventsFromMessages(ctx, t, miner.FullNode) + fmt.Println("Events from message receipts:") + printEvents(t, eventsFromMessages) + + // check for precisely these events and ensure they contain what we expect; don't be strict on + // other events to make sure we're forward-compatible as new events are added + + { + precommitedEvents := filterEvents(eventsFromMessages, "sector-precommitted") + require.Len(t, precommitedEvents, 2) + + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-precommitted"), dagcbor.Encode))}, + // first sector to start mining is CC + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(so.Sector)-1), dagcbor.Encode))}, + } + require.ElementsMatch(t, expectedEntries, precommitedEvents[0].Entries) + + // second sector has our piece + expectedEntries[1].Value = must.One(ipld.Encode(basicnode.NewInt(int64(so.Sector)), dagcbor.Encode)) + require.ElementsMatch(t, expectedEntries, precommitedEvents[1].Entries) + } + + { + activatedEvents := filterEvents(eventsFromMessages, "sector-activated") + require.Len(t, activatedEvents, 2) + + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-activated"), dagcbor.Encode))}, + // first sector to start mining is CC + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: 
must.One(ipld.Encode(basicnode.NewInt(int64(so.Sector)-1), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(datamodel.Null, dagcbor.Encode))}, + } + require.ElementsMatch(t, expectedEntries, activatedEvents[0].Entries) + + // second sector has our piece, and only our piece, so usealed-cid matches piece-cid, + // unfortunately we don't have a case with multiple pieces + expectedEntries[1].Value = must.One(ipld.Encode(basicnode.NewInt(int64(so.Sector)), dagcbor.Encode)) + expectedEntries[2].Value = must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: dc.PieceCID}), dagcbor.Encode)) + expectedEntries = append(expectedEntries, + types.EventEntry{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: dc.PieceCID}), dagcbor.Encode))}, + types.EventEntry{Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(pieceSize.Padded())), dagcbor.Encode))}, + ) + require.ElementsMatch(t, expectedEntries, activatedEvents[1].Entries) + } + + { + verifierBalanceEvents := filterEvents(eventsFromMessages, "verifier-balance") + require.Len(t, verifierBalanceEvents, 2) + + verifierIdAddr, err := client.StateLookupID(ctx, verifierAddr, types.EmptyTSK) + require.NoError(t, err) + verifierId, err := address.IDFromAddress(verifierIdAddr) + require.NoError(t, err) + + verifierEntry := types.EventEntry{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "verifier", Value: must.One(ipld.Encode(basicnode.NewInt(int64(verifierId)), dagcbor.Encode))} + require.Contains(t, verifierBalanceEvents[0].Entries, verifierEntry) + require.Contains(t, verifierBalanceEvents[1].Entries, verifierEntry) + } + + { + allocationEvents := filterEvents(eventsFromMessages, "allocation") + require.Len(t, allocationEvents, 2) + + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: 
"$type", Value: must.One(ipld.Encode(basicnode.NewString("allocation"), dagcbor.Encode))}, + // first, bogus, allocation + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)-1), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + } + require.ElementsMatch(t, expectedEntries, allocationEvents[0].Entries) + + // the second, real allocation + expectedEntries[1].Value = must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)), dagcbor.Encode)) + require.ElementsMatch(t, expectedEntries, allocationEvents[1].Entries) + } + + { + allocationEvents := filterEvents(eventsFromMessages, "allocation-removed") + require.Len(t, allocationEvents, 1) + + // manual removal of the bogus allocation + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("allocation-removed"), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)-1), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + } + require.ElementsMatch(t, expectedEntries, allocationEvents[0].Entries) + } + + { + claimEvents := filterEvents(eventsFromMessages, "claim") + require.Len(t, claimEvents, 1) + + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("claim"), 
dagcbor.Encode))}, + // claimId inherits from its original allocationId + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + } + require.ElementsMatch(t, expectedEntries, claimEvents[0].Entries) + } + + // verify that we can trace a datacap allocation through to a claim with the events, since this + // information is not completely available from the state tree + claims := ddoVerifiedBuildClaimsFromMessages(ctx, t, eventsFromMessages, miner.FullNode) + for _, claim := range claims { + p, err := address.NewIDAddress(uint64(claim.Provider)) + require.NoError(t, err) + c, err := address.NewIDAddress(uint64(claim.Client)) + require.NoError(t, err) + fmt.Printf("Claim<provider=%s, client=%s, data=%s, size=%d, termMin=%d, termMax=%d, termStart=%d, sector=%d>\n", + p, c, claim.Data, claim.Size, claim.TermMin, claim.TermMax, claim.TermStart, claim.Sector) + } + require.Equal(t, []*verifregtypes9.Claim{ + { + Provider: abi.ActorID(minerId), + Client: clientId, + Data: dc.PieceCID, + Size: dc.Size, + TermMin: verifregtypes13.MinimumVerifiedAllocationTerm, + TermMax: verifregtypes13.MaximumVerifiedAllocationTerm, + TermStart: si.Activation, + Sector: so.Sector, + }, + }, claims) + + // construct ActorEvents from GetActorEvents API + t.Logf("Inspecting full events list from GetActorEvents") + allEvtsFromGetAPI, err := miner.FullNode.GetActorEvents(ctx, &types.ActorEventFilter{ + FromHeight: epochPtr(0), + }) + require.NoError(t, err) + fmt.Println("Events from GetActorEvents:") + printEvents(t, allEvtsFromGetAPI) + // compare events from messages and receipts with events from GetActorEvents API + require.Equal(t, eventsFromMessages, allEvtsFromGetAPI) + + // construct ActorEvents from 
subscription channel for just the miner actor + t.Logf("Inspecting only miner's events list from SubscribeActorEvents") + var subMinerEvts []*types.ActorEvent + for evt := range minerEvtsChan { + subMinerEvts = append(subMinerEvts, evt) + if len(subMinerEvts) == 4 { + break + } + } + var allMinerEvts []*types.ActorEvent + for _, evt := range eventsFromMessages { + if evt.Emitter == miner.ActorAddr { + allMinerEvts = append(allMinerEvts, evt) + } + } + // compare events from messages and receipts with events from subscription channel + require.Equal(t, allMinerEvts, subMinerEvts) + + // construct ActorEvents from subscription channels for just the sector-activated events + var sectorActivatedEvts []*types.ActorEvent + for _, evt := range eventsFromMessages { + for _, entry := range evt.Entries { + if entry.Key == "$type" && bytes.Equal(entry.Value, sectorActivatedCbor) { + sectorActivatedEvts = append(sectorActivatedEvts, evt) + break + } + } + } + require.Len(t, sectorActivatedEvts, 2) // sanity check + + t.Logf("Inspecting only sector-activated events list from real-time SubscribeActorEvents") + var subscribedSectorActivatedEvts []*types.ActorEvent + for evt := range sectorActivatedEvtsChan { + subscribedSectorActivatedEvts = append(subscribedSectorActivatedEvts, evt) + if len(subscribedSectorActivatedEvts) == 2 { + break + } + } + // compare events from messages and receipts with events from subscription channel + require.Equal(t, sectorActivatedEvts, subscribedSectorActivatedEvts) + + // same thing but use historical event fetching to see the same list + t.Logf("Inspecting only sector-activated events list from historical SubscribeActorEvents") + sectorActivatedEvtsChan, err = miner.FullNode.SubscribeActorEvents(ctx, &types.ActorEventFilter{ + Fields: map[string][]types.ActorEventBlock{ + "$type": { + {Codec: uint64(multicodec.Cbor), Value: sectorActivatedCbor}, + }, + }, + FromHeight: epochPtr(0), + }) + require.NoError(t, err) + subscribedSectorActivatedEvts = 
subscribedSectorActivatedEvts[:0] + for evt := range sectorActivatedEvtsChan { + subscribedSectorActivatedEvts = append(subscribedSectorActivatedEvts, evt) + if len(subscribedSectorActivatedEvts) == 2 { + break + } + } + // compare events from messages and receipts with events from subscription channel + require.Equal(t, sectorActivatedEvts, subscribedSectorActivatedEvts) + + // check that our `ToHeight` filter works as expected + t.Logf("Inspecting only initial list of events SubscribeActorEvents with ToHeight") + var initialEvents []*types.ActorEvent + for evt := range initialEventsChan { + initialEvents = append(initialEvents, evt) + } + // sector-precommitted, sector-activated, verifier-balance, verifier-balance, allocation, allocation + require.Equal(t, eventsFromMessages[0:6], initialEvents) + + // construct ActorEvents from subscription channel for all actor events + t.Logf("Inspecting full events list from historical SubscribeActorEvents") + allEvtsChan, err := miner.FullNode.SubscribeActorEvents(ctx, &types.ActorEventFilter{ + FromHeight: epochPtr(0), + }) + require.NoError(t, err) + var prefillEvts []*types.ActorEvent + for evt := range allEvtsChan { + prefillEvts = append(prefillEvts, evt) + if len(prefillEvts) == len(eventsFromMessages) { + break + } + } + // compare events from messages and receipts with events from subscription channel + require.Equal(t, eventsFromMessages, prefillEvts) + t.Logf("All done comparing events") + + // NOTE: There is a delay in finishing this test because the SubscribeActorEvents + // with the ToHeight (initialEventsChan) has to wait at least a full actual epoch before + // realising that there's no more events for that filter. itests run with a different block + // speed than the ActorEventHandler is aware of. 
+} + +func ddoVerifiedSetupAllocations( + ctx context.Context, + t *testing.T, + node v1api.FullNode, + minerId uint64, + dc abi.PieceInfo, + verifiedClientAddr address.Address, +) (clientID abi.ActorID, allocationID verifregtypes13.AllocationId) { + + head, err := node.ChainHead(ctx) + require.NoError(t, err) + + // design this one to expire so we can observe allocation-removed + expiringAllocationHeight := head.Height() + 100 + allocationRequestBork := verifregtypes13.AllocationRequest{ + Provider: abi.ActorID(minerId), + Data: cid.MustParse("baga6ea4seaaqa"), + Size: dc.Size, + TermMin: verifregtypes13.MinimumVerifiedAllocationTerm, + TermMax: verifregtypes13.MaximumVerifiedAllocationTerm, + Expiration: expiringAllocationHeight, + } + allocationRequest := verifregtypes13.AllocationRequest{ + Provider: abi.ActorID(minerId), + Data: dc.PieceCID, + Size: dc.Size, + TermMin: verifregtypes13.MinimumVerifiedAllocationTerm, + TermMax: verifregtypes13.MaximumVerifiedAllocationTerm, + Expiration: verifregtypes13.MaximumVerifiedAllocationExpiration, + } + + allocationRequests := verifregtypes13.AllocationRequests{ + Allocations: []verifregtypes13.AllocationRequest{allocationRequestBork, allocationRequest}, + } + + receiverParams, aerr := actors.SerializeParams(&allocationRequests) + require.NoError(t, aerr) + + transferParams, aerr := actors.SerializeParams(&datacap2.TransferParams{ + To: builtin.VerifiedRegistryActorAddr, + Amount: big.Mul(big.NewInt(int64(dc.Size*2)), builtin.TokenPrecision), + OperatorData: receiverParams, + }) + require.NoError(t, aerr) + + msg := &types.Message{ + To: builtin.DatacapActorAddr, + From: verifiedClientAddr, + Method: datacap.Methods.TransferExported, + Params: transferParams, + Value: big.Zero(), + } + + sm, err := node.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err := node.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + // 
check that we have an allocation + allocations, err := node.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + require.Len(t, allocations, 2) // allocation waiting to be claimed + + for key, value := range allocations { + if value.Data == dc.PieceCID { + allocationID = verifregtypes13.AllocationId(key) + clientID = value.Client + break + } + } + require.NotEqual(t, verifregtypes13.AllocationId(0), allocationID) // found it in there + return clientID, allocationID +} + +func ddoVerifiedOnboardPiece(ctx context.Context, t *testing.T, miner *kit.TestMiner, clientId abi.ActorID, allocationId verifregtypes13.AllocationId, dc abi.PieceInfo, pieceData []byte) (lapi.SectorOffset, lapi.SectorInfo) { + head, err := miner.FullNode.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, dc.Size.Unpadded(), bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + KeepUnsealed: true, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: &minertypes13.VerifiedAllocationKey{Client: clientId, ID: allocationId}, + Notify: nil, + }, + }) + require.NoError(t, err) + + // wait for sector to commit + miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{ + so.Sector: {}, + }) + + // Verify that the piece has been onboarded + + si, err := miner.SectorsStatus(ctx, so.Sector, true) + require.NoError(t, err) + require.Equal(t, dc.PieceCID, *si.CommD) + + require.Equal(t, si.DealWeight, big.Zero()) + require.Equal(t, si.VerifiedDealWeight, big.Mul(big.NewInt(int64(dc.Size)), big.NewInt(int64(si.Expiration-si.Activation)))) + + return so, si +} + +func ddoVerifiedRemoveAllocations(ctx context.Context, t *testing.T, node v1api.FullNode, verifiedClientAddr address.Address, clientId 
abi.ActorID) { + // trigger an allocation removal + removalParams, aerr := actors.SerializeParams(&verifregtypes13.RemoveExpiredAllocationsParams{Client: clientId}) + require.NoError(t, aerr) + + msg := &types.Message{ + To: builtin.VerifiedRegistryActorAddr, + From: verifiedClientAddr, + Method: verifreg.Methods.RemoveExpiredAllocations, + Params: removalParams, + Value: big.Zero(), + } + + sm, err := node.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err := node.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) +} + +func ddoVerifiedBuildClaimsFromMessages(ctx context.Context, t *testing.T, eventsFromMessages []*types.ActorEvent, node v1api.FullNode) []*verifregtypes9.Claim { + claimKeyCbor := must.One(ipld.Encode(basicnode.NewString("claim"), dagcbor.Encode)) + claims := make([]*verifregtypes9.Claim, 0) + for _, event := range eventsFromMessages { + var isClaim bool + var claimId int64 = -1 + var providerId int64 = -1 + for _, e := range event.Entries { + if e.Key == "$type" && bytes.Equal(e.Value, claimKeyCbor) { + isClaim = true + } else if isClaim && e.Key == "id" { + nd, err := ipld.DecodeUsingPrototype(e.Value, dagcbor.Decode, bindnode.Prototype((*int64)(nil), nil)) + require.NoError(t, err) + claimId = *bindnode.Unwrap(nd).(*int64) + } else if isClaim && e.Key == "provider" { + nd, err := ipld.DecodeUsingPrototype(e.Value, dagcbor.Decode, bindnode.Prototype((*int64)(nil), nil)) + require.NoError(t, err) + providerId = *bindnode.Unwrap(nd).(*int64) + } + if isClaim && claimId != -1 && providerId != -1 { + provider, err := address.NewIDAddress(uint64(providerId)) + require.NoError(t, err) + claim, err := node.StateGetClaim(ctx, provider, verifregtypes9.ClaimId(claimId), types.EmptyTSK) + require.NoError(t, err) + claims = append(claims, claim) + } + } + } + return claims +} + +func ddoVerifiedBuildActorEventsFromMessages(ctx context.Context, t 
*testing.T, node v1api.FullNode) []*types.ActorEvent { + actorEvents := make([]*types.ActorEvent, 0) + + head, err := node.ChainHead(ctx) + require.NoError(t, err) + var lastts types.TipSetKey + for height := 0; height < int(head.Height()); height++ { + // for each tipset + ts, err := node.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK) + require.NoError(t, err) + if ts.Key() == lastts { + continue + } + lastts = ts.Key() + messages, err := node.ChainGetMessagesInTipset(ctx, ts.Key()) + require.NoError(t, err) + if len(messages) == 0 { + continue + } + for _, m := range messages { + receipt, err := node.StateSearchMsg(ctx, types.EmptyTSK, m.Cid, -1, false) + require.NoError(t, err) + require.NotNil(t, receipt) + // receipt + if receipt.Receipt.EventsRoot != nil { + events, err := node.ChainGetEvents(ctx, *receipt.Receipt.EventsRoot) + require.NoError(t, err) + for _, evt := range events { + // for each event + addr, err := address.NewIDAddress(uint64(evt.Emitter)) + require.NoError(t, err) + + actorEvents = append(actorEvents, &types.ActorEvent{ + Entries: evt.Entries, + Emitter: addr, + Reverted: false, + Height: ts.Height(), + TipSetKey: ts.Key(), + MsgCid: m.Cid, + }) + } + } + } + } + return actorEvents +} + +func ddoVerifiedSetupVerifiedClient(ctx context.Context, t *testing.T, client *kit.TestFullNode, rootKey *key.Key, verifierKey *key.Key, verifiedClientKey *key.Key) (address.Address, address.Address) { + // import the root key. + rootAddr, err := client.WalletImport(ctx, &rootKey.KeyInfo) + require.NoError(t, err) + + // import the verifiers' keys. + verifierAddr, err := client.WalletImport(ctx, &verifierKey.KeyInfo) + require.NoError(t, err) + + // import the verified client's key. 
+ verifiedClientAddr, err := client.WalletImport(ctx, &verifiedClientKey.KeyInfo) + require.NoError(t, err) + + allowance := big.NewInt(100000000000) + params, aerr := actors.SerializeParams(&verifregtypes13.AddVerifierParams{Address: verifierAddr, Allowance: allowance}) + require.NoError(t, aerr) + + msg := &types.Message{ + From: rootAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifier, + Params: params, + Value: big.Zero(), + } + + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err, "AddVerifier failed") + + res, err := client.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + verifierAllowance, err := client.StateVerifierStatus(ctx, verifierAddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, allowance, *verifierAllowance) + + // assign datacap to a client + initialDatacap := big.NewInt(10000) + + params, aerr = actors.SerializeParams(&verifregtypes13.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: initialDatacap}) + require.NoError(t, aerr) + + msg = &types.Message{ + From: verifierAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + sm, err = client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err = client.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + return verifierAddr, verifiedClientAddr +} + +func filterEvents(events []*types.ActorEvent, key string) []*types.ActorEvent { + keyBytes := must.One(ipld.Encode(basicnode.NewString(key), dagcbor.Encode)) + filtered := make([]*types.ActorEvent, 0) + for _, event := range events { + for _, e := range event.Entries { + if e.Key == "$type" && bytes.Equal(e.Value, keyBytes) { + filtered = append(filtered, event) + break + } + } + } + return filtered +} + +func printEvents(t 
*testing.T, events []*types.ActorEvent) { + for _, event := range events { + entryStrings := []string{ + fmt.Sprintf("height=%d", event.Height), + fmt.Sprintf("msg=%s", event.MsgCid), + fmt.Sprintf("emitter=%s", event.Emitter), + fmt.Sprintf("reverted=%t", event.Reverted), + } + for _, e := range event.Entries { + // for each event entry + entryStrings = append(entryStrings, fmt.Sprintf("%s=%s", e.Key, eventValueToDagJson(t, e.Codec, e.Value))) + } + fmt.Printf("Event<%s>\n", strings.Join(entryStrings, ", ")) + } +} + +// eventValueToDagJson converts an ActorEvent value to a JSON string for printing. +func eventValueToDagJson(t *testing.T, codec uint64, data []byte) string { + switch codec { + case uint64(multicodec.Cbor): + nd, err := ipld.Decode(data, dagcbor.Decode) + require.NoError(t, err) + byts, err := ipld.Encode(nd, dagjson.Encode) + require.NoError(t, err) + return string(byts) + default: + return fmt.Sprintf("0x%x", data) + } +} + +func epochPtr(ei int64) *abi.ChainEpoch { + ep := abi.ChainEpoch(ei) + return &ep +} diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index a515b0e9998..c315b21c9d3 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -1099,14 +1099,14 @@ func importPreSealMeta(ctx context.Context, meta genesis.Miner, mds dtypes.Metad info := &pipeline.SectorInfo{ State: pipeline.Proving, SectorNumber: sector.SectorID, - Pieces: []api.SectorPiece{ - { + Pieces: []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, DealInfo: nil, // todo: likely possible to get, but not really that useful - }, + }), }, CommD: &commD, CommR: &commR, diff --git a/itests/kit/ensemble_opts_nv.go b/itests/kit/ensemble_opts_nv.go index d5bb1930ef0..18b531e13b1 100644 --- a/itests/kit/ensemble_opts_nv.go +++ b/itests/kit/ensemble_opts_nv.go @@ -35,12 +35,12 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt { }) /* inline-gen start 
*/ return UpgradeSchedule(stmgr.Upgrade{ - Network: network.Version20, + Network: network.Version21, Height: -1, }, stmgr.Upgrade{ - Network: network.Version21, + Network: network.Version22, Height: upgradeHeight, - Migration: filcns.UpgradeActorsV12, + Migration: filcns.UpgradeActorsV13, }) /* inline-gen end */ } diff --git a/itests/kit/log.go b/itests/kit/log.go index 0c66427f9b7..2cb5970950f 100644 --- a/itests/kit/log.go +++ b/itests/kit/log.go @@ -23,6 +23,7 @@ func QuietMiningLogs() { _ = logging.SetLogLevel("rpc", "ERROR") _ = logging.SetLogLevel("consensus-common", "ERROR") _ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR") + _ = logging.SetLogLevel("consensus-common", "WARN") } func QuietAllLogsExcept(names ...string) { diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index 9af284148c4..09e78995147 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -1,6 +1,8 @@ package kit import ( + "math" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -63,6 +65,8 @@ var DefaultNodeOpts = nodeOpts{ // test defaults cfg.Fevm.EnableEthRPC = true + cfg.Fevm.Events.MaxFilterHeightRange = math.MaxInt64 + cfg.Events.EnableActorEventsAPI = true return nil }, }, diff --git a/itests/pending_deal_allocation_test.go b/itests/pending_deal_allocation_test.go index c1e0531cfeb..60b755ac05e 100644 --- a/itests/pending_deal_allocation_test.go +++ b/itests/pending_deal_allocation_test.go @@ -180,10 +180,6 @@ func TestGetAllocationForPendingDeal(t *testing.T) { dealIds, err := ret.DealIDs() require.NoError(t, err) - dealInfo, err := api.StateMarketStorageDeal(ctx, dealIds[0], types.EmptyTSK) - require.NoError(t, err) - require.Equal(t, verifregtypes.AllocationId(0), dealInfo.State.VerifiedClaim) // Allocation in State should not be set yet, because it's in the allocation map - allocation, err := api.StateGetAllocationForPendingDeal(ctx, dealIds[0], types.EmptyTSK) require.NoError(t, err) 
require.Equal(t, dealProposal.PieceCID, allocation.Data) diff --git a/itests/sector_terminate_test.go b/itests/sector_terminate_test.go index 34b325f2ad1..57cffb0068e 100644 --- a/itests/sector_terminate_test.go +++ b/itests/sector_terminate_test.go @@ -2,10 +2,15 @@ package itests import ( + "bytes" "context" "testing" "time" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/multiformats/go-multicodec" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-bitfield" @@ -13,6 +18,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/lib/must" sealing "github.com/filecoin-project/lotus/storage/pipeline" ) @@ -164,4 +170,31 @@ loop: require.Equal(t, p.MinerPower, p.TotalPower) require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower) + + // check "sector-terminated" actor event + var epochZero abi.ChainEpoch + allEvents, err := miner.FullNode.GetActorEvents(ctx, &types.ActorEventFilter{ + FromHeight: &epochZero, + }) + require.NoError(t, err) + for _, key := range []string{"sector-precommitted", "sector-activated", "sector-terminated"} { + var found bool + keyBytes := must.One(ipld.Encode(basicnode.NewString(key), dagcbor.Encode)) + for _, event := range allEvents { + for _, e := range event.Entries { + if e.Key == "$type" && bytes.Equal(e.Value, keyBytes) { + found = true + if key == "sector-terminated" { + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: keyBytes}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(toTerminate)), dagcbor.Encode))}, + } + require.ElementsMatch(t, expectedEntries, event.Entries) + } + break + } + } + } + require.True(t, found, "expected to find event %s", key) + } } diff --git 
a/itests/verifreg_test.go b/itests/verifreg_test.go index ffe50c72b19..07a31477d75 100644 --- a/itests/verifreg_test.go +++ b/itests/verifreg_test.go @@ -503,3 +503,152 @@ func makeVerifier(ctx context.Context, t *testing.T, api *impl.FullNodeAPI, root require.NoError(t, err) require.Equal(t, allowance, *verifierAllowance) } + +func TestVerifiedListAllAllocationsAndClaims(t *testing.T) { + blockTime := 100 * time.Millisecond + + rootKey, err := key.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifier1Key, err := key.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifiedClientKey, err := key.GenerateKey(types.KTBLS) + require.NoError(t, err) + + bal, err := types.ParseFIL("100fil") + require.NoError(t, err) + + node, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), + kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifier1Key, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifiedClientKey, abi.NewTokenAmount(bal.Int64())), + ) + + ens.InterconnectAll().BeginMining(blockTime) + + api := node.FullNode.(*impl.FullNodeAPI) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // get VRH + vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) + fmt.Println(vrh.String()) + require.NoError(t, err) + + // import the root key. + rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo) + require.NoError(t, err) + + // import the verifiers' keys. + verifier1Addr, err := api.WalletImport(ctx, &verifier1Key.KeyInfo) + require.NoError(t, err) + + // import the verified client's key. 
+ verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo) + require.NoError(t, err) + + // resolve all keys + + // make the 2 verifiers + + makeVerifier(ctx, t, api, rootAddr, verifier1Addr) + + // assign datacap to a client + initialDatacap := big.NewInt(20000) + + params, err := actors.SerializeParams(&verifregst.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: initialDatacap}) + require.NoError(t, err) + + msg := &types.Message{ + From: verifier1Addr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + sm, err := api.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + // check datacap balance + dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, *dcap, initialDatacap) + + minerId, err := address.IDFromAddress(miner.ActorAddr) + require.NoError(t, err) + + allocationRequest1 := verifregst.AllocationRequest{ + Provider: abi.ActorID(minerId), + Data: cid.MustParse("baga6ea4seaaqa"), + Size: abi.PaddedPieceSize(initialDatacap.Uint64() / 2), + TermMin: verifregst.MinimumVerifiedAllocationTerm, + TermMax: verifregst.MinimumVerifiedAllocationTerm, + Expiration: verifregst.MaximumVerifiedAllocationExpiration, + } + + allocationRequest2 := verifregst.AllocationRequest{ + Provider: abi.ActorID(minerId), + Data: cid.MustParse("baga6ea4seaaqc"), + Size: abi.PaddedPieceSize(initialDatacap.Uint64() / 2), + TermMin: verifregst.MinimumVerifiedAllocationTerm, + TermMax: verifregst.MinimumVerifiedAllocationTerm, + Expiration: verifregst.MaximumVerifiedAllocationExpiration, + } + + allocationRequests := verifregst.AllocationRequests{ + Allocations: []verifregst.AllocationRequest{allocationRequest1, allocationRequest2}, + } + + receiverParams, 
err := actors.SerializeParams(&allocationRequests) + require.NoError(t, err) + + transferParams, err := actors.SerializeParams(&datacap2.TransferParams{ + To: builtin.VerifiedRegistryActorAddr, + Amount: big.Mul(initialDatacap, builtin.TokenPrecision), + OperatorData: receiverParams, + }) + require.NoError(t, err) + + msg = &types.Message{ + To: builtin.DatacapActorAddr, + From: verifiedClientAddr, + Method: datacap.Methods.TransferExported, + Params: transferParams, + Value: big.Zero(), + } + + sm, err = api.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + allocations, err := api.StateGetAllAllocations(ctx, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, 2, len(allocations)) + + var pcids []string + + for _, a := range allocations { + clientIdAddr, err := api.StateLookupID(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + clientId, err := address.IDFromAddress(clientIdAddr) + require.NoError(t, err) + require.Equal(t, abi.ActorID(clientId), a.Client) + require.Equal(t, abi.ActorID(minerId), a.Provider) + require.Equal(t, abi.PaddedPieceSize(10000), a.Size) + pcids = append(pcids, a.Data.String()) + } + + require.ElementsMatch(t, []string{"baga6ea4seaaqa", "baga6ea4seaaqc"}, pcids) + + // TODO: Add claims check to this test once https://github.com/filecoin-project/lotus/pull/11618 lands +} diff --git a/lib/result/result.go b/lib/result/result.go index 56a9ffab713..4f3a12ee8bd 100644 --- a/lib/result/result.go +++ b/lib/result/result.go @@ -1,5 +1,7 @@ package result +import "encoding/json" + // Result is a small wrapper type encapsulating Value/Error tuples, mostly for // use when sending values across channels // NOTE: Avoid adding any functionality to this, any "nice" things added here will @@ -39,3 +41,13 @@ func (r Result[T]) Assert(noErrFn func(err error, 
msgAndArgs ...interface{})) T return r.Value } + +// MarshalJSON implements the json.Marshaler interface, marshalling string error correctly +// this method makes the display in log.Infow nicer +func (r Result[T]) MarshalJSON() ([]byte, error) { + if r.Error != nil { + return json.Marshal(map[string]string{"Error": r.Error.Error()}) + } + + return json.Marshal(map[string]interface{}{"Value": r.Value}) +} diff --git a/markets/storageadapter/ondealsectorcommitted_test.go b/markets/storageadapter/ondealsectorcommitted_test.go index 1d7519ff9f9..e3d3187809a 100644 --- a/markets/storageadapter/ondealsectorcommitted_test.go +++ b/markets/storageadapter/ondealsectorcommitted_test.go @@ -55,21 +55,21 @@ func TestOnDealSectorPreCommitted(t *testing.T) { } unfinishedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: -1, LastUpdatedEpoch: 2, }, } activeDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } slashedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, SlashEpoch: 2, @@ -277,21 +277,21 @@ func TestOnDealSectorCommitted(t *testing.T) { } unfinishedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: -1, LastUpdatedEpoch: 2, }, } activeDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } slashedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, SlashEpoch: 2, diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index bdfce6f55af..11742c879f6 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ 
-32,6 +32,7 @@ import ( "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/helpers" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sectorblocks" ) @@ -92,11 +93,11 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema return nil, xerrors.Errorf("deal.PublishCid can't be nil") } - sdInfo := api.PieceDealInfo{ + sdInfo := piece.PieceDealInfo{ DealID: deal.DealID, DealProposal: &deal.Proposal, PublishCid: deal.PublishCid, - DealSchedule: api.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, }, diff --git a/node/builder_chain.go b/node/builder_chain.go index 348916010ce..cf6c5642885 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/index" @@ -153,6 +154,7 @@ var ChainNode = Options( Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), Override(new(full.EthModuleAPI), From(new(api.Gateway))), Override(new(full.EthEventAPI), From(new(api.Gateway))), + Override(new(full.ActorEventAPI), From(new(api.Gateway))), ), // Full node API / service startup @@ -250,13 +252,14 @@ func ConfigFullNode(c interface{}) Option { ), // Actor event filtering support - Override(new(events.EventAPI), From(new(modules.EventAPI))), + Override(new(events.EventHelperAPI), From(new(modules.EventHelperAPI))), + Override(new(*filter.EventFilterManager), 
modules.EventFilterManager(cfg.Fevm)), // in lite-mode Eth api is provided by gateway ApplyIf(isFullNode, If(cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), modules.EthModuleAPI(cfg.Fevm)), - Override(new(full.EthEventAPI), modules.EthEventAPI(cfg.Fevm)), + Override(new(full.EthEventAPI), modules.EthEventHandler(cfg.Fevm)), ), If(!cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), &full.EthModuleDummy{}), @@ -264,6 +267,15 @@ func ConfigFullNode(c interface{}) Option { ), ), + ApplyIf(isFullNode, + If(cfg.Events.EnableActorEventsAPI, + Override(new(full.ActorEventAPI), modules.ActorEventHandler(cfg.Events.EnableActorEventsAPI, cfg.Fevm)), + ), + If(!cfg.Events.EnableActorEventsAPI, + Override(new(full.ActorEventAPI), &full.ActorEventDummy{}), + ), + ), + // enable message index for full node when configured by the user, otherwise use dummy. If(cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.MsgIndex)), If(!cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.DummyMsgIndex)), diff --git a/node/config/def.go b/node/config/def.go index c660fdb600e..475f37517d1 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -118,6 +118,9 @@ func DefaultFullNode() *FullNode { MaxFilterHeightRange: 2880, // conservative limit of one day }, }, + Events: EventsConfig{ + EnableActorEventsAPI: false, + }, } } diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 6cb93a50eed..f28c5abd8ff 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -362,9 +362,8 @@ see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/# Name: "DisableRealTimeFilterAPI", Type: "bool", - Comment: `EnableEthRPC enables APIs that -DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. 
-The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag.`, + Comment: `DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. +The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, }, { Name: "DisableHistoricFilterAPI", @@ -372,7 +371,7 @@ The API is enabled when EnableEthRPC is true, but can be disabled selectively wi Comment: `DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events that occurred in the past. HistoricFilterAPI maintains a queryable index of events. -The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag.`, +The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, }, { Name: "FilterTTL", @@ -410,6 +409,17 @@ the database must already exist and be writeable. If a relative path is provided relative to the CWD (current working directory).`, }, }, + "EventsConfig": { + { + Name: "EnableActorEventsAPI", + Type: "bool", + + Comment: `EnableActorEventsAPI enables the Actor events API that enables clients to consume events +emitted by (smart contracts + built-in Actors). 
+This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be +disabled by setting their respective Disable* options in Fevm.Events.`, + }, + }, "FaultReporterConfig": { { Name: "EnableConsensusFaultReporter", @@ -500,6 +510,12 @@ Set to 0 to keep all mappings`, Comment: ``, }, + { + Name: "Events", + Type: "EventsConfig", + + Comment: ``, + }, { Name: "Index", Type: "IndexConfig", @@ -1680,6 +1696,30 @@ Submitting a smaller number of prove commits per epoch would reduce the possibil Comment: `UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB.`, }, + { + Name: "RequireActivationSuccess", + Type: "bool", + + Comment: `Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3).`, + }, + { + Name: "RequireActivationSuccessUpdate", + Type: "bool", + + Comment: `Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3).`, + }, + { + Name: "RequireNotificationSuccess", + Type: "bool", + + Comment: `Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3).`, + }, + { + Name: "RequireNotificationSuccessUpdate", + Type: "bool", + + Comment: `Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3).`, + }, }, "Splitstore": { { diff --git a/node/config/types.go b/node/config/types.go index 6c281208678..789d24103dc 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -27,6 +27,7 @@ type FullNode struct { Fees FeeConfig Chainstore Chainstore Fevm FevmConfig + Events EventsConfig Index IndexConfig FaultReporter FaultReporterConfig } @@ -588,6 +589,15 @@ type SealingConfig struct { // UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of 
PreCommit 2 to 11GiB. UseSyntheticPoRep bool + + // Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3). + RequireActivationSuccess bool + // Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3). + RequireActivationSuccessUpdate bool + // Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3). + RequireNotificationSuccess bool + // Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3). + RequireNotificationSuccessUpdate bool } type SealerConfig struct { @@ -859,14 +869,13 @@ type FevmConfig struct { } type Events struct { - // EnableEthRPC enables APIs that // DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - // The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag. + // The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag. DisableRealTimeFilterAPI bool // DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events // that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - // The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag. + // The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag. DisableHistoricFilterAPI bool // FilterTTL specifies the time to live for actor event filters. 
Filters that haven't been accessed longer than @@ -895,6 +904,14 @@ type Events struct { // Set upper bound on index size } +type EventsConfig struct { + // EnableActorEventsAPI enables the Actor events API that enables clients to consume events + // emitted by (smart contracts + built-in Actors). + // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be + // disabled by setting their respective Disable* options in Fevm.Events. + EnableActorEventsAPI bool +} + type IndexConfig struct { // EXPERIMENTAL FEATURE. USE WITH CAUTION // EnableMsgIndex enables indexing of messages on chain. @@ -918,6 +935,7 @@ type HarmonyDB struct { // The port to find Yugabyte. Blank for default. Port string } + type FaultReporterConfig struct { // EnableConsensusFaultReporter controls whether the node will monitor and // report consensus faults. When enabled, the node will watch for malicious diff --git a/node/hello/cbor_gen.go b/node/hello/cbor_gen.go index 78e950f6f6e..91a270ff74c 100644 --- a/node/hello/cbor_gen.go +++ b/node/hello/cbor_gen.go @@ -35,7 +35,7 @@ func (t *HelloMessage) MarshalCBOR(w io.Writer) error { } // t.HeaviestTipSet ([]cid.Cid) (slice) - if len(t.HeaviestTipSet) > cbg.MaxLength { + if len(t.HeaviestTipSet) > 8192 { return xerrors.Errorf("Slice value in field t.HeaviestTipSet was too long") } @@ -105,7 +105,7 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.HeaviestTipSet: array too large (%d)", extra) } @@ -136,16 +136,16 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) (err error) { t.HeaviestTipSet[i] = c } + } } - // t.HeaviestTipSetHeight (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -223,6 +223,7 @@ func (t *LatencyMessage) MarshalCBOR(w io.Writer) error { return err } } 
+ return nil } @@ -252,10 +253,10 @@ func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) (err error) { // t.TArrival (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -277,10 +278,10 @@ func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) (err error) { // t.TSent (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) diff --git a/node/impl/full.go b/node/impl/full.go index bc555c8c26b..527a5538436 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -35,6 +35,7 @@ type FullNodeAPI struct { full.WalletAPI full.SyncAPI full.EthAPI + full.ActorEventsAPI DS dtypes.MetadataDS NetworkName dtypes.NetworkName diff --git a/node/impl/full/actor_events.go b/node/impl/full/actor_events.go new file mode 100644 index 00000000000..fecd1d2b6ad --- /dev/null +++ b/node/impl/full/actor_events.go @@ -0,0 +1,376 @@ +package full + +import ( + "context" + "fmt" + "time" + + "github.com/ipfs/go-cid" + "github.com/raulk/clock" + "go.uber.org/fx" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/types" +) + +type ActorEventAPI interface { + GetActorEvents(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) + SubscribeActorEvents(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) +} + +var ( + _ ActorEventAPI = *new(api.FullNode) + _ ActorEventAPI = *new(api.Gateway) +) + +type ChainAccessor interface { + GetHeaviestTipSet() *types.TipSet +} + +type EventFilterManager interface { + Install( + ctx context.Context, + minHeight, maxHeight abi.ChainEpoch, + tipsetCid cid.Cid, + 
addresses []address.Address, + keysWithCodec map[string][]types.ActorEventBlock, + excludeReverted bool, + ) (filter.EventFilter, error) + Remove(ctx context.Context, id types.FilterID) error +} + +type ActorEventsAPI struct { + fx.In + ActorEventAPI +} + +type ActorEventHandler struct { + chain ChainAccessor + eventFilterManager EventFilterManager + blockDelay time.Duration + maxFilterHeightRange abi.ChainEpoch + clock clock.Clock +} + +var _ ActorEventAPI = (*ActorEventHandler)(nil) + +func NewActorEventHandler( + chain ChainAccessor, + eventFilterManager EventFilterManager, + blockDelay time.Duration, + maxFilterHeightRange abi.ChainEpoch, +) *ActorEventHandler { + return &ActorEventHandler{ + chain: chain, + eventFilterManager: eventFilterManager, + blockDelay: blockDelay, + maxFilterHeightRange: maxFilterHeightRange, + clock: clock.New(), + } +} + +func NewActorEventHandlerWithClock( + chain ChainAccessor, + eventFilterManager EventFilterManager, + blockDelay time.Duration, + maxFilterHeightRange abi.ChainEpoch, + clock clock.Clock, +) *ActorEventHandler { + return &ActorEventHandler{ + chain: chain, + eventFilterManager: eventFilterManager, + blockDelay: blockDelay, + maxFilterHeightRange: maxFilterHeightRange, + clock: clock, + } +} + +func (a *ActorEventHandler) GetActorEvents(ctx context.Context, evtFilter *types.ActorEventFilter) ([]*types.ActorEvent, error) { + if a.eventFilterManager == nil { + return nil, api.ErrNotSupported + } + + if evtFilter == nil { + evtFilter = &types.ActorEventFilter{} + } + params, err := a.parseFilter(*evtFilter) + if err != nil { + return nil, err + } + + // Install a filter just for this call, collect events, remove the filter + tipSetCid, err := params.GetTipSetCid() + if err != nil { + return nil, fmt.Errorf("failed to get tipset cid: %w", err) + } + f, err := a.eventFilterManager.Install(ctx, params.MinHeight, params.MaxHeight, tipSetCid, evtFilter.Addresses, evtFilter.Fields, false) + if err != nil { + return nil, err + 
} + defer func() { + // Remove the temporary filter regardless of the original context. + if err := a.eventFilterManager.Remove(context.Background(), f.ID()); err != nil { + log.Warnf("failed to remove filter: %s", err) + } + }() + return getCollected(ctx, f), nil +} + +type filterParams struct { + MinHeight abi.ChainEpoch + MaxHeight abi.ChainEpoch + TipSetKey types.TipSetKey +} + +func (fp filterParams) GetTipSetCid() (cid.Cid, error) { + if fp.TipSetKey.IsEmpty() { + return cid.Undef, nil + } + return fp.TipSetKey.Cid() +} + +func (a *ActorEventHandler) parseFilter(f types.ActorEventFilter) (*filterParams, error) { + if f.TipSetKey != nil && !f.TipSetKey.IsEmpty() { + if f.FromHeight != nil || f.ToHeight != nil { + return nil, fmt.Errorf("cannot specify both TipSetKey and FromHeight/ToHeight") + } + + return &filterParams{ + MinHeight: 0, + MaxHeight: 0, + TipSetKey: *f.TipSetKey, + }, nil + } + + min, max, err := parseHeightRange(a.chain.GetHeaviestTipSet().Height(), f.FromHeight, f.ToHeight, a.maxFilterHeightRange) + if err != nil { + return nil, err + } + + return &filterParams{ + MinHeight: min, + MaxHeight: max, + TipSetKey: types.EmptyTSK, + }, nil +} + +// parseHeightRange is similar to eth's parseBlockRange but with slightly different semantics but +// results in equivalent values that we can plug in to the EventFilterManager. 
+// +// * Uses "height", allowing for nillable values rather than strings +// * No "latest" and "earliest", those are now represented by nil on the way in and -1 on the way out +// * No option for hex representation +func parseHeightRange(heaviest abi.ChainEpoch, fromHeight, toHeight *abi.ChainEpoch, maxRange abi.ChainEpoch) (minHeight abi.ChainEpoch, maxHeight abi.ChainEpoch, err error) { + if fromHeight != nil && *fromHeight < 0 { + return 0, 0, fmt.Errorf("range 'from' must be greater than or equal to 0") + } + if fromHeight == nil { + minHeight = -1 + } else { + minHeight = *fromHeight + } + if toHeight == nil { + maxHeight = -1 + } else { + maxHeight = *toHeight + } + + // Validate height ranges are within limits set by node operator + if minHeight == -1 && maxHeight > 0 { + // Here the client is looking for events between the head and some future height + if maxHeight-heaviest > maxRange { + return 0, 0, fmt.Errorf("invalid epoch range: 'to' height is too far in the future (maximum: %d)", maxRange) + } + } else if minHeight >= 0 && maxHeight == -1 { + // Here the client is looking for events between some time in the past and the current head + if heaviest-minHeight > maxRange { + return 0, 0, fmt.Errorf("invalid epoch range: 'from' height is too far in the past (maximum: %d)", maxRange) + } + } else if minHeight >= 0 && maxHeight >= 0 { + if minHeight > maxHeight { + return 0, 0, fmt.Errorf("invalid epoch range: 'to' height (%d) must be after 'from' height (%d)", minHeight, maxHeight) + } else if maxHeight-minHeight > maxRange { + return 0, 0, fmt.Errorf("invalid epoch range: range between to and 'from' heights is too large (maximum: %d)", maxRange) + } + } + return minHeight, maxHeight, nil +} + +func (a *ActorEventHandler) SubscribeActorEvents(ctx context.Context, evtFilter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + if a.eventFilterManager == nil { + return nil, api.ErrNotSupported + } + + if evtFilter == nil { + evtFilter = 
&types.ActorEventFilter{} + } + params, err := a.parseFilter(*evtFilter) + if err != nil { + return nil, err + } + + tipSetCid, err := params.GetTipSetCid() + if err != nil { + return nil, fmt.Errorf("failed to get tipset cid: %w", err) + } + fm, err := a.eventFilterManager.Install(ctx, params.MinHeight, params.MaxHeight, tipSetCid, evtFilter.Addresses, evtFilter.Fields, false) + if err != nil { + return nil, err + } + + // The goal for the code below is to send events on the `out` channel as fast as possible and not + // let it get too far behind the rate at which the events are generated. + // For historical events, we aim to send all events within a single block's time (30s on mainnet). + // This ensures that the client can catch up quickly enough to start receiving new events. + // For ongoing events, we also aim to send all events within a single block's time, so we never + // want to be buffering events (approximately) more than one epoch behind the current head. + // It's approximate because we only update our notion of "current epoch" once per ~blocktime. + + out := make(chan *types.ActorEvent) + + // When we start sending real-time events, we want to make sure that we don't fall behind more + // than one epoch's worth of events (approximately). Capture this value now, before we send + // historical events to allow for a little bit of slack in the historical event sending. 
+ minBacklogHeight := a.chain.GetHeaviestTipSet().Height() - 1 + + go func() { + defer func() { + // tell the caller we're done + close(out) + fm.ClearSubChannel() + if err := a.eventFilterManager.Remove(ctx, fm.ID()); err != nil { + log.Warnf("failed to remove filter: %s", err) + } + }() + + // Handle any historical events that our filter may have picked up ----------------------------- + + evs := getCollected(ctx, fm) + if len(evs) > 0 { + // ensure we get all events out on the channel within one block's time (30s on mainnet) + timer := a.clock.Timer(a.blockDelay) + for _, ev := range evs { + select { + case out <- ev: + case <-timer.C: + log.Errorf("closing event subscription due to slow event sending rate") + timer.Stop() + return + case <-ctx.Done(): + timer.Stop() + return + } + } + timer.Stop() + } + + // for the case where we have a MaxHeight set, we don't get a signal from the filter when we + // reach that height, so we need to check it ourselves, do it now but also in the loop + if params.MaxHeight > 0 && minBacklogHeight+1 >= params.MaxHeight { + return + } + + // Handle ongoing events from the filter ------------------------------------------------------- + + in := make(chan interface{}, 256) + fm.SetSubChannel(in) + + var buffer []*types.ActorEvent + nextBacklogHeightUpdate := a.clock.Now().Add(a.blockDelay) + + collectEvent := func(ev interface{}) bool { + ce, ok := ev.(*filter.CollectedEvent) + if !ok { + log.Errorf("got unexpected value from event filter: %T", ev) + return false + } + + if ce.Height < minBacklogHeight { + // since we mostly care about buffer size, we only trigger a too-slow close when the buffer + // increases, i.e. 
we collect a new event + log.Errorf("closing event subscription due to slow event sending rate") + return false + } + + buffer = append(buffer, &types.ActorEvent{ + Entries: ce.Entries, + Emitter: ce.EmitterAddr, + Reverted: ce.Reverted, + Height: ce.Height, + TipSetKey: ce.TipSetKey, + MsgCid: ce.MsgCid, + }) + return true + } + + ticker := a.clock.Ticker(a.blockDelay) + defer ticker.Stop() + + for ctx.Err() == nil { + if len(buffer) > 0 { + select { + case ev, ok := <-in: // incoming event + if !ok || !collectEvent(ev) { + return + } + case out <- buffer[0]: // successful send + buffer[0] = nil + buffer = buffer[1:] + case <-ticker.C: + // check that our backlog isn't too big by looking at the oldest event + if buffer[0].Height < minBacklogHeight { + log.Errorf("closing event subscription due to slow event sending rate") + return + } + case <-ctx.Done(): + return + } + } else { + select { + case ev, ok := <-in: // incoming event + if !ok || !collectEvent(ev) { + return + } + case <-ctx.Done(): + return + case <-ticker.C: + currentHeight := a.chain.GetHeaviestTipSet().Height() + if params.MaxHeight > 0 && currentHeight > params.MaxHeight { + // we've reached the filter's MaxHeight, we're done so we can close the channel + return + } + } + } + + if a.clock.Now().After(nextBacklogHeightUpdate) { + minBacklogHeight = a.chain.GetHeaviestTipSet().Height() - 1 + nextBacklogHeightUpdate = a.clock.Now().Add(a.blockDelay) + } + } + }() + + return out, nil +} + +func getCollected(ctx context.Context, f filter.EventFilter) []*types.ActorEvent { + ces := f.TakeCollectedEvents(ctx) + + var out []*types.ActorEvent + + for _, e := range ces { + out = append(out, &types.ActorEvent{ + Entries: e.Entries, + Emitter: e.EmitterAddr, + Reverted: e.Reverted, + Height: e.Height, + TipSetKey: e.TipSetKey, + MsgCid: e.MsgCid, + }) + } + + return out +} diff --git a/node/impl/full/actor_events_test.go b/node/impl/full/actor_events_test.go new file mode 100644 index 00000000000..ab446e57b4a 
--- /dev/null +++ b/node/impl/full/actor_events_test.go @@ -0,0 +1,780 @@ +package full + +import ( + "context" + "fmt" + pseudo "math/rand" + "sync" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "github.com/raulk/clock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/types" +) + +var testCid = cid.MustParse("bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i") + +func TestParseHeightRange(t *testing.T) { + testCases := []struct { + name string + heaviest abi.ChainEpoch + from *abi.ChainEpoch + to *abi.ChainEpoch + maxRange abi.ChainEpoch + minOut abi.ChainEpoch + maxOut abi.ChainEpoch + errStr string + }{ + { + name: "fails when both are specified and range is greater than max allowed range", + heaviest: 100, + from: epochPtr(256), + to: epochPtr(512), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "too large", + }, + { + name: "fails when min is specified and range is greater than max allowed range", + heaviest: 500, + from: epochPtr(16), + to: nil, + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "'from' height is too far in the past", + }, + { + name: "fails when max is specified and range is greater than max allowed range", + heaviest: 500, + from: nil, + to: epochPtr(65536), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "'to' height is too far in the future", + }, + { + name: "fails when from is greater than to", + heaviest: 100, + from: epochPtr(512), + to: epochPtr(256), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "must be after", + }, + { + name: "works when range is valid (nil from)", + heaviest: 500, + from: nil, + to: epochPtr(48), + maxRange: 1000, + minOut: -1, + maxOut: 48, + }, + { + name: 
"works when range is valid (nil to)", + heaviest: 500, + from: epochPtr(0), + to: nil, + maxRange: 1000, + minOut: 0, + maxOut: -1, + }, + { + name: "works when range is valid (nil from and to)", + heaviest: 500, + from: nil, + to: nil, + maxRange: 1000, + minOut: -1, + maxOut: -1, + }, + { + name: "works when range is valid and specified", + heaviest: 500, + from: epochPtr(16), + to: epochPtr(48), + maxRange: 1000, + minOut: 16, + maxOut: 48, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + req := require.New(t) + min, max, err := parseHeightRange(tc.heaviest, tc.from, tc.to, tc.maxRange) + req.Equal(tc.minOut, min) + req.Equal(tc.maxOut, max) + if tc.errStr != "" { + t.Log(err) + req.Error(err) + req.Contains(err.Error(), tc.errStr) + } else { + req.NoError(err) + } + }) + } +} + +func TestGetActorEvents(t *testing.T) { + ctx := context.Background() + req := require.New(t) + + const ( + seed = 984651320 + maxFilterHeightRange = 100 + ) + + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + + minerAddr, err := address.NewIDAddress(uint64(rng.Int63())) + req.NoError(err) + + testCases := []struct { + name string + filter *types.ActorEventFilter + currentHeight int64 + installMinHeight int64 + installMaxHeight int64 + installTipSetKey cid.Cid + installAddresses []address.Address + installKeysWithCodec map[string][]types.ActorEventBlock + installExcludeReverted bool + expectErr string + }{ + { + name: "nil filter", + filter: nil, + installMinHeight: -1, + installMaxHeight: -1, + }, + { + name: "empty filter", + filter: &types.ActorEventFilter{}, + installMinHeight: -1, + installMaxHeight: -1, + }, + { + name: "basic height range filter", + filter: &types.ActorEventFilter{ + FromHeight: epochPtr(0), + ToHeight: epochPtr(maxFilterHeightRange), + }, + installMinHeight: 0, + installMaxHeight: maxFilterHeightRange, + }, + { + name: "from, no to height", + filter: &types.ActorEventFilter{ + FromHeight: 
epochPtr(0), + }, + currentHeight: maxFilterHeightRange - 1, + installMinHeight: 0, + installMaxHeight: -1, + }, + { + name: "to, no from height", + filter: &types.ActorEventFilter{ + ToHeight: epochPtr(maxFilterHeightRange - 1), + }, + installMinHeight: -1, + installMaxHeight: maxFilterHeightRange - 1, + }, + { + name: "from, no to height, too far", + filter: &types.ActorEventFilter{ + FromHeight: epochPtr(0), + }, + currentHeight: maxFilterHeightRange + 1, + expectErr: "invalid epoch range: 'from' height is too far in the past", + }, + { + name: "to, no from height, too far", + filter: &types.ActorEventFilter{ + ToHeight: epochPtr(maxFilterHeightRange + 1), + }, + currentHeight: 0, + expectErr: "invalid epoch range: 'to' height is too far in the future", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + efm := newMockEventFilterManager(t) + collectedEvents := makeCollectedEvents(t, rng, 0, 1, 10) + filter := newMockFilter(ctx, t, rng, collectedEvents) + + if tc.expectErr == "" { + efm.expectInstall(abi.ChainEpoch(tc.installMinHeight), abi.ChainEpoch(tc.installMaxHeight), tc.installTipSetKey, tc.installAddresses, tc.installKeysWithCodec, tc.installExcludeReverted, filter) + } + + ts, err := types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, tc.currentHeight)}) + req.NoError(err) + chain := newMockChainAccessor(t, ts) + + handler := NewActorEventHandler(chain, efm, 50*time.Millisecond, maxFilterHeightRange) + + gotEvents, err := handler.GetActorEvents(ctx, tc.filter) + if tc.expectErr != "" { + req.Error(err) + req.Contains(err.Error(), tc.expectErr) + } else { + req.NoError(err) + expectedEvents := collectedToActorEvents(collectedEvents) + req.Equal(expectedEvents, gotEvents) + efm.requireRemoved(filter.ID()) + } + }) + } +} + +func TestSubscribeActorEvents(t *testing.T) { + const ( + seed = 984651320 + maxFilterHeightRange = 100 + blockDelay = 30 * time.Second + filterStartHeight = 0 + currentHeight = 10 + 
finishHeight = 20 + eventsPerEpoch = 2 + ) + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + mockClock := clock.NewMock() + + minerAddr, err := address.NewIDAddress(uint64(rng.Int63())) + require.NoError(t, err) + + for _, tc := range []struct { + name string + receiveSpeed time.Duration // how fast will we receive all events _per epoch_ + expectComplete bool // do we expect this to succeed? + endEpoch int // -1 for no end + }{ + {"fast", 0, true, -1}, + {"fast with end", 0, true, finishHeight}, + {"half block speed", blockDelay / 2, true, -1}, + {"half block speed with end", blockDelay / 2, true, finishHeight}, + // testing exactly blockDelay is a border case and will be flaky + {"1.5 block speed", blockDelay * 3 / 2, false, -1}, + {"twice block speed", blockDelay * 2, false, -1}, + } { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + tc := tc + t.Run(tc.name, func(t *testing.T) { + req := require.New(t) + + mockClock.Set(time.Now()) + mockFilterManager := newMockEventFilterManager(t) + allEvents := makeCollectedEvents(t, rng, filterStartHeight, eventsPerEpoch, finishHeight) + historicalEvents := allEvents[0 : (currentHeight-filterStartHeight)*eventsPerEpoch] + mockFilter := newMockFilter(ctx, t, rng, historicalEvents) + mockFilterManager.expectInstall(abi.ChainEpoch(0), abi.ChainEpoch(tc.endEpoch), cid.Undef, nil, nil, false, mockFilter) + + ts, err := types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, currentHeight)}) + req.NoError(err) + mockChain := newMockChainAccessor(t, ts) + + handler := NewActorEventHandlerWithClock(mockChain, mockFilterManager, blockDelay, maxFilterHeightRange, mockClock) + + aef := &types.ActorEventFilter{FromHeight: epochPtr(0)} + if tc.endEpoch >= 0 { + aef.ToHeight = epochPtr(tc.endEpoch) + } + eventChan, err := handler.SubscribeActorEvents(ctx, aef) + req.NoError(err) + + // assume we can cleanly pick up all historical events in one go + var gotEvents 
[]*types.ActorEvent + for len(gotEvents) < len(historicalEvents) && ctx.Err() == nil { + select { + case e, ok := <-eventChan: + req.True(ok) + gotEvents = append(gotEvents, e) + case <-ctx.Done(): + t.Fatalf("timed out waiting for event") + } + } + req.Equal(collectedToActorEvents(historicalEvents), gotEvents) + + mockClock.Add(blockDelay) + nextReceiveTime := mockClock.Now() + + // Ticker to simulate both time and the chain advancing, including emitting events at + // the right time directly to the filter. + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for thisHeight := int64(currentHeight); ctx.Err() == nil; thisHeight++ { + ts, err := types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, thisHeight)}) + req.NoError(err) + mockChain.setHeaviestTipSet(ts) + + var eventsThisEpoch []*filter.CollectedEvent + if thisHeight <= finishHeight { + eventsThisEpoch = allEvents[(thisHeight-filterStartHeight)*eventsPerEpoch : (thisHeight-filterStartHeight+2)*eventsPerEpoch] + } + for i := 0; i < eventsPerEpoch && ctx.Err() == nil; i++ { + if len(eventsThisEpoch) > 0 { + mockFilter.sendEventToChannel(eventsThisEpoch[0]) + eventsThisEpoch = eventsThisEpoch[1:] + } + select { + case <-time.After(2 * time.Millisecond): // allow everyone to catch a breath + mockClock.Add(blockDelay / eventsPerEpoch) + case <-ctx.Done(): + return + } + } + + if thisHeight == finishHeight+1 && tc.expectComplete && tc.endEpoch < 0 && ctx.Err() == nil { + // at finish+1, for the case where we expect clean completion and there is no ToEpoch + // set on the filter, if we send one more event at the next height so we end up with + // something uncollected in the buffer, causing a disconnect + evt := makeCollectedEvents(t, rng, finishHeight+1, 1, finishHeight+1)[0] + mockFilter.sendEventToChannel(evt) + } // else if endEpoch is set, we expect the chain advance to force closure + } + }() + + // Client collecting events off the channel + + var prematureEnd bool + for 
thisHeight := int64(currentHeight); thisHeight <= finishHeight && !prematureEnd && ctx.Err() == nil; thisHeight++ { + // delay to simulate latency + select { + case <-mockClock.After(nextReceiveTime.Sub(mockClock.Now())): + case <-ctx.Done(): + t.Fatalf("timed out simulating receive delay") + } + + // collect eventsPerEpoch more events + var newEvents []*types.ActorEvent + for len(newEvents) < eventsPerEpoch && !prematureEnd && ctx.Err() == nil { + select { + case e, ok := <-eventChan: // receive the events from the subscription + if ok { + newEvents = append(newEvents, e) + } else { + prematureEnd = true + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for event") + } + nextReceiveTime = nextReceiveTime.Add(tc.receiveSpeed) + } + + if tc.expectComplete || !prematureEnd { + // sanity check that we got what we expected this epoch + req.Len(newEvents, eventsPerEpoch) + epochEvents := allEvents[(thisHeight)*eventsPerEpoch : (thisHeight+1)*eventsPerEpoch] + req.Equal(collectedToActorEvents(epochEvents), newEvents) + gotEvents = append(gotEvents, newEvents...) 
+ } + } + + req.Equal(tc.expectComplete, !prematureEnd, "expected to complete") + if tc.expectComplete { + req.Len(gotEvents, len(allEvents)) + req.Equal(collectedToActorEvents(allEvents), gotEvents) + } else { + req.NotEqual(len(gotEvents), len(allEvents)) + } + + // cleanup + mockFilter.requireClearSubChannelCalledEventually(500 * time.Millisecond) + mockFilterManager.requireRemovedEventually(mockFilter.ID(), 500*time.Millisecond) + cancel() + wg.Wait() // wait for the chain to stop advancing + }) + } +} + +func TestSubscribeActorEvents_OnlyHistorical(t *testing.T) { + // Similar to TestSubscribeActorEvents but we set an explicit end that caps out at the current height + const ( + seed = 984651320 + maxFilterHeightRange = 100 + blockDelay = 30 * time.Second + filterStartHeight = 0 + currentHeight = 10 + eventsPerEpoch = 2 + ) + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + mockClock := clock.NewMock() + + minerAddr, err := address.NewIDAddress(uint64(rng.Int63())) + require.NoError(t, err) + + for _, tc := range []struct { + name string + blockTimeToComplete float64 // fraction of a block time that it takes to receive all events + expectComplete bool // do we expect this to succeed? 
+ }{ + {"fast", 0, true}, + {"half block speed", 0.5, true}, + {"1.5 block speed", 1.5, false}, + {"twice block speed", 2, false}, + } { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + tc := tc + t.Run(tc.name, func(t *testing.T) { + req := require.New(t) + + mockClock.Set(time.Now()) + mockFilterManager := newMockEventFilterManager(t) + allEvents := makeCollectedEvents(t, rng, filterStartHeight, eventsPerEpoch, currentHeight) + mockFilter := newMockFilter(ctx, t, rng, allEvents) + mockFilterManager.expectInstall(abi.ChainEpoch(0), abi.ChainEpoch(currentHeight), cid.Undef, nil, nil, false, mockFilter) + + ts, err := types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, currentHeight)}) + req.NoError(err) + mockChain := newMockChainAccessor(t, ts) + + handler := NewActorEventHandlerWithClock(mockChain, mockFilterManager, blockDelay, maxFilterHeightRange, mockClock) + + aef := &types.ActorEventFilter{FromHeight: epochPtr(0), ToHeight: epochPtr(currentHeight)} + eventChan, err := handler.SubscribeActorEvents(ctx, aef) + req.NoError(err) + + var gotEvents []*types.ActorEvent + + // assume we can cleanly pick up all historical events in one go + receiveLoop: + for ctx.Err() == nil { + select { + case e, ok := <-eventChan: + if ok { + gotEvents = append(gotEvents, e) + mockClock.Add(time.Duration(float64(blockDelay) * tc.blockTimeToComplete / float64(len(allEvents)))) + // no need to advance the chain, we're also testing that's not necessary + time.Sleep(2 * time.Millisecond) // catch a breath + } else { + break receiveLoop + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for event, got %d/%d events", len(gotEvents), len(allEvents)) + } + } + if tc.expectComplete { + req.Equal(collectedToActorEvents(allEvents), gotEvents) + } else { + req.NotEqual(len(gotEvents), len(allEvents)) + } + // advance the chain and observe cleanup + ts, err = types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, 
currentHeight+1)}) + req.NoError(err) + mockChain.setHeaviestTipSet(ts) + mockClock.Add(blockDelay) + mockFilterManager.requireRemovedEventually(mockFilter.ID(), 1*time.Second) + }) + } +} + +var ( + _ ChainAccessor = (*mockChainAccessor)(nil) + _ filter.EventFilter = (*mockFilter)(nil) + _ EventFilterManager = (*mockEventFilterManager)(nil) +) + +type mockChainAccessor struct { + t *testing.T + ts *types.TipSet + lk sync.Mutex +} + +func newMockChainAccessor(t *testing.T, ts *types.TipSet) *mockChainAccessor { + return &mockChainAccessor{t: t, ts: ts} +} + +func (m *mockChainAccessor) setHeaviestTipSet(ts *types.TipSet) { + m.lk.Lock() + defer m.lk.Unlock() + m.ts = ts +} + +func (m *mockChainAccessor) GetHeaviestTipSet() *types.TipSet { + m.lk.Lock() + defer m.lk.Unlock() + return m.ts +} + +type mockFilter struct { + t *testing.T + ctx context.Context + id types.FilterID + lastTaken time.Time + ch chan<- interface{} + historicalEvents []*filter.CollectedEvent + subChannelCalls int + clearSubChannelCalls int + lk sync.Mutex +} + +func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*filter.CollectedEvent) *mockFilter { + t.Helper() + var id [32]byte + _, err := rng.Read(id[:]) + require.NoError(t, err) + return &mockFilter{ + t: t, + ctx: ctx, + id: id, + historicalEvents: historicalEvents, + } +} + +func (m *mockFilter) sendEventToChannel(e *filter.CollectedEvent) { + m.lk.Lock() + defer m.lk.Unlock() + if m.ch != nil { + select { + case m.ch <- e: + case <-m.ctx.Done(): + } + } +} + +func (m *mockFilter) requireClearSubChannelCalledEventually(timeout time.Duration) { + m.t.Helper() + require.Eventually(m.t, + func() bool { + m.lk.Lock() + c := m.clearSubChannelCalls + m.lk.Unlock() + switch c { + case 0: + return false + case 1: + return true + default: + m.t.Fatalf("ClearSubChannel called more than once: %d", c) + return false + } + }, timeout, 10*time.Millisecond, "ClearSubChannel is not called exactly once") +} + +func 
(m *mockFilter) ID() types.FilterID { + return m.id +} + +func (m *mockFilter) LastTaken() time.Time { + return m.lastTaken +} + +func (m *mockFilter) SetSubChannel(ch chan<- interface{}) { + m.t.Helper() + m.lk.Lock() + defer m.lk.Unlock() + m.subChannelCalls++ + m.ch = ch +} + +func (m *mockFilter) ClearSubChannel() { + m.t.Helper() + m.lk.Lock() + defer m.lk.Unlock() + m.clearSubChannelCalls++ + m.ch = nil +} + +func (m *mockFilter) TakeCollectedEvents(context.Context) []*filter.CollectedEvent { + e := m.historicalEvents + m.historicalEvents = nil + m.lastTaken = time.Now() + return e +} + +func (m *mockFilter) CollectEvents(context.Context, *filter.TipSetEvents, bool, filter.AddressResolver) error { + m.t.Fatalf("unexpected call to CollectEvents") + return nil +} + +type filterManagerExpectation struct { + minHeight, maxHeight abi.ChainEpoch + tipsetCid cid.Cid + addresses []address.Address + keysWithCodec map[string][]types.ActorEventBlock + excludeReverted bool + returnFilter filter.EventFilter +} + +type mockEventFilterManager struct { + t *testing.T + expectations []filterManagerExpectation + removed []types.FilterID + lk sync.Mutex +} + +func newMockEventFilterManager(t *testing.T) *mockEventFilterManager { + return &mockEventFilterManager{t: t} +} + +func (m *mockEventFilterManager) expectInstall( + minHeight, maxHeight abi.ChainEpoch, + tipsetCid cid.Cid, + addresses []address.Address, + keysWithCodec map[string][]types.ActorEventBlock, + excludeReverted bool, + returnFilter filter.EventFilter) { + + m.t.Helper() + m.expectations = append(m.expectations, filterManagerExpectation{ + minHeight: minHeight, + maxHeight: maxHeight, + tipsetCid: tipsetCid, + addresses: addresses, + keysWithCodec: keysWithCodec, + excludeReverted: excludeReverted, + returnFilter: returnFilter, + }) +} + +func (m *mockEventFilterManager) requireRemoved(id types.FilterID) { + m.t.Helper() + m.lk.Lock() + defer m.lk.Unlock() + require.Contains(m.t, m.removed, id) +} + +func (m 
*mockEventFilterManager) requireRemovedEventually(id types.FilterID, timeout time.Duration) { + m.t.Helper() + require.Eventuallyf(m.t, func() bool { + m.lk.Lock() + defer m.lk.Unlock() + if len(m.removed) == 0 { + return false + } + assert.Contains(m.t, m.removed, id) + return true + }, timeout, 10*time.Millisecond, "filter %x not removed", id) +} + +func (m *mockEventFilterManager) Install( + _ context.Context, + minHeight, maxHeight abi.ChainEpoch, + tipsetCid cid.Cid, + addresses []address.Address, + keysWithCodec map[string][]types.ActorEventBlock, + excludeReverted bool, +) (filter.EventFilter, error) { + + require.True(m.t, len(m.expectations) > 0, "unexpected call to Install") + exp := m.expectations[0] + m.expectations = m.expectations[1:] + // check the expectation matches the call then return the attached filter + require.Equal(m.t, exp.minHeight, minHeight) + require.Equal(m.t, exp.maxHeight, maxHeight) + require.Equal(m.t, exp.tipsetCid, tipsetCid) + require.Equal(m.t, exp.addresses, addresses) + require.Equal(m.t, exp.keysWithCodec, keysWithCodec) + require.Equal(m.t, exp.excludeReverted, excludeReverted) + return exp.returnFilter, nil +} + +func (m *mockEventFilterManager) Remove(_ context.Context, id types.FilterID) error { + m.lk.Lock() + defer m.lk.Unlock() + m.removed = append(m.removed, id) + return nil +} + +func newBlockHeader(minerAddr address.Address, height int64) *types.BlockHeader { + return &types.BlockHeader{ + Miner: minerAddr, + Ticket: &types.Ticket{ + VRFProof: []byte("vrf proof0000000vrf proof0000000"), + }, + ElectionProof: &types.ElectionProof{ + VRFProof: []byte("vrf proof0000000vrf proof0000000"), + }, + Parents: []cid.Cid{testCid, testCid}, + ParentMessageReceipts: testCid, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("sign me up")}, + ParentWeight: types.NewInt(123125126212), + Messages: testCid, + Height: abi.ChainEpoch(height), + ParentStateRoot: testCid, + BlockSig: &crypto.Signature{Type: 
crypto.SigTypeBLS, Data: []byte("sign me up")}, + ParentBaseFee: types.NewInt(3432432843291), + } +} + +func epochPtr(i int) *abi.ChainEpoch { + e := abi.ChainEpoch(i) + return &e +} + +func collectedToActorEvents(collected []*filter.CollectedEvent) []*types.ActorEvent { + var out []*types.ActorEvent + for _, c := range collected { + out = append(out, &types.ActorEvent{ + Entries: c.Entries, + Emitter: c.EmitterAddr, + Reverted: c.Reverted, + Height: c.Height, + TipSetKey: c.TipSetKey, + MsgCid: c.MsgCid, + }) + } + return out +} + +func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*filter.CollectedEvent { + var out []*filter.CollectedEvent + for h := eventStartHeight; h <= eventEndHeight; h++ { + for i := int64(0); i < eventsPerHeight; i++ { + out = append(out, makeCollectedEvent(t, rng, types.NewTipSetKey(mkCid(t, fmt.Sprintf("h=%d", h))), abi.ChainEpoch(h))) + } + } + return out +} + +func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *filter.CollectedEvent { + addr, err := address.NewIDAddress(uint64(rng.Int63())) + require.NoError(t, err) + + return &filter.CollectedEvent{ + Entries: []types.EventEntry{ + {Flags: 0x01, Key: "k1", Codec: cid.Raw, Value: []byte("v1")}, + {Flags: 0x01, Key: "k2", Codec: cid.Raw, Value: []byte("v2")}, + }, + EmitterAddr: addr, + EventIdx: 0, + Reverted: false, + Height: height, + TipSetKey: tsKey, + MsgIdx: 0, + MsgCid: testCid, + } +} + +func mkCid(t *testing.T, s string) cid.Cid { + h, err := multihash.Sum([]byte(s), multihash.SHA2_256, -1) + require.NoError(t, err) + return cid.NewCidV1(cid.Raw, h) +} diff --git a/node/impl/full/dummy.go b/node/impl/full/dummy.go index 11ff95a632c..abe52dec6b8 100644 --- a/node/impl/full/dummy.go +++ b/node/impl/full/dummy.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" + 
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" ) @@ -188,3 +189,17 @@ func (e *EthModuleDummy) EthTraceReplayBlockTransactions(ctx context.Context, bl var _ EthModuleAPI = &EthModuleDummy{} var _ EthEventAPI = &EthModuleDummy{} + +var ErrActorEventModuleDisabled = errors.New("module disabled, enable with Events.EnableActorEventsAPI") + +type ActorEventDummy struct{} + +func (a *ActorEventDummy) GetActorEvents(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) { + return nil, ErrActorEventModuleDisabled +} + +func (a *ActorEventDummy) SubscribeActorEvents(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + return nil, ErrActorEventModuleDisabled +} + +var _ ActorEventAPI = &ActorEventDummy{} diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index 031a8360561..e7aeafa9085 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -12,6 +12,7 @@ import ( "time" "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" cbg "github.com/whyrusleeping/cbor-gen" "go.uber.org/fx" "golang.org/x/xerrors" @@ -136,7 +137,7 @@ type EthModule struct { var _ EthModuleAPI = (*EthModule)(nil) -type EthEvent struct { +type EthEventHandler struct { Chain *store.ChainStore EventFilterManager *filter.EventFilterManager TipSetFilterManager *filter.TipSetFilterManager @@ -147,7 +148,7 @@ type EthEvent struct { SubscribtionCtx context.Context } -var _ EthEventAPI = (*EthEvent)(nil) +var _ EthEventAPI = (*EthEventHandler)(nil) type EthAPI struct { fx.In @@ -1203,7 +1204,7 @@ func (a *EthModule) EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam e return ethtypes.EthBytes{}, nil } -func (e *EthEvent) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) { +func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) { 
if e.EventFilterManager == nil { return nil, api.ErrNotSupported } @@ -1220,7 +1221,7 @@ func (e *EthEvent) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilte return ethFilterResultFromEvents(ctx, ces, e.SubManager.StateAPI) } -func (e *EthEvent) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { +func (e *EthEventHandler) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { if e.FilterStore == nil { return nil, api.ErrNotSupported } @@ -1242,7 +1243,7 @@ func (e *EthEvent) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilte return nil, xerrors.Errorf("unknown filter type") } -func (e *EthEvent) EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { +func (e *EthEventHandler) EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { if e.FilterStore == nil { return nil, api.ErrNotSupported } @@ -1260,7 +1261,65 @@ func (e *EthEvent) EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID return nil, xerrors.Errorf("wrong filter type") } -func (e *EthEvent) installEthFilterSpec(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*filter.EventFilter, error) { +// parseBlockRange is similar to actor event's parseHeightRange but with slightly different semantics +// +// * "block" instead of "height" +// * strings that can have "latest" and "earliest" and nil +// * hex strings for actual heights +func parseBlockRange(heaviest abi.ChainEpoch, fromBlock, toBlock *string, maxRange abi.ChainEpoch) (minHeight abi.ChainEpoch, maxHeight abi.ChainEpoch, err error) { + if fromBlock == nil || *fromBlock == "latest" || len(*fromBlock) == 0 { + minHeight = heaviest + } else if *fromBlock == "earliest" { + minHeight = 0 + } else { + if !strings.HasPrefix(*fromBlock, "0x") { + return 0, 0, xerrors.Errorf("FromBlock is not a hex") + } + epoch, err := 
ethtypes.EthUint64FromHex(*fromBlock) + if err != nil { + return 0, 0, xerrors.Errorf("invalid epoch") + } + minHeight = abi.ChainEpoch(epoch) + } + + if toBlock == nil || *toBlock == "latest" || len(*toBlock) == 0 { + // here latest means the latest at the time + maxHeight = -1 + } else if *toBlock == "earliest" { + maxHeight = 0 + } else { + if !strings.HasPrefix(*toBlock, "0x") { + return 0, 0, xerrors.Errorf("ToBlock is not a hex") + } + epoch, err := ethtypes.EthUint64FromHex(*toBlock) + if err != nil { + return 0, 0, xerrors.Errorf("invalid epoch") + } + maxHeight = abi.ChainEpoch(epoch) + } + + // Validate height ranges are within limits set by node operator + if minHeight == -1 && maxHeight > 0 { + // Here the client is looking for events between the head and some future height + if maxHeight-heaviest > maxRange { + return 0, 0, xerrors.Errorf("invalid epoch range: to block is too far in the future (maximum: %d)", maxRange) + } + } else if minHeight >= 0 && maxHeight == -1 { + // Here the client is looking for events between some time in the past and the current head + if heaviest-minHeight > maxRange { + return 0, 0, xerrors.Errorf("invalid epoch range: from block is too far in the past (maximum: %d)", maxRange) + } + } else if minHeight >= 0 && maxHeight >= 0 { + if minHeight > maxHeight { + return 0, 0, xerrors.Errorf("invalid epoch range: to block (%d) must be after from block (%d)", minHeight, maxHeight) + } else if maxHeight-minHeight > maxRange { + return 0, 0, xerrors.Errorf("invalid epoch range: range between to and from blocks is too large (maximum: %d)", maxRange) + } + } + return minHeight, maxHeight, nil +} + +func (e *EthEventHandler) installEthFilterSpec(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (filter.EventFilter, error) { var ( minHeight abi.ChainEpoch maxHeight abi.ChainEpoch @@ -1276,64 +1335,11 @@ func (e *EthEvent) installEthFilterSpec(ctx context.Context, filterSpec *ethtype tipsetCid = filterSpec.BlockHash.ToCid() } 
else { - if filterSpec.FromBlock == nil || *filterSpec.FromBlock == "latest" { - ts := e.Chain.GetHeaviestTipSet() - minHeight = ts.Height() - } else if *filterSpec.FromBlock == "earliest" { - minHeight = 0 - } else if *filterSpec.FromBlock == "pending" { - return nil, api.ErrNotSupported - } else { - if !strings.HasPrefix(*filterSpec.FromBlock, "0x") { - return nil, xerrors.Errorf("FromBlock is not a hex") - } - epoch, err := ethtypes.EthUint64FromHex(*filterSpec.FromBlock) - if err != nil { - return nil, xerrors.Errorf("invalid epoch") - } - minHeight = abi.ChainEpoch(epoch) - } - - if filterSpec.ToBlock == nil || *filterSpec.ToBlock == "latest" { - // here latest means the latest at the time - maxHeight = -1 - } else if *filterSpec.ToBlock == "earliest" { - maxHeight = 0 - } else if *filterSpec.ToBlock == "pending" { - return nil, api.ErrNotSupported - } else { - if !strings.HasPrefix(*filterSpec.ToBlock, "0x") { - return nil, xerrors.Errorf("ToBlock is not a hex") - } - epoch, err := ethtypes.EthUint64FromHex(*filterSpec.ToBlock) - if err != nil { - return nil, xerrors.Errorf("invalid epoch") - } - maxHeight = abi.ChainEpoch(epoch) - } - - // Validate height ranges are within limits set by node operator - if minHeight == -1 && maxHeight > 0 { - // Here the client is looking for events between the head and some future height - ts := e.Chain.GetHeaviestTipSet() - if maxHeight-ts.Height() > e.MaxFilterHeightRange { - return nil, xerrors.Errorf("invalid epoch range: to block is too far in the future (maximum: %d)", e.MaxFilterHeightRange) - } - } else if minHeight >= 0 && maxHeight == -1 { - // Here the client is looking for events between some time in the past and the current head - ts := e.Chain.GetHeaviestTipSet() - if ts.Height()-minHeight > e.MaxFilterHeightRange { - return nil, xerrors.Errorf("invalid epoch range: from block is too far in the past (maximum: %d)", e.MaxFilterHeightRange) - } - - } else if minHeight >= 0 && maxHeight >= 0 { - if minHeight > 
maxHeight { - return nil, xerrors.Errorf("invalid epoch range: to block (%d) must be after from block (%d)", minHeight, maxHeight) - } else if maxHeight-minHeight > e.MaxFilterHeightRange { - return nil, xerrors.Errorf("invalid epoch range: range between to and from blocks is too large (maximum: %d)", e.MaxFilterHeightRange) - } + var err error + minHeight, maxHeight, err = parseBlockRange(e.Chain.GetHeaviestTipSet().Height(), filterSpec.FromBlock, filterSpec.ToBlock, e.MaxFilterHeightRange) + if err != nil { + return nil, err } - } // Convert all addresses to filecoin f4 addresses @@ -1350,10 +1356,23 @@ func (e *EthEvent) installEthFilterSpec(ctx context.Context, filterSpec *ethtype return nil, err } - return e.EventFilterManager.Install(ctx, minHeight, maxHeight, tipsetCid, addresses, keys) + return e.EventFilterManager.Install(ctx, minHeight, maxHeight, tipsetCid, addresses, keysToKeysWithCodec(keys), true) +} + +func keysToKeysWithCodec(keys map[string][][]byte) map[string][]types.ActorEventBlock { + keysWithCodec := make(map[string][]types.ActorEventBlock) + for k, v := range keys { + for _, vv := range v { + keysWithCodec[k] = append(keysWithCodec[k], types.ActorEventBlock{ + Codec: uint64(multicodec.Raw), // FEVM smart contract events are always encoded with the `raw` Codec. 
+ Value: vv, + }) + } + } + return keysWithCodec } -func (e *EthEvent) EthNewFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) { +func (e *EthEventHandler) EthNewFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) { if e.FilterStore == nil || e.EventFilterManager == nil { return ethtypes.EthFilterID{}, api.ErrNotSupported } @@ -1375,7 +1394,7 @@ func (e *EthEvent) EthNewFilter(ctx context.Context, filterSpec *ethtypes.EthFil return ethtypes.EthFilterID(f.ID()), nil } -func (e *EthEvent) EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) { +func (e *EthEventHandler) EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) { if e.FilterStore == nil || e.TipSetFilterManager == nil { return ethtypes.EthFilterID{}, api.ErrNotSupported } @@ -1398,7 +1417,7 @@ func (e *EthEvent) EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, return ethtypes.EthFilterID(f.ID()), nil } -func (e *EthEvent) EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) { +func (e *EthEventHandler) EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) { if e.FilterStore == nil || e.MemPoolFilterManager == nil { return ethtypes.EthFilterID{}, api.ErrNotSupported } @@ -1421,7 +1440,7 @@ func (e *EthEvent) EthNewPendingTransactionFilter(ctx context.Context) (ethtypes return ethtypes.EthFilterID(f.ID()), nil } -func (e *EthEvent) EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) { +func (e *EthEventHandler) EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) { if e.FilterStore == nil { return false, api.ErrNotSupported } @@ -1441,9 +1460,9 @@ func (e *EthEvent) EthUninstallFilter(ctx context.Context, id ethtypes.EthFilter return true, nil } -func (e *EthEvent) uninstallFilter(ctx context.Context, f filter.Filter) error { +func (e *EthEventHandler) uninstallFilter(ctx 
context.Context, f filter.Filter) error { switch f.(type) { - case *filter.EventFilter: + case filter.EventFilter: err := e.EventFilterManager.Remove(ctx, f.ID()) if err != nil && !errors.Is(err, filter.ErrFilterNotFound) { return err @@ -1471,7 +1490,7 @@ const ( EthSubscribeEventTypePendingTransactions = "newPendingTransactions" ) -func (e *EthEvent) EthSubscribe(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) { +func (e *EthEventHandler) EthSubscribe(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) { params, err := jsonrpc.DecodeParams[ethtypes.EthSubscribeParams](p) if err != nil { return ethtypes.EthSubscriptionID{}, xerrors.Errorf("decoding params: %w", err) @@ -1524,7 +1543,7 @@ func (e *EthEvent) EthSubscribe(ctx context.Context, p jsonrpc.RawParams) (ethty } } - f, err := e.EventFilterManager.Install(ctx, -1, -1, cid.Undef, addresses, keys) + f, err := e.EventFilterManager.Install(ctx, -1, -1, cid.Undef, addresses, keysToKeysWithCodec(keys), true) if err != nil { // clean up any previous filters added and stop the sub _, _ = e.EthUnsubscribe(ctx, sub.id) @@ -1547,7 +1566,7 @@ func (e *EthEvent) EthSubscribe(ctx context.Context, p jsonrpc.RawParams) (ethty return sub.id, nil } -func (e *EthEvent) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) { +func (e *EthEventHandler) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) { if e.SubManager == nil { return false, api.ErrNotSupported } @@ -1561,7 +1580,7 @@ func (e *EthEvent) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscripti } // GC runs a garbage collection loop, deleting filters that have not been used within the ttl window -func (e *EthEvent) GC(ctx context.Context, ttl time.Duration) { +func (e *EthEventHandler) GC(ctx context.Context, ttl time.Duration) { if e.FilterStore == nil { return } diff --git a/node/impl/full/eth_event.go b/node/impl/full/eth_events.go similarity 
index 100% rename from node/impl/full/eth_event.go rename to node/impl/full/eth_events.go diff --git a/node/impl/full/eth_test.go b/node/impl/full/eth_test.go index 05c3f257504..6f9d8f297ee 100644 --- a/node/impl/full/eth_test.go +++ b/node/impl/full/eth_test.go @@ -3,6 +3,7 @@ package full import ( "bytes" "encoding/hex" + "fmt" "testing" "github.com/ipfs/go-cid" @@ -10,12 +11,87 @@ import ( "github.com/stretchr/testify/require" cbg "github.com/whyrusleeping/cbor-gen" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" ) +func TestParseBlockRange(t *testing.T) { + pstring := func(s string) *string { return &s } + + tcs := map[string]struct { + heaviest abi.ChainEpoch + from *string + to *string + maxRange abi.ChainEpoch + minOut abi.ChainEpoch + maxOut abi.ChainEpoch + errStr string + }{ + "fails when both are specified and range is greater than max allowed range": { + heaviest: 100, + from: pstring("0x100"), + to: pstring("0x200"), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "too large", + }, + "fails when min is specified and range is greater than max allowed range": { + heaviest: 500, + from: pstring("0x10"), + to: pstring("latest"), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "too far in the past", + }, + "fails when max is specified and range is greater than max allowed range": { + heaviest: 500, + from: pstring("earliest"), + to: pstring("0x10000"), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "too large", + }, + "works when range is valid": { + heaviest: 500, + from: pstring("earliest"), + to: pstring("latest"), + maxRange: 1000, + minOut: 0, + maxOut: -1, + }, + "works when range is valid and specified": { + heaviest: 500, + from: pstring("0x10"), + to: pstring("0x30"), + maxRange: 1000, + minOut: 16, + maxOut: 48, + }, + } + + for name, tc := range tcs { + tc2 := tc + t.Run(name, 
func(t *testing.T) { + min, max, err := parseBlockRange(tc2.heaviest, tc2.from, tc2.to, tc2.maxRange) + require.Equal(t, tc2.minOut, min) + require.Equal(t, tc2.maxOut, max) + if tc2.errStr != "" { + fmt.Println(err) + require.Error(t, err) + require.Contains(t, err.Error(), tc2.errStr) + } else { + require.NoError(t, err) + } + }) + } +} + func TestEthLogFromEvent(t *testing.T) { // basic empty data, topics, ok := ethLogFromEvent(nil) diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 0e92c8e5b6f..dda8898325f 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -762,7 +762,7 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m } out[strconv.FormatInt(int64(dealID), 10)] = &api.MarketDeal{ Proposal: d, - State: *s, + State: api.MakeDealState(s), } return nil }); err != nil { @@ -779,18 +779,27 @@ func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.Dea return stmgr.GetStorageDeal(ctx, m.StateManager, dealId, ts) } -func (a *StateAPI) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifreg.Allocation, error) { +func (a *StateAPI) StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) { ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + return verifreg.NoAllocationID, xerrors.Errorf("loading tipset %s: %w", tsk, err) } st, err := a.StateManager.GetMarketState(ctx, ts) if err != nil { - return nil, err + return verifreg.NoAllocationID, err } allocationId, err := st.GetAllocationIdForPendingDeal(dealId) + if err != nil { + return verifreg.NoAllocationID, err + } + + return allocationId, nil +} + +func (a *StateAPI) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifreg.Allocation, error) { + allocationId, err := 
a.StateGetAllocationIdForPendingDeal(ctx, dealId, tsk) if err != nil { return nil, err } @@ -857,6 +866,25 @@ func (a *StateAPI) StateGetAllocations(ctx context.Context, clientAddr address.A return allocations, nil } +func (a *StateAPI) StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) + if err != nil { + return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + + st, err := a.StateManager.GetVerifregState(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("loading verifreg state: %w", err) + } + + allocations, err := st.GetAllAllocations() + if err != nil { + return nil, xerrors.Errorf("getting all allocations: %w", err) + } + + return allocations, nil +} + func (a *StateAPI) StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifreg.ClaimId, tsk types.TipSetKey) (*verifreg.Claim, error) { idAddr, err := a.StateLookupID(ctx, providerAddr, tsk) if err != nil { @@ -908,6 +936,25 @@ func (a *StateAPI) StateGetClaims(ctx context.Context, providerAddr address.Addr return claims, nil } +func (a *StateAPI) StateGetAllClaims(ctx context.Context, tsk types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) { + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) + if err != nil { + return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + + st, err := a.StateManager.GetVerifregState(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("loading verifreg state: %w", err) + } + + claims, err := st.GetAllClaims() + if err != nil { + return nil, xerrors.Errorf("getting all claims: %w", err) + } + + return claims, nil +} + func (a *StateAPI) StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) { nv, err := a.StateNetworkVersion(ctx, tsk) if err != nil { @@ -1914,6 +1961,8 @@ func (a *StateAPI) 
StateGetNetworkParams(ctx context.Context) (*api.NetworkParam UpgradeLightningHeight: build.UpgradeLightningHeight, UpgradeThunderHeight: build.UpgradeThunderHeight, UpgradeWatermelonHeight: build.UpgradeWatermelonHeight, + UpgradeDragonHeight: build.UpgradeDragonHeight, + UpgradePhoenixHeight: build.UpgradePhoenixHeight, }, }, nil } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 2ce42c32715..90248a355a4 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -54,6 +54,7 @@ import ( "github.com/filecoin-project/lotus/storage/ctladdr" "github.com/filecoin-project/lotus/storage/paths" sealing "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/fsutil" @@ -243,7 +244,7 @@ func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumb return sInfo, nil } -func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) { +func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (api.SectorOffset, error) { so, err := sm.Miner.SectorAddPieceToAny(ctx, size, r, d) if err != nil { // jsonrpc doesn't support returning values with errors, make sure we never do that @@ -506,7 +507,7 @@ func (sm *StorageMinerAPI) ComputeWindowPoSt(ctx context.Context, dlIdx uint64, } func (sm *StorageMinerAPI) ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) { - return sm.StorageMgr.DataCid(ctx, pieceSize, pieceData) + return sm.IStorageMgr.DataCid(ctx, pieceSize, pieceData) } func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error { diff --git 
a/node/modules/actorevent.go b/node/modules/actorevent.go index 4ce04cefd07..135a34e5be7 100644 --- a/node/modules/actorevent.go +++ b/node/modules/actorevent.go @@ -5,13 +5,12 @@ import ( "path/filepath" "time" - "github.com/multiformats/go-varint" "go.uber.org/fx" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/messagepool" @@ -24,20 +23,20 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -type EventAPI struct { +type EventHelperAPI struct { fx.In full.ChainAPI full.StateAPI } -var _ events.EventAPI = &EventAPI{} +var _ events.EventHelperAPI = &EventHelperAPI{} -func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.EthEvent, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.EthEvent, error) { +func EthEventHandler(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.EthEventHandler, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, fm *filter.EventFilterManager, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.EthEventHandler, error) { ctx := helpers.LifecycleCtx(mctx, lc) - ee := &full.EthEvent{ + ee := 
&full.EthEventHandler{ Chain: cs, MaxFilterHeightRange: abi.ChainEpoch(cfg.Events.MaxFilterHeightRange), SubscribtionCtx: ctx, @@ -64,6 +63,41 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo }, }) + ee.TipSetFilterManager = &filter.TipSetFilterManager{ + MaxFilterResults: cfg.Events.MaxFilterResults, + } + ee.MemPoolFilterManager = &filter.MemPoolFilterManager{ + MaxFilterResults: cfg.Events.MaxFilterResults, + } + ee.EventFilterManager = fm + + lc.Append(fx.Hook{ + OnStart: func(context.Context) error { + ev, err := events.NewEvents(ctx, &evapi) + if err != nil { + return err + } + // ignore returned tipsets + _ = ev.Observe(ee.TipSetFilterManager) + + ch, err := mp.Updates(ctx) + if err != nil { + return err + } + go ee.MemPoolFilterManager.WaitForMpoolUpdates(ctx, ch) + + return nil + }, + }) + + return ee, nil + } +} + +func EventFilterManager(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, full.ChainAPI) (*filter.EventFilterManager, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, chainapi full.ChainAPI) (*filter.EventFilterManager, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + // Enable indexing of actor events var eventIndex *filter.EventIndex if !cfg.Events.DisableHistoricFilterAPI { @@ -91,11 +125,12 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo }) } - ee.EventFilterManager = &filter.EventFilterManager{ + fm := &filter.EventFilterManager{ ChainStore: cs, EventIndex: eventIndex, // will be nil unless EnableHistoricFilterAPI is true + // TODO: + // We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands AddressResolver: func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { - // we only want to match 
using f4 addresses idAddr, err := address.NewIDAddress(uint64(emitter)) if err != nil { return address.Undef, false @@ -103,28 +138,14 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo actor, err := sm.LoadActor(ctx, idAddr, ts) if err != nil || actor.Address == nil { - return address.Undef, false + return idAddr, true } - // if robust address is not f4 then we won't match against it so bail early - if actor.Address.Protocol() != address.Delegated { - return address.Undef, false - } - // we have an f4 address, make sure it's assigned by the EAM - if namespace, _, err := varint.FromUvarint(actor.Address.Payload()); err != nil || namespace != builtintypes.EthereumAddressManagerActorID { - return address.Undef, false - } return *actor.Address, true }, MaxFilterResults: cfg.Events.MaxFilterResults, } - ee.TipSetFilterManager = &filter.TipSetFilterManager{ - MaxFilterResults: cfg.Events.MaxFilterResults, - } - ee.MemPoolFilterManager = &filter.MemPoolFilterManager{ - MaxFilterResults: cfg.Events.MaxFilterResults, - } lc.Append(fx.Hook{ OnStart: func(context.Context) error { @@ -132,20 +153,27 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo if err != nil { return err } - // ignore returned tipsets - _ = ev.Observe(ee.EventFilterManager) - _ = ev.Observe(ee.TipSetFilterManager) - - ch, err := mp.Updates(ctx) - if err != nil { - return err - } - go ee.MemPoolFilterManager.WaitForMpoolUpdates(ctx, ch) - + _ = ev.Observe(fm) return nil }, }) - return ee, nil + return fm, nil + } +} + +func ActorEventHandler(enable bool, fevmCfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.ActorEventHandler, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, fm *filter.EventFilterManager, cs *store.ChainStore, sm 
*stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.ActorEventHandler, error) { + + if !enable || fevmCfg.Events.DisableRealTimeFilterAPI { + fm = nil + } + + return full.NewActorEventHandler( + cs, + fm, + time.Duration(build.BlockDelaySecs)*time.Second, + abi.ChainEpoch(fevmCfg.Events.MaxFilterHeightRange), + ), nil } } diff --git a/node/modules/dtypes/beacon.go b/node/modules/dtypes/beacon.go index 28bbdf281d3..91dd5cf57f9 100644 --- a/node/modules/dtypes/beacon.go +++ b/node/modules/dtypes/beacon.go @@ -13,4 +13,5 @@ type DrandConfig struct { Servers []string Relays []string ChainInfoJSON string + IsChained bool // Prior to Drand quicknet, beacons form a chain, post quicknet they do not (FIP-0063) } diff --git a/node/modules/ethmodule.go b/node/modules/ethmodule.go index 0255b61983f..b36416e4e56 100644 --- a/node/modules/ethmodule.go +++ b/node/modules/ethmodule.go @@ -21,8 +21,8 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI) (*full.EthModule, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI) (*full.EthModule, error) { +func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI) (*full.EthModule, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp 
*messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI) (*full.EthModule, error) { sqlitePath, err := r.SqlitePath() if err != nil { return nil, err diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 11635a24015..1b9988b9563 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -1023,6 +1023,11 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error TerminateBatchWait: config.Duration(cfg.TerminateBatchWait), MaxSectorProveCommitsSubmittedPerEpoch: cfg.MaxSectorProveCommitsSubmittedPerEpoch, UseSyntheticPoRep: cfg.UseSyntheticPoRep, + + RequireActivationSuccess: cfg.RequireActivationSuccess, + RequireActivationSuccessUpdate: cfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: cfg.RequireNotificationSuccess, + RequireNotificationSuccessUpdate: cfg.RequireNotificationSuccessUpdate, } c.SetSealingConfig(newCfg) }) @@ -1068,6 +1073,11 @@ func ToSealingConfig(dealmakingCfg config.DealmakingConfig, sealingCfg config.Se TerminateBatchMin: sealingCfg.TerminateBatchMin, TerminateBatchWait: time.Duration(sealingCfg.TerminateBatchWait), UseSyntheticPoRep: sealingCfg.UseSyntheticPoRep, + + RequireActivationSuccess: sealingCfg.RequireActivationSuccess, + RequireActivationSuccessUpdate: sealingCfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: sealingCfg.RequireNotificationSuccess, + RequireNotificationSuccessUpdate: sealingCfg.RequireNotificationSuccessUpdate, } } diff --git a/paychmgr/cbor_gen.go b/paychmgr/cbor_gen.go index f97c176a304..3f8aaa7b5a5 100644 --- a/paychmgr/cbor_gen.go +++ b/paychmgr/cbor_gen.go @@ -34,7 +34,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { } // t.Proof ([]uint8) (slice) - if len("Proof") > cbg.MaxLength { + if len("Proof") > 8192 { return xerrors.Errorf("Value in field \"Proof\" was too long") } @@ -45,7 +45,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) 
error { return err } - if len(t.Proof) > cbg.ByteArrayMaxLen { + if len(t.Proof) > 2097152 { return xerrors.Errorf("Byte array in field t.Proof was too long") } @@ -53,12 +53,12 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Proof[:]); err != nil { + if _, err := cw.Write(t.Proof); err != nil { return err } // t.Voucher (paych.SignedVoucher) (struct) - if len("Voucher") > cbg.MaxLength { + if len("Voucher") > 8192 { return xerrors.Errorf("Value in field \"Voucher\" was too long") } @@ -74,7 +74,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { } // t.Submitted (bool) (bool) - if len("Submitted") > cbg.MaxLength { + if len("Submitted") > 8192 { return xerrors.Errorf("Value in field \"Submitted\" was too long") } @@ -120,7 +120,7 @@ func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -137,7 +137,7 @@ func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Proof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -148,9 +148,10 @@ func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) (err error) { t.Proof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Proof[:]); err != nil { + if _, err := io.ReadFull(cr, t.Proof); err != nil { return err } + // t.Voucher (paych.SignedVoucher) (struct) case "Voucher": @@ -211,7 +212,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Amount (big.Int) (struct) - if len("Amount") > cbg.MaxLength { + if len("Amount") > 8192 { return xerrors.Errorf("Value in field \"Amount\" was too long") } @@ -227,7 +228,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Target (address.Address) (struct) - if len("Target") > cbg.MaxLength { + if len("Target") > 8192 { return 
xerrors.Errorf("Value in field \"Target\" was too long") } @@ -243,7 +244,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Channel (address.Address) (struct) - if len("Channel") > cbg.MaxLength { + if len("Channel") > 8192 { return xerrors.Errorf("Value in field \"Channel\" was too long") } @@ -259,7 +260,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Control (address.Address) (struct) - if len("Control") > cbg.MaxLength { + if len("Control") > 8192 { return xerrors.Errorf("Value in field \"Control\" was too long") } @@ -275,7 +276,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.NextLane (uint64) (uint64) - if len("NextLane") > cbg.MaxLength { + if len("NextLane") > 8192 { return xerrors.Errorf("Value in field \"NextLane\" was too long") } @@ -291,7 +292,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Settling (bool) (bool) - if len("Settling") > cbg.MaxLength { + if len("Settling") > 8192 { return xerrors.Errorf("Value in field \"Settling\" was too long") } @@ -307,7 +308,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Vouchers ([]*paychmgr.VoucherInfo) (slice) - if len("Vouchers") > cbg.MaxLength { + if len("Vouchers") > 8192 { return xerrors.Errorf("Value in field \"Vouchers\" was too long") } @@ -318,7 +319,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Vouchers) > cbg.MaxLength { + if len(t.Vouchers) > 8192 { return xerrors.Errorf("Slice value in field t.Vouchers was too long") } @@ -329,10 +330,11 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.ChannelID (string) (string) - if len("ChannelID") > cbg.MaxLength { + if len("ChannelID") > 8192 { return xerrors.Errorf("Value in field \"ChannelID\" was too long") } @@ -343,7 +345,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.ChannelID) > cbg.MaxLength { + if len(t.ChannelID) 
> 8192 { return xerrors.Errorf("Value in field t.ChannelID was too long") } @@ -355,7 +357,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.CreateMsg (cid.Cid) (struct) - if len("CreateMsg") > cbg.MaxLength { + if len("CreateMsg") > 8192 { return xerrors.Errorf("Value in field \"CreateMsg\" was too long") } @@ -377,7 +379,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Direction (uint64) (uint64) - if len("Direction") > cbg.MaxLength { + if len("Direction") > 8192 { return xerrors.Errorf("Value in field \"Direction\" was too long") } @@ -393,7 +395,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.AddFundsMsg (cid.Cid) (struct) - if len("AddFundsMsg") > cbg.MaxLength { + if len("AddFundsMsg") > 8192 { return xerrors.Errorf("Value in field \"AddFundsMsg\" was too long") } @@ -415,7 +417,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.PendingAmount (big.Int) (struct) - if len("PendingAmount") > cbg.MaxLength { + if len("PendingAmount") > 8192 { return xerrors.Errorf("Value in field \"PendingAmount\" was too long") } @@ -431,7 +433,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.AvailableAmount (big.Int) (struct) - if len("AvailableAmount") > cbg.MaxLength { + if len("AvailableAmount") > 8192 { return xerrors.Errorf("Value in field \"AvailableAmount\" was too long") } @@ -447,7 +449,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.PendingAvailableAmount (big.Int) (struct) - if len("PendingAvailableAmount") > cbg.MaxLength { + if len("PendingAvailableAmount") > 8192 { return xerrors.Errorf("Value in field \"PendingAvailableAmount\" was too long") } @@ -493,7 +495,7 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -593,7 +595,7 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { return 
err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Vouchers: array too large (%d)", extra) } @@ -631,14 +633,14 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.ChannelID (string) (string) case "ChannelID": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -758,7 +760,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { } // t.Err (string) (string) - if len("Err") > cbg.MaxLength { + if len("Err") > 8192 { return xerrors.Errorf("Value in field \"Err\" was too long") } @@ -769,7 +771,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Err) > cbg.MaxLength { + if len(t.Err) > 8192 { return xerrors.Errorf("Value in field t.Err was too long") } @@ -781,7 +783,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { } // t.MsgCid (cid.Cid) (struct) - if len("MsgCid") > cbg.MaxLength { + if len("MsgCid") > 8192 { return xerrors.Errorf("Value in field \"MsgCid\" was too long") } @@ -797,7 +799,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { } // t.Received (bool) (bool) - if len("Received") > cbg.MaxLength { + if len("Received") > 8192 { return xerrors.Errorf("Value in field \"Received\" was too long") } @@ -813,7 +815,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { } // t.ChannelID (string) (string) - if len("ChannelID") > cbg.MaxLength { + if len("ChannelID") > 8192 { return xerrors.Errorf("Value in field \"ChannelID\" was too long") } @@ -824,7 +826,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.ChannelID) > cbg.MaxLength { + if len(t.ChannelID) > 8192 { return xerrors.Errorf("Value in field t.ChannelID was too long") } @@ -866,7 +868,7 @@ func (t *MsgInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -879,7 +881,7 @@ func (t 
*MsgInfo) UnmarshalCBOR(r io.Reader) (err error) { case "Err": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -921,7 +923,7 @@ func (t *MsgInfo) UnmarshalCBOR(r io.Reader) (err error) { case "ChannelID": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } diff --git a/storage/pipeline/cbor_gen.go b/storage/pipeline/cbor_gen.go index c832f8a144a..f32c48b4d46 100644 --- a/storage/pipeline/cbor_gen.go +++ b/storage/pipeline/cbor_gen.go @@ -14,7 +14,6 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" - api "github.com/filecoin-project/lotus/api" storiface "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -36,7 +35,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.Log ([]sealing.Log) (slice) - if len("Log") > cbg.MaxLength { + if len("Log") > 8192 { return xerrors.Errorf("Value in field \"Log\" was too long") } @@ -47,7 +46,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Log) > cbg.MaxLength { + if len(t.Log) > 8192 { return xerrors.Errorf("Slice value in field t.Log was too long") } @@ -58,10 +57,11 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.CommD (cid.Cid) (struct) - if len("CommD") > cbg.MaxLength { + if len("CommD") > 8192 { return xerrors.Errorf("Value in field \"CommD\" was too long") } @@ -83,7 +83,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.CommR (cid.Cid) (struct) - if len("CommR") > cbg.MaxLength { + if len("CommR") > 8192 { return xerrors.Errorf("Value in field \"CommR\" was too long") } @@ -105,7 +105,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.Proof ([]uint8) (slice) - if len("Proof") > cbg.MaxLength { + if len("Proof") > 8192 { return xerrors.Errorf("Value in field \"Proof\" was too long") } @@ -116,7 +116,7 @@ func (t 
*SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Proof) > cbg.ByteArrayMaxLen { + if len(t.Proof) > 2097152 { return xerrors.Errorf("Byte array in field t.Proof was too long") } @@ -124,12 +124,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Proof[:]); err != nil { + if _, err := cw.Write(t.Proof); err != nil { return err } // t.State (sealing.SectorState) (string) - if len("State") > cbg.MaxLength { + if len("State") > 8192 { return xerrors.Errorf("Value in field \"State\" was too long") } @@ -140,7 +140,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.State) > cbg.MaxLength { + if len(t.State) > 8192 { return xerrors.Errorf("Value in field t.State was too long") } @@ -151,8 +151,8 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - // t.Pieces ([]api.SectorPiece) (slice) - if len("Pieces") > cbg.MaxLength { + // t.Pieces ([]sealing.SafeSectorPiece) (slice) + if len("Pieces") > 8192 { return xerrors.Errorf("Value in field \"Pieces\" was too long") } @@ -163,7 +163,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Pieces) > cbg.MaxLength { + if len(t.Pieces) > 8192 { return xerrors.Errorf("Slice value in field t.Pieces was too long") } @@ -174,10 +174,11 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Return (sealing.ReturnState) (string) - if len("Return") > cbg.MaxLength { + if len("Return") > 8192 { return xerrors.Errorf("Value in field \"Return\" was too long") } @@ -188,7 +189,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Return) > cbg.MaxLength { + if len(t.Return) > 8192 { return xerrors.Errorf("Value in field t.Return was too long") } @@ -200,7 +201,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.LastErr (string) (string) - if len("LastErr") > cbg.MaxLength { + if 
len("LastErr") > 8192 { return xerrors.Errorf("Value in field \"LastErr\" was too long") } @@ -211,7 +212,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.LastErr) > cbg.MaxLength { + if len(t.LastErr) > 8192 { return xerrors.Errorf("Value in field t.LastErr was too long") } @@ -222,8 +223,8 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - // t.CCPieces ([]api.SectorPiece) (slice) - if len("CCPieces") > cbg.MaxLength { + // t.CCPieces ([]sealing.SafeSectorPiece) (slice) + if len("CCPieces") > 8192 { return xerrors.Errorf("Value in field \"CCPieces\" was too long") } @@ -234,7 +235,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.CCPieces) > cbg.MaxLength { + if len(t.CCPieces) > 8192 { return xerrors.Errorf("Slice value in field t.CCPieces was too long") } @@ -245,10 +246,11 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.CCUpdate (bool) (bool) - if len("CCUpdate") > cbg.MaxLength { + if len("CCUpdate") > 8192 { return xerrors.Errorf("Value in field \"CCUpdate\" was too long") } @@ -264,7 +266,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.SeedEpoch (abi.ChainEpoch) (int64) - if len("SeedEpoch") > cbg.MaxLength { + if len("SeedEpoch") > 8192 { return xerrors.Errorf("Value in field \"SeedEpoch\" was too long") } @@ -286,7 +288,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.SeedValue (abi.InteractiveSealRandomness) (slice) - if len("SeedValue") > cbg.MaxLength { + if len("SeedValue") > 8192 { return xerrors.Errorf("Value in field \"SeedValue\" was too long") } @@ -297,7 +299,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.SeedValue) > cbg.ByteArrayMaxLen { + if len(t.SeedValue) > 2097152 { return xerrors.Errorf("Byte array in field t.SeedValue was too long") } @@ -305,12 +307,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { 
return err } - if _, err := cw.Write(t.SeedValue[:]); err != nil { + if _, err := cw.Write(t.SeedValue); err != nil { return err } // t.SectorType (abi.RegisteredSealProof) (int64) - if len("SectorType") > cbg.MaxLength { + if len("SectorType") > 8192 { return xerrors.Errorf("Value in field \"SectorType\" was too long") } @@ -332,7 +334,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.TicketEpoch (abi.ChainEpoch) (int64) - if len("TicketEpoch") > cbg.MaxLength { + if len("TicketEpoch") > 8192 { return xerrors.Errorf("Value in field \"TicketEpoch\" was too long") } @@ -354,7 +356,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.TicketValue (abi.SealRandomness) (slice) - if len("TicketValue") > cbg.MaxLength { + if len("TicketValue") > 8192 { return xerrors.Errorf("Value in field \"TicketValue\" was too long") } @@ -365,7 +367,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.TicketValue) > cbg.ByteArrayMaxLen { + if len(t.TicketValue) > 2097152 { return xerrors.Errorf("Byte array in field t.TicketValue was too long") } @@ -373,12 +375,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.TicketValue[:]); err != nil { + if _, err := cw.Write(t.TicketValue); err != nil { return err } // t.CreationTime (int64) (int64) - if len("CreationTime") > cbg.MaxLength { + if len("CreationTime") > 8192 { return xerrors.Errorf("Value in field \"CreationTime\" was too long") } @@ -400,7 +402,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.SectorNumber (abi.SectorNumber) (uint64) - if len("SectorNumber") > cbg.MaxLength { + if len("SectorNumber") > 8192 { return xerrors.Errorf("Value in field \"SectorNumber\" was too long") } @@ -416,7 +418,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.TerminatedAt (abi.ChainEpoch) (int64) - if len("TerminatedAt") > cbg.MaxLength { + if len("TerminatedAt") > 8192 { return xerrors.Errorf("Value in field 
\"TerminatedAt\" was too long") } @@ -438,7 +440,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.UpdateSealed (cid.Cid) (struct) - if len("UpdateSealed") > cbg.MaxLength { + if len("UpdateSealed") > 8192 { return xerrors.Errorf("Value in field \"UpdateSealed\" was too long") } @@ -460,7 +462,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.CommitMessage (cid.Cid) (struct) - if len("CommitMessage") > cbg.MaxLength { + if len("CommitMessage") > 8192 { return xerrors.Errorf("Value in field \"CommitMessage\" was too long") } @@ -482,7 +484,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.InvalidProofs (uint64) (uint64) - if len("InvalidProofs") > cbg.MaxLength { + if len("InvalidProofs") > 8192 { return xerrors.Errorf("Value in field \"InvalidProofs\" was too long") } @@ -498,7 +500,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommit1Out (storiface.PreCommit1Out) (slice) - if len("PreCommit1Out") > cbg.MaxLength { + if len("PreCommit1Out") > 8192 { return xerrors.Errorf("Value in field \"PreCommit1Out\" was too long") } @@ -509,7 +511,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.PreCommit1Out) > cbg.ByteArrayMaxLen { + if len(t.PreCommit1Out) > 2097152 { return xerrors.Errorf("Byte array in field t.PreCommit1Out was too long") } @@ -517,12 +519,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.PreCommit1Out[:]); err != nil { + if _, err := cw.Write(t.PreCommit1Out); err != nil { return err } // t.FaultReportMsg (cid.Cid) (struct) - if len("FaultReportMsg") > cbg.MaxLength { + if len("FaultReportMsg") > 8192 { return xerrors.Errorf("Value in field \"FaultReportMsg\" was too long") } @@ -544,7 +546,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.UpdateUnsealed (cid.Cid) (struct) - if len("UpdateUnsealed") > cbg.MaxLength { + if len("UpdateUnsealed") > 8192 { return xerrors.Errorf("Value in 
field \"UpdateUnsealed\" was too long") } @@ -566,7 +568,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommit1Fails (uint64) (uint64) - if len("PreCommit1Fails") > cbg.MaxLength { + if len("PreCommit1Fails") > 8192 { return xerrors.Errorf("Value in field \"PreCommit1Fails\" was too long") } @@ -582,7 +584,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommit2Fails (uint64) (uint64) - if len("PreCommit2Fails") > cbg.MaxLength { + if len("PreCommit2Fails") > 8192 { return xerrors.Errorf("Value in field \"PreCommit2Fails\" was too long") } @@ -598,7 +600,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommitTipSet (types.TipSetKey) (struct) - if len("PreCommitTipSet") > cbg.MaxLength { + if len("PreCommitTipSet") > 8192 { return xerrors.Errorf("Value in field \"PreCommitTipSet\" was too long") } @@ -614,7 +616,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteDataCache (storiface.SectorLocation) (struct) - if len("RemoteDataCache") > cbg.MaxLength { + if len("RemoteDataCache") > 8192 { return xerrors.Errorf("Value in field \"RemoteDataCache\" was too long") } @@ -630,7 +632,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommitDeposit (big.Int) (struct) - if len("PreCommitDeposit") > cbg.MaxLength { + if len("PreCommitDeposit") > 8192 { return xerrors.Errorf("Value in field \"PreCommitDeposit\" was too long") } @@ -646,7 +648,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommitMessage (cid.Cid) (struct) - if len("PreCommitMessage") > cbg.MaxLength { + if len("PreCommitMessage") > 8192 { return xerrors.Errorf("Value in field \"PreCommitMessage\" was too long") } @@ -668,7 +670,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteDataSealed (storiface.SectorLocation) (struct) - if len("RemoteDataSealed") > cbg.MaxLength { + if len("RemoteDataSealed") > 8192 { return xerrors.Errorf("Value in field \"RemoteDataSealed\" was 
too long") } @@ -684,7 +686,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.TerminateMessage (cid.Cid) (struct) - if len("TerminateMessage") > cbg.MaxLength { + if len("TerminateMessage") > 8192 { return xerrors.Errorf("Value in field \"TerminateMessage\" was too long") } @@ -706,7 +708,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteDataUnsealed (storiface.SectorLocation) (struct) - if len("RemoteDataUnsealed") > cbg.MaxLength { + if len("RemoteDataUnsealed") > 8192 { return xerrors.Errorf("Value in field \"RemoteDataUnsealed\" was too long") } @@ -722,7 +724,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.ReplicaUpdateProof (storiface.ReplicaUpdateProof) (slice) - if len("ReplicaUpdateProof") > cbg.MaxLength { + if len("ReplicaUpdateProof") > 8192 { return xerrors.Errorf("Value in field \"ReplicaUpdateProof\" was too long") } @@ -733,7 +735,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.ReplicaUpdateProof) > cbg.ByteArrayMaxLen { + if len(t.ReplicaUpdateProof) > 2097152 { return xerrors.Errorf("Byte array in field t.ReplicaUpdateProof was too long") } @@ -741,12 +743,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.ReplicaUpdateProof[:]); err != nil { + if _, err := cw.Write(t.ReplicaUpdateProof); err != nil { return err } // t.RemoteDataFinalized (bool) (bool) - if len("RemoteDataFinalized") > cbg.MaxLength { + if len("RemoteDataFinalized") > 8192 { return xerrors.Errorf("Value in field \"RemoteDataFinalized\" was too long") } @@ -762,7 +764,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.ReplicaUpdateMessage (cid.Cid) (struct) - if len("ReplicaUpdateMessage") > cbg.MaxLength { + if len("ReplicaUpdateMessage") > 8192 { return xerrors.Errorf("Value in field \"ReplicaUpdateMessage\" was too long") } @@ -784,7 +786,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteCommit1Endpoint 
(string) (string) - if len("RemoteCommit1Endpoint") > cbg.MaxLength { + if len("RemoteCommit1Endpoint") > 8192 { return xerrors.Errorf("Value in field \"RemoteCommit1Endpoint\" was too long") } @@ -795,7 +797,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.RemoteCommit1Endpoint) > cbg.MaxLength { + if len(t.RemoteCommit1Endpoint) > 8192 { return xerrors.Errorf("Value in field t.RemoteCommit1Endpoint was too long") } @@ -807,7 +809,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteCommit2Endpoint (string) (string) - if len("RemoteCommit2Endpoint") > cbg.MaxLength { + if len("RemoteCommit2Endpoint") > 8192 { return xerrors.Errorf("Value in field \"RemoteCommit2Endpoint\" was too long") } @@ -818,7 +820,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.RemoteCommit2Endpoint) > cbg.MaxLength { + if len(t.RemoteCommit2Endpoint) > 8192 { return xerrors.Errorf("Value in field t.RemoteCommit2Endpoint was too long") } @@ -830,7 +832,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteSealingDoneEndpoint (string) (string) - if len("RemoteSealingDoneEndpoint") > cbg.MaxLength { + if len("RemoteSealingDoneEndpoint") > 8192 { return xerrors.Errorf("Value in field \"RemoteSealingDoneEndpoint\" was too long") } @@ -841,7 +843,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.RemoteSealingDoneEndpoint) > cbg.MaxLength { + if len(t.RemoteSealingDoneEndpoint) > 8192 { return xerrors.Errorf("Value in field t.RemoteSealingDoneEndpoint was too long") } @@ -883,7 +885,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -900,7 +902,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Log: 
array too large (%d)", extra) } @@ -928,9 +930,9 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.CommD (cid.Cid) (struct) case "CommD": @@ -985,7 +987,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Proof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -996,21 +998,22 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.Proof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Proof[:]); err != nil { + if _, err := io.ReadFull(cr, t.Proof); err != nil { return err } + // t.State (sealing.SectorState) (string) case "State": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } t.State = SectorState(sval) } - // t.Pieces ([]api.SectorPiece) (slice) + // t.Pieces ([]sealing.SafeSectorPiece) (slice) case "Pieces": maj, extra, err = cr.ReadHeader() @@ -1018,7 +1021,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Pieces: array too large (%d)", extra) } @@ -1027,7 +1030,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } if extra > 0 { - t.Pieces = make([]api.SectorPiece, extra) + t.Pieces = make([]SafeSectorPiece, extra) } for i := 0; i < int(extra); i++ { @@ -1046,14 +1049,14 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Return (sealing.ReturnState) (string) case "Return": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1064,14 +1067,14 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "LastErr": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } t.LastErr = string(sval) } - // t.CCPieces ([]api.SectorPiece) (slice) + // 
t.CCPieces ([]sealing.SafeSectorPiece) (slice) case "CCPieces": maj, extra, err = cr.ReadHeader() @@ -1079,7 +1082,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.CCPieces: array too large (%d)", extra) } @@ -1088,7 +1091,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } if extra > 0 { - t.CCPieces = make([]api.SectorPiece, extra) + t.CCPieces = make([]SafeSectorPiece, extra) } for i := 0; i < int(extra); i++ { @@ -1107,9 +1110,9 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.CCUpdate (bool) (bool) case "CCUpdate": @@ -1132,10 +1135,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "SeedEpoch": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1162,7 +1165,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.SeedValue: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1173,17 +1176,18 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.SeedValue = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.SeedValue[:]); err != nil { + if _, err := io.ReadFull(cr, t.SeedValue); err != nil { return err } + // t.SectorType (abi.RegisteredSealProof) (int64) case "SectorType": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1206,10 +1210,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "TicketEpoch": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1236,7 +1240,7 @@ func (t 
*SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.TicketValue: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1247,17 +1251,18 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.TicketValue = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.TicketValue[:]); err != nil { + if _, err := io.ReadFull(cr, t.TicketValue); err != nil { return err } + // t.CreationTime (int64) (int64) case "CreationTime": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1295,10 +1300,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "TerminatedAt": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1386,7 +1391,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.PreCommit1Out: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1397,9 +1402,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.PreCommit1Out = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.PreCommit1Out[:]); err != nil { + if _, err := io.ReadFull(cr, t.PreCommit1Out); err != nil { return err } + // t.FaultReportMsg (cid.Cid) (struct) case "FaultReportMsg": @@ -1610,7 +1616,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.ReplicaUpdateProof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1621,9 +1627,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.ReplicaUpdateProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, 
t.ReplicaUpdateProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.ReplicaUpdateProof); err != nil { return err } + // t.RemoteDataFinalized (bool) (bool) case "RemoteDataFinalized": @@ -1669,7 +1676,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "RemoteCommit1Endpoint": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1680,7 +1687,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "RemoteCommit2Endpoint": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1691,7 +1698,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "RemoteSealingDoneEndpoint": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1720,7 +1727,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { } // t.Kind (string) (string) - if len("Kind") > cbg.MaxLength { + if len("Kind") > 8192 { return xerrors.Errorf("Value in field \"Kind\" was too long") } @@ -1731,7 +1738,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { return err } - if len(t.Kind) > cbg.MaxLength { + if len(t.Kind) > 8192 { return xerrors.Errorf("Value in field t.Kind was too long") } @@ -1743,7 +1750,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { } // t.Trace (string) (string) - if len("Trace") > cbg.MaxLength { + if len("Trace") > 8192 { return xerrors.Errorf("Value in field \"Trace\" was too long") } @@ -1754,7 +1761,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { return err } - if len(t.Trace) > cbg.MaxLength { + if len(t.Trace) > 8192 { return xerrors.Errorf("Value in field t.Trace was too long") } @@ -1766,7 +1773,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { } // t.Message (string) (string) - if len("Message") > cbg.MaxLength { + if len("Message") > 8192 { return xerrors.Errorf("Value in field \"Message\" was too long") } @@ -1777,7 +1784,7 @@ func 
(t *Log) MarshalCBOR(w io.Writer) error { return err } - if len(t.Message) > cbg.MaxLength { + if len(t.Message) > 8192 { return xerrors.Errorf("Value in field t.Message was too long") } @@ -1789,7 +1796,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { } // t.Timestamp (uint64) (uint64) - if len("Timestamp") > cbg.MaxLength { + if len("Timestamp") > 8192 { return xerrors.Errorf("Value in field \"Timestamp\" was too long") } @@ -1836,7 +1843,7 @@ func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1849,7 +1856,7 @@ func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { case "Kind": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1860,7 +1867,7 @@ func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { case "Trace": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1871,7 +1878,7 @@ func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { case "Message": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } diff --git a/storage/pipeline/checks.go b/storage/pipeline/checks.go index ecd160231c1..1f21b9c636c 100644 --- a/storage/pipeline/checks.go +++ b/storage/pipeline/checks.go @@ -4,6 +4,7 @@ import ( "bytes" "context" + "github.com/ipfs/go-cid" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -12,9 +13,9 @@ import ( "github.com/filecoin-project/go-state-types/crypto" prooftypes "github.com/filecoin-project/go-state-types/proof" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" ) // TODO: For now we handle this by halting state execution, when we get 
jsonrpc reconnecting @@ -41,7 +42,7 @@ type ErrCommitWaitFailed struct{ error } type ErrBadRU struct{ error } type ErrBadPR struct{ error } -func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber, pieces []api.SectorPiece, api SealingAPI, mustHaveDeals bool) error { +func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber, pieces []SafeSectorPiece, api SealingAPI, mustHaveDeals bool) error { ts, err := api.ChainHead(ctx) if err != nil { return &ErrApi{xerrors.Errorf("getting chain head: %w", err)} @@ -51,43 +52,84 @@ func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber var offset abi.PaddedPieceSize for i, p := range pieces { + p, i := p, i + // check that the piece is correctly aligned - if offset%p.Piece.Size != 0 { - return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d is not aligned: size=%xh offset=%xh off-by=%xh", sn, i, p.Piece.Size, offset, offset%p.Piece.Size)} - } - offset += p.Piece.Size - - // if no deal is associated with the piece, ensure that we added it as - // filler (i.e. ensure that it has a zero PieceCID) - if p.DealInfo == nil { - exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) - if !p.Piece.PieceCID.Equals(exp) { - return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sn, i, p.Piece.PieceCID)} - } - continue + if offset%p.Piece().Size != 0 { + return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d is not aligned: size=%xh offset=%xh off-by=%xh", sn, i, p.Piece().Size, offset, offset%p.Piece().Size)} } - - dealCount++ - - deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, ts.Key()) + offset += p.Piece().Size + + err := p.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(pi UniversalPieceInfo) error { + // if no deal is associated with the piece, ensure that we added it as + // filler (i.e. 
ensure that it has a zero PieceCID) + + exp := zerocomm.ZeroPieceCommitment(p.Piece().Size.Unpadded()) + if !p.Piece().PieceCID.Equals(exp) { + return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sn, i, p.Piece().PieceCID)} + } + + return nil + }, + BuiltinMarketHandler: func(pi UniversalPieceInfo) error { + dealCount++ + + deal, err := api.StateMarketStorageDeal(ctx, p.Impl().DealID, ts.Key()) + if err != nil { + return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.Impl().DealID, i, err)} + } + + if deal.Proposal.Provider != maddr { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(pieces), sn, p.Impl().DealID, deal.Proposal.Provider, maddr)} + } + + if deal.Proposal.PieceCID != p.Piece().PieceCID { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(pieces), sn, p.Impl().DealID, p.Impl().DealProposal.PieceCID, deal.Proposal.PieceCID)} + } + + if p.Piece().Size != deal.Proposal.PieceSize { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(pieces), sn, p.Impl().DealID, p.Piece().Size, deal.Proposal.PieceSize)} + } + + if ts.Height() >= deal.Proposal.StartEpoch { + return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.Impl().DealID, deal.Proposal.StartEpoch, ts.Height())} + } + + return nil + }, + DDOHandler: func(pi UniversalPieceInfo) error { + dealCount++ + + // try to get allocation to see if that still works + all, err := pi.GetAllocation(ctx, api, ts.Key()) + if err != nil { + return xerrors.Errorf("getting deal %d allocation: %w", p.Impl().DealID, err) + } + if all != nil { + mid, err := address.IDFromAddress(maddr) + if err != nil { + return xerrors.Errorf("getting miner id: %w", err) + 
} + + if all.Provider != abi.ActorID(mid) { + return xerrors.Errorf("allocation provider doesn't match miner") + } + + if ts.Height() >= all.Expiration { + return &ErrExpiredDeals{xerrors.Errorf("piece allocation %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.Impl().DealID, all.Expiration, ts.Height())} + } + + if all.Size < p.Piece().Size { + return &ErrInvalidDeals{xerrors.Errorf("piece allocation %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(pieces), sn, p.Impl().DealID, p.Piece().Size, all.Size)} + } + } + + return nil + }, + }) if err != nil { - return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)} - } - - if deal.Proposal.Provider != maddr { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(pieces), sn, p.DealInfo.DealID, deal.Proposal.Provider, maddr)} - } - - if deal.Proposal.PieceCID != p.Piece.PieceCID { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(pieces), sn, p.DealInfo.DealID, p.Piece.PieceCID, deal.Proposal.PieceCID)} - } - - if p.Piece.Size != deal.Proposal.PieceSize { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(pieces), sn, p.DealInfo.DealID, p.Piece.Size, deal.Proposal.PieceSize)} - } - - if ts.Height() >= deal.Proposal.StartEpoch { - return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.DealInfo.DealID, deal.Proposal.StartEpoch, ts.Height())} + return err } } @@ -106,8 +148,8 @@ func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, t return err } - if si.hasDeals() { - commD, err := api.StateComputeDataCID(ctx, maddr, si.SectorType, si.dealIDs(), tsk) + if 
si.hasData() { + commD, err := computeUnsealedCIDFromPieces(si) if err != nil { return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} } @@ -223,8 +265,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, } // check that sector info is good after running a replica update -func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tsk types.TipSetKey, api SealingAPI) error { - +func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error { if err := checkPieces(ctx, maddr, si.SectorNumber, si.Pieces, api, true); err != nil { return err } @@ -232,9 +273,9 @@ func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInf return xerrors.Errorf("replica update on sector not marked for update") } - commD, err := api.StateComputeDataCID(ctx, maddr, si.SectorType, si.dealIDs(), tsk) + commD, err := computeUnsealedCIDFromPieces(si) if err != nil { - return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} + return xerrors.Errorf("computing unsealed CID from pieces: %w", err) } if si.UpdateUnsealed == nil { @@ -253,5 +294,9 @@ func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInf } return nil +} +func computeUnsealedCIDFromPieces(si SectorInfo) (cid.Cid, error) { + pcs := si.pieceInfos() + return ffiwrapper.GenerateUnsealedCID(si.SectorType, pcs) } diff --git a/storage/pipeline/commit_batch.go b/storage/pipeline/commit_batch.go index 754f317630b..096f27e4c95 100644 --- a/storage/pipeline/commit_batch.go +++ b/storage/pipeline/commit_batch.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/config" 
"github.com/filecoin-project/lotus/node/modules/dtypes" @@ -57,6 +58,9 @@ type AggregateInput struct { Spt abi.RegisteredSealProof Info proof.AggregateSealVerifyInfo Proof []byte + + ActivationManifest miner.SectorActivationManifest + DealIDPrecommit bool } type CommitBatcher struct { @@ -209,13 +213,18 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, return nil, nil } - var res []sealiface.CommitBatchRes + var res, resV1 []sealiface.CommitBatchRes ts, err := b.api.ChainHead(b.mctx) if err != nil { return nil, err } + nv, err := b.api.StateNetworkVersion(b.mctx, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting network version: %s", err) + } + blackedOut := func() bool { const nv16BlackoutWindow = abi.ChainEpoch(20) // a magik number if ts.Height() <= build.UpgradeSkyrHeight && build.UpgradeSkyrHeight-ts.Height() < nv16BlackoutWindow { @@ -232,25 +241,67 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, } } + if nv >= MinDDONetworkVersion { + // After nv21, we have a new ProveCommitSectors2 method, which supports + // batching without aggregation, but it doesn't support onboarding + // sectors which were precommitted with DealIDs in the precommit message. 
+ // We prefer it for all other sectors, so first we use the new processBatchV2 + + var sectors []abi.SectorNumber + for sn := range b.todo { + sectors = append(sectors, sn) + } + res, err = b.processBatchV2(cfg, sectors, nv, !individual) + if err != nil { + err = xerrors.Errorf("processBatchV2: %w", err) + } + + // Mark sectors as done + for _, r := range res { + if err != nil { + r.Error = err.Error() + } + + for _, sn := range r.Sectors { + for _, ch := range b.waiting[sn] { + ch <- r // buffered + } + + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.cutoffs, sn) + } + } + } + + if err != nil { + log.Warnf("CommitBatcher maybeStartBatch processBatch-ddo %v", err) + } + + if err != nil && len(res) == 0 { + return nil, err + } + if individual { - res, err = b.processIndividually(cfg) + resV1, err = b.processIndividually(cfg) } else { var sectors []abi.SectorNumber for sn := range b.todo { sectors = append(sectors, sn) } - res, err = b.processBatch(cfg, sectors) + resV1, err = b.processBatchV1(cfg, sectors, nv) } if err != nil { log.Warnf("CommitBatcher maybeStartBatch individual:%v processBatch %v", individual, err) } - if err != nil && len(res) == 0 { + if err != nil && len(resV1) == 0 { return nil, err } - for _, r := range res { + // Mark the rest as processed + for _, r := range resV1 { if err != nil { r.Error = err.Error() } @@ -266,10 +317,170 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, } } + res = append(res, resV1...) + return res, nil } -func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorNumber) ([]sealiface.CommitBatchRes, error) { +// processBatchV2 processes a batch of sectors after nv22. It will always send +// ProveCommitSectors3Params which may contain either individual proofs or an +// aggregate proof depending on SP condition and network conditions. 
+func (b *CommitBatcher) processBatchV2(cfg sealiface.Config, sectors []abi.SectorNumber, nv network.Version, aggregate bool) ([]sealiface.CommitBatchRes, error) { + ts, err := b.api.ChainHead(b.mctx) + if err != nil { + return nil, err + } + + total := len(sectors) + + res := sealiface.CommitBatchRes{ + FailedSectors: map[abi.SectorNumber]string{}, + } + + params := miner.ProveCommitSectors3Params{ + RequireActivationSuccess: cfg.RequireActivationSuccess, + RequireNotificationSuccess: cfg.RequireNotificationSuccess, + } + + infos := make([]proof.AggregateSealVerifyInfo, 0, total) + collateral := big.Zero() + + for _, sector := range sectors { + if b.todo[sector].DealIDPrecommit { + // can't process sectors precommitted with deal IDs with ProveCommitSectors2 + continue + } + + res.Sectors = append(res.Sectors, sector) + + sc, err := b.getSectorCollateral(sector, ts.Key()) + if err != nil { + res.FailedSectors[sector] = err.Error() + continue + } + + collateral = big.Add(collateral, sc) + + params.SectorActivations = append(params.SectorActivations, b.todo[sector].ActivationManifest) + params.SectorProofs = append(params.SectorProofs, b.todo[sector].Proof) + + infos = append(infos, b.todo[sector].Info) + } + + if len(infos) == 0 { + return nil, nil + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Number < infos[j].Number + }) + + proofs := make([][]byte, 0, total) + for _, info := range infos { + proofs = append(proofs, b.todo[info.Number].Proof) + } + + needFunds := collateral + + if aggregate { + params.SectorProofs = nil // can't be set when aggregating + arp, err := b.aggregateProofType(nv) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting aggregate proof type: %w", err) + } + params.AggregateProofType = &arp + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting miner id: %w", err) + } 
+ + params.AggregateProof, err = b.prover.AggregateSealProofs(proof.AggregateSealVerifyProofAndInfos{ + Miner: abi.ActorID(mid), + SealProof: b.todo[infos[0].Number].Spt, + AggregateProof: arp, + Infos: infos, + }, proofs) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("aggregating proofs: %w", err) + } + + aggFeeRaw, err := policy.AggregateProveCommitNetworkFee(nv, len(infos), ts.MinTicketBlock().ParentBaseFee) + if err != nil { + res.Error = err.Error() + log.Errorf("getting aggregate commit network fee: %s", err) + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting aggregate commit network fee: %s", err) + } + + aggFee := big.Div(big.Mul(aggFeeRaw, aggFeeNum), aggFeeDen) + + needFunds = big.Add(collateral, aggFee) + } + + needFunds, err = collateralSendAmount(b.mctx, b.api, b.maddr, cfg, needFunds) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, err + } + + maxFee := b.feeCfg.MaxCommitBatchGasFee.FeeForSectors(len(infos)) + goodFunds := big.Add(maxFee, needFunds) + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, types.EmptyTSK) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err) + } + + from, _, err := b.addrSel.AddressFor(b.mctx, b.api, mi, api.CommitAddr, goodFunds, needFunds) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err) + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't serialize ProveCommitSectors2Params: %w", err) + } + + _, err = simulateMsgGas(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.ProveCommitSectors3, needFunds, maxFee, enc.Bytes()) + + if err != nil && (!api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || len(sectors) < 
miner.MinAggregatedSectors*2) { + log.Errorf("simulating CommitBatch message failed: %s", err) + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("simulating CommitBatch message failed: %w", err) + } + + msgTooLarge := len(enc.Bytes()) > (messagepool.MaxMessageSize - 128) + + // If we're out of gas, split the batch in half and evaluate again + if api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || msgTooLarge { + log.Warnf("CommitAggregate message ran out of gas or is too large, splitting batch in half and trying again (sectors: %d, params: %d)", len(sectors), len(enc.Bytes())) + mid := len(sectors) / 2 + ret0, _ := b.processBatchV2(cfg, sectors[:mid], nv, aggregate) + ret1, _ := b.processBatchV2(cfg, sectors[mid:], nv, aggregate) + + return append(ret0, ret1...), nil + } + + mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.ProveCommitSectors3, needFunds, maxFee, enc.Bytes()) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("sending message failed (params size: %d, sectors: %d, agg: %t): %w", len(enc.Bytes()), len(sectors), aggregate, err) + } + + res.Msg = &mcid + + log.Infow("Sent ProveCommitSectors2 message", "cid", mcid, "from", from, "todo", total, "sectors", len(infos)) + + return []sealiface.CommitBatchRes{res}, nil +} + +// processBatchV1 processes a batch of sectors before nv22. It always sends out an aggregate message. 
+func (b *CommitBatcher) processBatchV1(cfg sealiface.Config, sectors []abi.SectorNumber, nv network.Version) ([]sealiface.CommitBatchRes, error) { ts, err := b.api.ChainHead(b.mctx) if err != nil { return nil, err @@ -322,13 +533,6 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorN return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting miner id: %w", err) } - nv, err := b.api.StateNetworkVersion(b.mctx, ts.Key()) - if err != nil { - res.Error = err.Error() - log.Errorf("getting network version: %s", err) - return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting network version: %s", err) - } - arp, err := b.aggregateProofType(nv) if err != nil { res.Error = err.Error() @@ -396,8 +600,8 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorN if api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) { log.Warnf("CommitAggregate message ran out of gas, splitting batch in half and trying again (sectors: %d)", len(sectors)) mid := len(sectors) / 2 - ret0, _ := b.processBatch(cfg, sectors[:mid]) - ret1, _ := b.processBatch(cfg, sectors[mid:]) + ret0, _ := b.processBatchV1(cfg, sectors[:mid], nv) + ret1, _ := b.processBatchV1(cfg, sectors[mid:], nv) return append(ret0, ret1...), nil } @@ -484,6 +688,10 @@ func (b *CommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.C } func (b *CommitBatcher) processSingle(cfg sealiface.Config, mi api.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tsk types.TipSetKey) (cid.Cid, error) { + return b.processSingleV1(cfg, mi, avail, sn, info, tsk) +} + +func (b *CommitBatcher) processSingleV1(cfg sealiface.Config, mi api.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tsk types.TipSetKey) (cid.Cid, error) { enc := new(bytes.Buffer) params := &miner.ProveCommitSectorParams{ SectorNumber: sn, @@ -646,11 +854,15 @@ func (b *CommitBatcher) getCommitCutoff(si SectorInfo) (time.Time, error) { 
cutoffEpoch := pci.PreCommitEpoch + mpcd for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - startEpoch := p.DealInfo.DealSchedule.StartEpoch + startEpoch, err := p.StartEpoch() + if err != nil { + log.Errorf("getting deal start epoch: %s", err) + return time.Now(), err + } if startEpoch < cutoffEpoch { cutoffEpoch = startEpoch } diff --git a/storage/pipeline/commit_batch_test.go b/storage/pipeline/commit_batch_test.go deleted file mode 100644 index 5ae2f171ae2..00000000000 --- a/storage/pipeline/commit_batch_test.go +++ /dev/null @@ -1,498 +0,0 @@ -// stm: #unit -package sealing_test - -import ( - "bytes" - "context" - "sort" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/network" - prooftypes "github.com/filecoin-project/go-state-types/proof" - miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/ctladdr" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/pipeline/mocks" - "github.com/filecoin-project/lotus/storage/pipeline/sealiface" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func TestCommitBatcher(t *testing.T) { - //stm: @CHAIN_STATE_MINER_PRE_COM_INFO_001, @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - ctx := context.Background() - - as := asel(func(ctx context.Context, mi api.MinerInfo, 
use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return t0123, big.Zero(), nil - }) - - maxBatch := miner5.MaxAggregatedSectors - minBatch := miner5.MinAggregatedSectors - - cfg := func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 2, - MaxSealingSectors: 0, - MaxSealingSectorsForDeals: 0, - WaitDealsDelay: time.Hour * 6, - AlwaysKeepUnsealedCopy: true, - - MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, - PreCommitBatchWait: 24 * time.Hour, - PreCommitBatchSlack: 3 * time.Hour, - - AggregateCommits: true, - MinCommitBatch: minBatch, - MaxCommitBatch: maxBatch, - CommitBatchWait: 24 * time.Hour, - CommitBatchSlack: 1 * time.Hour, - - AggregateAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL - BatchPreCommitAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL - - TerminateBatchMin: 1, - TerminateBatchMax: 100, - TerminateBatchWait: 5 * time.Minute, - }, nil - } - - type promise func(t *testing.T) - type action func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise - - actions := func(as ...action) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - var ps []promise - for _, a := range as { - p := a(t, s, pcb) - if p != nil { - ps = append(ps, p) - } - } - - if len(ps) > 0 { - return func(t *testing.T) { - for _, p := range ps { - p(t) - } - } - } - return nil - } - } - - addSector := func(sn abi.SectorNumber, aboveBalancer bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - var pcres sealiface.CommitBatchRes - var pcerr error - done := sync.Mutex{} - done.Lock() - - si := pipeline.SectorInfo{ - SectorNumber: sn, - } - - basefee := types.PicoFil - if aboveBalancer { - basefee = types.NanoFil - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - 
s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, nil) - - go func() { - defer done.Unlock() - pcres, pcerr = pcb.AddCommit(ctx, si, pipeline.AggregateInput{ - Info: prooftypes.AggregateSealVerifyInfo{ - Number: sn, - }, - }) - }() - - return func(t *testing.T) { - done.Lock() - require.NoError(t, pcerr) - require.Empty(t, pcres.Error) - require.Contains(t, pcres.Sectors, si.SectorNumber) - } - } - } - - addSectors := func(sectors []abi.SectorNumber, aboveBalancer bool) action { - as := make([]action, len(sectors)) - for i, sector := range sectors { - as[i] = addSector(sector, aboveBalancer) - } - return actions(as...) - } - - waitPending := func(n int) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - require.Eventually(t, func() bool { - p, err := pcb.Pending(ctx) - require.NoError(t, err) - return len(p) == n - }, time.Second*5, 10*time.Millisecond) - - return nil - } - } - - //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001, @CHAIN_STATE_MINER_GET_COLLATERAL_001 - expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - - ti := len(expect) - batch := false - if ti >= minBatch { - batch = true - ti = 1 - } - - basefee := types.PicoFil - if aboveBalancer { - basefee = types.NanoFil - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - /*if batch { - s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) - }*/ - - if !aboveBalancer { - batch = false - ti = len(expect) - } - - 
s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - - pciC := len(expect) - if failOnePCI { - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found - pciC = len(expect) - 1 - if !batch { - ti-- - } - } - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, nil).Times(pciC) - s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) - - if batch { - s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - s.EXPECT().MpoolPushMessage(gomock.Any(), funMatcher(func(i interface{}) bool { - b := i.(*types.Message) - if batch { - var params miner5.ProveCommitAggregateParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - for _, number := range expect { - set, err := params.SectorNumbers.IsSet(uint64(number)) - require.NoError(t, err) - require.True(t, set) - } - } else { - var params miner5.ProveCommitSectorParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - } - return true - }), gomock.Any()).Return(dummySmsg, nil).Times(ti) - return nil - } - } - - expectProcessBatch := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool, gasOverLimit bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - - ti := len(expect) - batch := false - if ti >= minBatch { - batch = true - ti = 1 - } - - if !aboveBalancer { - batch = false - ti = len(expect) - } - - pciC := 
len(expect) - if failOnePCI { - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found - pciC = len(expect) - 1 - if !batch { - ti-- - } - } - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, nil).Times(pciC) - s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) - - if batch { - s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version18, nil) - if gasOverLimit { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &api.ErrOutOfGas{}) - } else { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - } - return nil - } - } - - flush := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - _ = expectSend(expect, aboveBalancer, failOnePCI)(t, s, pcb) - - batch := len(expect) >= minBatch && aboveBalancer - - r, err := pcb.Flush(ctx) - require.NoError(t, err) - if batch { - require.Len(t, r, 1) - require.Empty(t, r[0].Error) - sort.Slice(r[0].Sectors, func(i, j int) bool { - return r[0].Sectors[i] < r[0].Sectors[j] - }) - require.Equal(t, expect, r[0].Sectors) - if !failOnePCI { - require.Len(t, r[0].FailedSectors, 0) - } else { - require.Len(t, r[0].FailedSectors, 1) - _, found := r[0].FailedSectors[1] - require.True(t, found) - } - } else { - require.Len(t, r, len(expect)) - for _, res := range r { - require.Len(t, res.Sectors, 1) - require.Empty(t, res.Error) - } - sort.Slice(r, func(i, j int) bool { - return r[i].Sectors[0] < r[j].Sectors[0] - }) - for i, res := range r { - require.Equal(t, 
abi.SectorNumber(i), res.Sectors[0]) - if failOnePCI && res.Sectors[0] == 1 { - require.Len(t, res.FailedSectors, 1) - _, found := res.FailedSectors[1] - require.True(t, found) - } else { - require.Empty(t, res.FailedSectors) - } - } - } - - return nil - } - } - - getSectors := func(n int) []abi.SectorNumber { - out := make([]abi.SectorNumber, n) - for i := range out { - out[i] = abi.SectorNumber(i) - } - return out - } - - tcs := map[string]struct { - actions []action - }{ - "addSingle-aboveBalancer": { - actions: []action{ - addSector(0, true), - waitPending(1), - flush([]abi.SectorNumber{0}, true, false), - }, - }, - "addTwo-aboveBalancer": { - actions: []action{ - addSectors(getSectors(2), true), - waitPending(2), - flush(getSectors(2), true, false), - }, - }, - "addAte-aboveBalancer": { - actions: []action{ - addSectors(getSectors(8), true), - waitPending(8), - flush(getSectors(8), true, false), - }, - }, - "addMax-aboveBalancer": { - actions: []action{ - expectSend(getSectors(maxBatch), true, false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addMax-aboveBalancer-gasAboveLimit": { - actions: []action{ - expectProcessBatch(getSectors(maxBatch), true, false, true), - expectSend(getSectors(maxBatch)[:maxBatch/2], true, false), - expectSend(getSectors(maxBatch)[maxBatch/2:], true, false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addSingle-belowBalancer": { - actions: []action{ - addSector(0, false), - waitPending(1), - flush([]abi.SectorNumber{0}, false, false), - }, - }, - "addTwo-belowBalancer": { - actions: []action{ - addSectors(getSectors(2), false), - waitPending(2), - flush(getSectors(2), false, false), - }, - }, - "addAte-belowBalancer": { - actions: []action{ - addSectors(getSectors(8), false), - waitPending(8), - flush(getSectors(8), false, false), - }, - }, - "addMax-belowBalancer": { - actions: []action{ - expectSend(getSectors(maxBatch), false, false), - addSectors(getSectors(maxBatch), false), - }, - }, - - 
"addAte-aboveBalancer-failOne": { - actions: []action{ - addSectors(getSectors(8), true), - waitPending(8), - flush(getSectors(8), true, true), - }, - }, - "addAte-belowBalancer-failOne": { - actions: []action{ - addSectors(getSectors(8), false), - waitPending(8), - flush(getSectors(8), false, true), - }, - }, - } - - for name, tc := range tcs { - tc := tc - - t.Run(name, func(t *testing.T) { - // create go mock controller here - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. - defer mockCtrl.Finish() - - // create them mocks - pcapi := mocks.NewMockCommitBatcherApi(mockCtrl) - - pcb := pipeline.NewCommitBatcher(ctx, t0123, pcapi, as, fc, cfg, &fakeProver{}) - - var promises []promise - - for _, a := range tc.actions { - p := a(t, pcapi, pcb) - if p != nil { - promises = append(promises, p) - } - } - - for _, p := range promises { - p(t) - } - - err := pcb.Stop(ctx) - require.NoError(t, err) - }) - } -} - -type fakeProver struct{} - -func (f fakeProver) AggregateSealProofs(aggregateInfo prooftypes.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { - return []byte("Trust me, I'm a proof"), nil -} - -var _ storiface.Prover = &fakeProver{} - -var dummyAddr = func() address.Address { - a, _ := address.NewFromString("t00") - return a -}() - -func makeBFTs(t *testing.T, basefee abi.TokenAmount, h abi.ChainEpoch) *types.TipSet { - dummyCid, _ := cid.Parse("bafkqaaa") - - var ts, err = types.NewTipSet([]*types.BlockHeader{ - { - Height: h, - Miner: dummyAddr, - - Parents: []cid.Cid{}, - - Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, - - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - - ParentBaseFee: basefee, - }, - }) - if t != nil { - require.NoError(t, err) - } - - return ts -} - -func makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet 
{ - return makeBFTs(t, big.NewInt(0), h) -} - -var dummySmsg = &types.SignedMessage{ - Message: types.Message{ - From: dummyAddr, - To: dummyAddr, - }, - Signature: crypto.Signature{Type: crypto.SigTypeBLS}, -} - -type asel func(ctx context.Context, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) - -func (s asel) AddressFor(ctx context.Context, _ ctladdr.NodeApi, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return s(ctx, mi, use, goodFunds, minFunds) -} - -var _ pipeline.AddressSelector = asel(nil) diff --git a/storage/pipeline/currentdealinfo_test.go b/storage/pipeline/currentdealinfo_test.go index 21141a35d57..1ea05dc35ef 100644 --- a/storage/pipeline/currentdealinfo_test.go +++ b/storage/pipeline/currentdealinfo_test.go @@ -80,21 +80,21 @@ func TestGetCurrentDealInfo(t *testing.T) { } successDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } earlierDeal := &api.MarketDeal{ Proposal: otherProposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } anotherDeal := &api.MarketDeal{ Proposal: anotherProposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, diff --git a/storage/pipeline/fsm_events.go b/storage/pipeline/fsm_events.go index a798a884b48..94cd53e829d 100644 --- a/storage/pipeline/fsm_events.go +++ b/storage/pipeline/fsm_events.go @@ -88,7 +88,7 @@ func (evt SectorAddPiece) apply(state *SectorInfo) { } type SectorPieceAdded struct { - NewPieces []api.SectorPiece + NewPieces []SafeSectorPiece } func (evt SectorPieceAdded) apply(state *SectorInfo) { @@ -114,9 +114,11 @@ type SectorPacked struct{ FillerPieces []abi.PieceInfo } func (evt SectorPacked) apply(state *SectorInfo) { for idx := range evt.FillerPieces { - 
state.Pieces = append(state.Pieces, api.SectorPiece{ - Piece: evt.FillerPieces[idx], - DealInfo: nil, // filler pieces don't have deals associated with them + state.Pieces = append(state.Pieces, SafeSectorPiece{ + real: api.SectorPiece{ + Piece: evt.FillerPieces[idx], + DealInfo: nil, // filler pieces don't have deals associated with them + }, }) } } @@ -419,7 +421,8 @@ type SectorUpdateDealIDs struct { func (evt SectorUpdateDealIDs) apply(state *SectorInfo) { for i, id := range evt.Updates { - state.Pieces[i].DealInfo.DealID = id + // NOTE: all update deals are builtin-market deals + state.Pieces[i].real.DealInfo.DealID = id } } diff --git a/storage/pipeline/input.go b/storage/pipeline/input.go index b595f533d73..53c96fc55ba 100644 --- a/storage/pipeline/input.go +++ b/storage/pipeline/input.go @@ -5,7 +5,6 @@ import ( "sort" "time" - "github.com/ipfs/go-cid" "go.uber.org/zap" "golang.org/x/xerrors" @@ -13,14 +12,15 @@ import ( "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" @@ -32,10 +32,15 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e var used abi.UnpaddedPieceSize var lastDealEnd abi.ChainEpoch for _, piece := range sector.Pieces { - used += piece.Piece.Size.Unpadded() + used += 
piece.Piece().Size.Unpadded() - if piece.DealInfo != nil && piece.DealInfo.DealProposal.EndEpoch > lastDealEnd { - lastDealEnd = piece.DealInfo.DealProposal.EndEpoch + endEpoch, err := piece.EndEpoch() + if err != nil { + return xerrors.Errorf("piece.EndEpoch: %w", err) + } + + if piece.HasDealInfo() && endEpoch > lastDealEnd { + lastDealEnd = endEpoch } } @@ -65,9 +70,9 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e if _, has := m.openSectors[sid]; !has { m.openSectors[sid] = &openSector{ used: used, - maybeAccept: func(cid cid.Cid) error { + maybeAccept: func(pk piece.PieceKey) error { // todo check deal start deadline (configurable) - m.assignedPieces[sid] = append(m.assignedPieces[sid], cid) + m.assignedPieces[sid] = append(m.assignedPieces[sid], pk) return ctx.Send(SectorAddPiece{}) }, @@ -94,7 +99,7 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, used abi.UnpaddedPieceSize) (bool, error) { log := log.WithOptions(zap.Fields( zap.Uint64("sector", uint64(sector.SectorNumber)), - zap.Int("deals", len(sector.dealIDs())), + zap.Int("dataPieces", len(sector.nonPaddingPieceInfos())), )) now := time.Now() @@ -117,7 +122,7 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, return false, xerrors.Errorf("getting per-sector deal limit: %w", err) } - if len(sector.dealIDs()) >= maxDeals { + if len(sector.nonPaddingPieceInfos()) >= maxDeals { // can't accept more deals log.Infow("starting to seal deal sector", "trigger", "maxdeals") return true, ctx.Send(SectorStartPacking{}) @@ -146,13 +151,24 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, var dealSafeSealEpoch abi.ChainEpoch for _, piece := range sector.Pieces { - if piece.DealInfo == nil { + if !piece.HasDealInfo() { continue } - dealSafeSealEpoch = piece.DealInfo.DealProposal.StartEpoch - 
cfg.StartEpochSealingBuffer + startEpoch, err := piece.StartEpoch() + if err != nil { + log.Errorw("failed to get start epoch for deal", "piece", piece.String(), "error", err) + continue // not ideal, but skipping the check should break things less + } + + dealSafeSealEpoch = startEpoch - cfg.StartEpochSealingBuffer + + alloc, err := piece.GetAllocation(ctx.Context(), m.Api, types.EmptyTSK) + if err != nil { + log.Errorw("failed to get allocation for deal", "piece", piece.String(), "error", err) + continue // not ideal, but skipping the check should break things less + } - alloc, _ := m.Api.StateGetAllocationForPendingDeal(ctx.Context(), piece.DealInfo.DealID, types.EmptyTSK) // alloc is nil if this is not a verified deal in nv17 or later if alloc == nil { continue @@ -210,8 +226,8 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er var offset abi.UnpaddedPieceSize pieceSizes := make([]abi.UnpaddedPieceSize, len(sector.Pieces)) for i, p := range sector.Pieces { - pieceSizes[i] = p.Piece.Size.Unpadded() - offset += p.Piece.Size.Unpadded() + pieceSizes[i] = p.Piece().Size.Unpadded() + offset += p.Piece().Size.Unpadded() } maxDeals, err := getDealPerSectorLimit(ssize) @@ -227,7 +243,7 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er return xerrors.Errorf("piece %s assigned to sector %d not found", piece, sector.SectorNumber) } - if len(sector.dealIDs())+(i+1) > maxDeals { + if len(sector.nonPaddingPieceInfos())+(i+1) > maxDeals { // todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("too many deals assigned to sector %d, dropping deal", sector.SectorNumber)) continue @@ -263,8 +279,10 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er } pieceSizes = append(pieceSizes, p.Unpadded()) - res.NewPieces = append(res.NewPieces, api.SectorPiece{ - Piece: 
ppi, + res.NewPieces = append(res.NewPieces, SafeSectorPiece{ + api.SectorPiece{ + Piece: ppi, + }, }) } @@ -278,22 +296,26 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er deal.accepted(sector.SectorNumber, offset, err) return ctx.Send(SectorAddPieceFailed{err}) } - if !ppi.PieceCID.Equals(deal.deal.DealProposal.PieceCID) { - err = xerrors.Errorf("got unexpected piece CID: expected:%s, got:%s", deal.deal.DealProposal.PieceCID, ppi.PieceCID) + if !ppi.PieceCID.Equals(deal.deal.PieceCID()) { + err = xerrors.Errorf("got unexpected piece CID: expected:%s, got:%s", deal.deal.PieceCID(), ppi.PieceCID) deal.accepted(sector.SectorNumber, offset, err) return ctx.Send(SectorAddPieceFailed{err}) } - log.Infow("deal added to a sector", "deal", deal.deal.DealID, "sector", sector.SectorNumber, "piece", ppi.PieceCID) + log.Infow("deal added to a sector", "pieceID", deal.deal.String(), "sector", sector.SectorNumber, "piece", ppi.PieceCID) deal.accepted(sector.SectorNumber, offset, nil) offset += deal.size pieceSizes = append(pieceSizes, deal.size) - res.NewPieces = append(res.NewPieces, api.SectorPiece{ - Piece: ppi, - DealInfo: &deal.deal, + dinfo := deal.deal.Impl() + + res.NewPieces = append(res.NewPieces, SafeSectorPiece{ + api.SectorPiece{ + Piece: ppi, + DealInfo: &dinfo, + }, }) } @@ -304,8 +326,13 @@ func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorRetryWaitDeals{}) } -func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal api.PieceDealInfo) (api.SectorOffset, error) { - log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid) +func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, pieceInfo piece.PieceDealInfo) (api.SectorOffset, error) { + return m.sectorAddPieceToAny(ctx, size, data, &pieceInfo) +} + +func (m *Sealing) sectorAddPieceToAny(ctx 
context.Context, size abi.UnpaddedPieceSize, data storiface.Data, pieceInfo UniversalPieceInfo) (api.SectorOffset, error) { + log.Infof("Adding piece %s", pieceInfo.String()) + if (padreader.PaddedSize(uint64(size))) != size { return api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece") } @@ -324,10 +351,6 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector") } - if _, err := deal.DealProposal.Cid(); err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err) - } - cfg, err := m.getConfig() if err != nil { return api.SectorOffset{}, xerrors.Errorf("getting config: %w", err) @@ -337,19 +360,34 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec if err != nil { return api.SectorOffset{}, xerrors.Errorf("couldnt get chain head: %w", err) } - if ts.Height()+cfg.StartEpochSealingBuffer > deal.DealProposal.StartEpoch { + + nv, err := m.Api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting network version: %w", err) + } + + if err := pieceInfo.Valid(nv); err != nil { + return api.SectorOffset{}, xerrors.Errorf("piece metadata invalid: %w", err) + } + + startEpoch, err := pieceInfo.StartEpoch() + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting last start epoch: %w", err) + } + + if ts.Height()+cfg.StartEpochSealingBuffer > startEpoch { return api.SectorOffset{}, xerrors.Errorf( "cannot add piece for deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", - deal.DealProposal.PieceCID, ts.Height(), deal.DealProposal.StartEpoch) + pieceInfo.PieceCID(), ts.Height(), startEpoch) } - claimTerms, err := m.getClaimTerms(ctx, deal, ts.Key()) + claimTerms, err := m.getClaimTerms(ctx, pieceInfo, ts.Key()) if err != nil { return api.SectorOffset{}, err } m.inputLk.Lock() - if pp, exist := 
m.pendingPieces[proposalCID(deal)]; exist { + if pp, exist := m.pendingPieces[pieceInfo.Key()]; exist { m.inputLk.Unlock() // we already have a pre-existing add piece call for this deal, let's wait for it to finish and see if it's successful @@ -366,7 +404,7 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec } // addPendingPiece takes over m.inputLk - pp := m.addPendingPiece(ctx, size, data, deal, claimTerms, sp) + pp := m.addPendingPiece(ctx, size, data, pieceInfo, claimTerms, sp) res, err := waitAddPieceResp(ctx, pp) if err != nil { @@ -375,32 +413,41 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } -func (m *Sealing) getClaimTerms(ctx context.Context, deal api.PieceDealInfo, tsk types.TipSetKey) (pieceClaimBounds, error) { - nv, err := m.Api.StateNetworkVersion(ctx, tsk) +func (m *Sealing) getClaimTerms(ctx context.Context, deal UniversalPieceInfo, tsk types.TipSetKey) (pieceClaimBounds, error) { + + all, err := deal.GetAllocation(ctx, m.Api, tsk) if err != nil { return pieceClaimBounds{}, err } - - if nv >= network.Version17 { - all, err := m.Api.StateGetAllocationForPendingDeal(ctx, deal.DealID, tsk) + if all != nil { + startEpoch, err := deal.StartEpoch() if err != nil { return pieceClaimBounds{}, err } - if all != nil { - return pieceClaimBounds{ - claimTermEnd: deal.DealProposal.StartEpoch + all.TermMax, - }, nil - } + + return pieceClaimBounds{ + claimTermEnd: startEpoch + all.TermMax, + }, nil + } + + nv, err := m.Api.StateNetworkVersion(ctx, tsk) + if err != nil { + return pieceClaimBounds{}, err + } + + endEpoch, err := deal.EndEpoch() + if err != nil { + return pieceClaimBounds{}, err } // no allocation for this deal, so just use a really high number for "term end" return pieceClaimBounds{ - claimTermEnd: deal.DealProposal.EndEpoch + policy.GetSectorMaxLifetime(abi.RegisteredSealProof_StackedDrg32GiBV1_1, 
network.Version17), + claimTermEnd: endEpoch + policy.GetSectorMaxLifetime(abi.RegisteredSealProof_StackedDrg32GiBV1_1, nv), }, nil } // called with m.inputLk; transfers the lock to another goroutine! -func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal api.PieceDealInfo, ct pieceClaimBounds, sp abi.RegisteredSealProof) *pendingPiece { +func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal UniversalPieceInfo, ct pieceClaimBounds, sp abi.RegisteredSealProof) *pendingPiece { doneCh := make(chan struct{}) pp := &pendingPiece{ size: size, @@ -417,14 +464,12 @@ func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSiz close(pp.doneCh) } - log.Debugw("new pending piece", "dealId", deal.DealID, - "piece", deal.DealProposal.PieceCID, - "size", size, - "dealStart", deal.DealSchedule.StartEpoch, - "dealEnd", deal.DealSchedule.EndEpoch, + log.Debugw("new pending piece", "pieceID", deal.String(), + "dealStart", result.Wrap(deal.StartEpoch()), + "dealEnd", result.Wrap(deal.EndEpoch()), "termEnd", ct.claimTermEnd) - m.pendingPieces[proposalCID(deal)] = pp + m.pendingPieces[deal.Key()] = pp go func() { defer m.inputLk.Unlock() if err := m.updateInput(ctx, sp); err != nil { @@ -489,7 +534,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e type match struct { sector abi.SectorID - deal cid.Cid + deal piece.PieceKey dealEnd abi.ChainEpoch claimTermEnd abi.ChainEpoch @@ -499,7 +544,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e } var matches []match - toAssign := map[cid.Cid]struct{}{} // used to maybe create new sectors + toAssign := map[piece.PieceKey]struct{}{} // used to maybe create new sectors // todo: this is distinctly O(n^2), may need to be optimized for tiny deals and large scale miners // (unlikely to be a problem now) @@ -523,12 +568,18 @@ func (m *Sealing) 
updateInput(ctx context.Context, sp abi.RegisteredSealProof) e continue } + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + log.Errorf("failed to get end epoch for deal %s", piece.deal) + continue + } + if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding) matches = append(matches, match{ sector: id, deal: proposalCid, - dealEnd: piece.deal.DealProposal.EndEpoch, + dealEnd: endEpoch, claimTermEnd: piece.claimTerms.claimTermEnd, size: piece.size, @@ -609,7 +660,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e } // pendingPieceIndex is an index in the Sealing.pendingPieces map -type pendingPieceIndex cid.Cid +type pendingPieceIndex piece.PieceKey type pieceBound struct { epoch abi.ChainEpoch @@ -633,13 +684,21 @@ func (m *Sealing) pendingPieceEpochBounds() []pieceBound { continue } + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + // this really should never happen, at this point we have validated + // the piece enough times + log.Errorf("failed to get end epoch for deal %s: %v", ppi, err) + continue + } + // start bound on deal end - if boundsByEpoch[piece.deal.DealProposal.EndEpoch] == nil { - boundsByEpoch[piece.deal.DealProposal.EndEpoch] = &pieceBound{ - epoch: piece.deal.DealProposal.EndEpoch, + if boundsByEpoch[endEpoch] == nil { + boundsByEpoch[endEpoch] = &pieceBound{ + epoch: endEpoch, } } - boundsByEpoch[piece.deal.DealProposal.EndEpoch].boundStart = append(boundsByEpoch[piece.deal.DealProposal.EndEpoch].boundStart, pendingPieceIndex(ppi)) + boundsByEpoch[endEpoch].boundStart = append(boundsByEpoch[endEpoch].boundStart, pendingPieceIndex(ppi)) // end bound on term max if boundsByEpoch[piece.claimTerms.claimTermEnd] == nil { @@ -662,10 +721,10 @@ func (m *Sealing) pendingPieceEpochBounds() []pieceBound { var curBoundBytes abi.UnpaddedPieceSize for i, bound := range out { for _, ppi := range bound.boundStart { - curBoundBytes += 
m.pendingPieces[cid.Cid(ppi)].size + curBoundBytes += m.pendingPieces[piece.PieceKey(ppi)].size } for _, ppi := range bound.boundEnd { - curBoundBytes -= m.pendingPieces[cid.Cid(ppi)].size + curBoundBytes -= m.pendingPieces[piece.PieceKey(ppi)].size } out[i].dealBytesInBound = curBoundBytes @@ -896,15 +955,17 @@ func (m *Sealing) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showO deals := make([]abi.DealID, len(info.Pieces)) pieces := make([]api.SectorPiece, len(info.Pieces)) for i, piece := range info.Pieces { - pieces[i].Piece = piece.Piece - if piece.DealInfo == nil { + // todo make this work with DDO deals in some reasonable way + + pieces[i].Piece = piece.Piece() + if !piece.HasDealInfo() || piece.Impl().PublishCid == nil { continue } - pdi := *piece.DealInfo // copy + pdi := piece.DealInfo().Impl() // copy pieces[i].DealInfo = &pdi - deals[i] = piece.DealInfo.DealID + deals[i] = piece.DealInfo().Impl().DealID } log := make([]api.SectorLog, len(info.Log)) @@ -955,14 +1016,4 @@ func (m *Sealing) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showO return sInfo, nil } -func proposalCID(deal api.PieceDealInfo) cid.Cid { - pc, err := deal.DealProposal.Cid() - if err != nil { - log.Errorf("DealProposal.Cid error: %+v", err) - return cid.Undef - } - - return pc -} - var _ sectorblocks.SectorBuilder = &Sealing{} diff --git a/storage/pipeline/mocks/api.go b/storage/pipeline/mocks/api.go index 126dbba7da5..3990f1a2fd3 100644 --- a/storage/pipeline/mocks/api.go +++ b/storage/pipeline/mocks/api.go @@ -9,13 +9,14 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" address "github.com/filecoin-project/go-address" bitfield "github.com/filecoin-project/go-bitfield" abi "github.com/filecoin-project/go-state-types/abi" big "github.com/filecoin-project/go-state-types/big" - miner "github.com/filecoin-project/go-state-types/builtin/v12/miner" + miner 
"github.com/filecoin-project/go-state-types/builtin/v13/miner" miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" crypto "github.com/filecoin-project/go-state-types/crypto" @@ -65,6 +66,21 @@ func (mr *MockSealingAPIMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockSealingAPI)(nil).ChainGetMessage), arg0, arg1) } +// ChainHasObj mocks base method. +func (m *MockSealingAPI) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHasObj indicates an expected call of ChainHasObj. +func (mr *MockSealingAPIMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockSealingAPI)(nil).ChainHasObj), arg0, arg1) +} + // ChainHead mocks base method. func (m *MockSealingAPI) ChainHead(arg0 context.Context) (*types.TipSet, error) { m.ctrl.T.Helper() @@ -80,6 +96,20 @@ func (mr *MockSealingAPIMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockSealingAPI)(nil).ChainHead), arg0) } +// ChainPutObj mocks base method. +func (m *MockSealingAPI) ChainPutObj(arg0 context.Context, arg1 blocks.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainPutObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainPutObj indicates an expected call of ChainPutObj. 
+func (mr *MockSealingAPIMockRecorder) ChainPutObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainPutObj", reflect.TypeOf((*MockSealingAPI)(nil).ChainPutObj), arg0, arg1) +} + // ChainReadObj mocks base method. func (m *MockSealingAPI) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { m.ctrl.T.Helper() @@ -140,19 +170,34 @@ func (mr *MockSealingAPIMockRecorder) StateAccountKey(arg0, arg1, arg2 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockSealingAPI)(nil).StateAccountKey), arg0, arg1, arg2) } -// StateComputeDataCID mocks base method. -func (m *MockSealingAPI) StateComputeDataCID(arg0 context.Context, arg1 address.Address, arg2 abi.RegisteredSealProof, arg3 []abi.DealID, arg4 types.TipSetKey) (cid.Cid, error) { +// StateGetActor mocks base method. +func (m *MockSealingAPI) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.ActorV5, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ActorV5) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetActor indicates an expected call of StateGetActor. +func (mr *MockSealingAPIMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockSealingAPI)(nil).StateGetActor), arg0, arg1, arg2) +} + +// StateGetAllocation mocks base method. 
+func (m *MockSealingAPI) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StateComputeDataCID", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(cid.Cid) + ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Allocation) ret1, _ := ret[1].(error) return ret0, ret1 } -// StateComputeDataCID indicates an expected call of StateComputeDataCID. -func (mr *MockSealingAPIMockRecorder) StateComputeDataCID(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +// StateGetAllocation indicates an expected call of StateGetAllocation. +func (mr *MockSealingAPIMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateComputeDataCID", reflect.TypeOf((*MockSealingAPI)(nil).StateComputeDataCID), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocation), arg0, arg1, arg2, arg3) } // StateGetAllocationForPendingDeal mocks base method. @@ -170,6 +215,21 @@ func (mr *MockSealingAPIMockRecorder) StateGetAllocationForPendingDeal(arg0, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) } +// StateGetAllocationIdForPendingDeal mocks base method. 
+func (m *MockSealingAPI) StateGetAllocationIdForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (verifreg.AllocationId, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocationIdForPendingDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(verifreg.AllocationId) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocationIdForPendingDeal indicates an expected call of StateGetAllocationIdForPendingDeal. +func (mr *MockSealingAPIMockRecorder) StateGetAllocationIdForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationIdForPendingDeal", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocationIdForPendingDeal), arg0, arg1, arg2) +} + // StateGetRandomnessFromBeacon mocks base method. func (m *MockSealingAPI) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) { m.ctrl.T.Helper() @@ -440,6 +500,21 @@ func (mr *MockSealingAPIMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockSealingAPI)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) } +// StateVMCirculatingSupplyInternal mocks base method. +func (m *MockSealingAPI) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) + ret0, _ := ret[0].(api.CirculatingSupply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. 
+func (mr *MockSealingAPIMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockSealingAPI)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) +} + // StateWaitMsg mocks base method. func (m *MockSealingAPI) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) { m.ctrl.T.Helper() diff --git a/storage/pipeline/mocks/mock_precommit_batcher.go b/storage/pipeline/mocks/mock_precommit_batcher.go index 68cce7fb0f3..fd46f601b77 100644 --- a/storage/pipeline/mocks/mock_precommit_batcher.go +++ b/storage/pipeline/mocks/mock_precommit_batcher.go @@ -103,6 +103,21 @@ func (mr *MockPreCommitBatcherApiMockRecorder) StateAccountKey(arg0, arg1, arg2 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateAccountKey), arg0, arg1, arg2) } +// StateGetAllocation mocks base method. +func (m *MockPreCommitBatcherApi) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocation indicates an expected call of StateGetAllocation. +func (mr *MockPreCommitBatcherApiMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateGetAllocation), arg0, arg1, arg2, arg3) +} + // StateGetAllocationForPendingDeal mocks base method. 
func (m *MockPreCommitBatcherApi) StateGetAllocationForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*verifreg.Allocation, error) { m.ctrl.T.Helper() diff --git a/storage/pipeline/piece/cbor_gen.go b/storage/pipeline/piece/cbor_gen.go new file mode 100644 index 00000000000..ccf44e54b29 --- /dev/null +++ b/storage/pipeline/piece/cbor_gen.go @@ -0,0 +1,451 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package piece + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" + + abi "github.com/filecoin-project/go-state-types/abi" + miner "github.com/filecoin-project/go-state-types/builtin/v13/miner" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{166}); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > 8192 { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > 8192 { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + 
return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealProposal (market.DealProposal) (struct) + if len("DealProposal") > 8192 { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealSchedule (piece.DealSchedule) (struct) + if len("DealSchedule") > 8192 { + return xerrors.Errorf("Value in field \"DealSchedule\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealSchedule")); err != nil { + return err + } + + if err := t.DealSchedule.MarshalCBOR(cw); err != nil { + return err + } + + // t.KeepUnsealed (bool) (bool) + if len("KeepUnsealed") > 8192 { + return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { + return err + } + if _, err := cw.WriteString(string("KeepUnsealed")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { + return err + } + + // t.PieceActivationManifest (miner.PieceActivationManifest) (struct) + if len("PieceActivationManifest") > 8192 { + return xerrors.Errorf("Value in field \"PieceActivationManifest\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceActivationManifest"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceActivationManifest")); err != nil { + return err + } + + if err := t.PieceActivationManifest.MarshalCBOR(cw); err != nil { + return err + } + return 
nil +} + +func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceDealInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealProposal (market.DealProposal) (struct) + case "DealProposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.DealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.DealSchedule (piece.DealSchedule) (struct) + case "DealSchedule": + + { + + if err := t.DealSchedule.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) + } + + } + // t.KeepUnsealed (bool) (bool) + case "KeepUnsealed": + + 
maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.KeepUnsealed = false + case 21: + t.KeepUnsealed = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.PieceActivationManifest (miner.PieceActivationManifest) (struct) + case "PieceActivationManifest": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceActivationManifest = new(miner.PieceActivationManifest) + if err := t.PieceActivationManifest.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceActivationManifest pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealSchedule) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.EndEpoch (abi.ChainEpoch) (int64) + if len("EndEpoch") > 8192 { + return xerrors.Errorf("Value in field \"EndEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("EndEpoch")); err != nil { + return err + } + + if t.EndEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { + return err + } + } + + // t.StartEpoch (abi.ChainEpoch) (int64) + if len("StartEpoch") > 8192 { + return xerrors.Errorf("Value in field \"StartEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, 
uint64(len("StartEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("StartEpoch")); err != nil { + return err + } + + if t.StartEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } + + return nil +} + +func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealSchedule{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.EndEpoch (abi.ChainEpoch) (int64) + case "EndEpoch": + { + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + var extraI int64 + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EndEpoch = abi.ChainEpoch(extraI) + } + // t.StartEpoch (abi.ChainEpoch) (int64) + case "StartEpoch": + { + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + var extraI int64 + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 
{ + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/storage/pipeline/piece/piece_info.go b/storage/pipeline/piece/piece_info.go new file mode 100644 index 00000000000..7ee8f70292e --- /dev/null +++ b/storage/pipeline/piece/piece_info.go @@ -0,0 +1,186 @@ +package piece + +import ( + "context" + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" +) + +// DealInfo is a tuple of deal identity and its schedule +type PieceDealInfo struct { + // "Old" builtin-market deal info + PublishCid *cid.Cid + DealID abi.DealID + DealProposal *market.DealProposal + + // Common deal info, required for all pieces + // TODO: https://github.com/filecoin-project/lotus/issues/11237 + DealSchedule DealSchedule + + // Direct Data Onboarding + // When PieceActivationManifest is set, builtin-market deal info must not be set + PieceActivationManifest *miner.PieceActivationManifest + + // Best-effort deal asks + KeepUnsealed bool +} + +// DealSchedule communicates the time interval of a storage deal. The deal must +// appear in a sealed (proven) sector no later than StartEpoch, otherwise it +// is invalid. 
+type DealSchedule struct { + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch +} + +func (ds *PieceDealInfo) isBuiltinMarketDeal() bool { + return ds.PublishCid != nil +} + +// Valid validates the deal info after being accepted through RPC, checks that +// the deal metadata is well-formed. +func (ds *PieceDealInfo) Valid(nv network.Version) error { + hasLegacyDealInfo := ds.PublishCid != nil && ds.DealID != 0 && ds.DealProposal != nil + hasPieceActivationManifest := ds.PieceActivationManifest != nil + + if hasLegacyDealInfo && hasPieceActivationManifest { + return xerrors.Errorf("piece deal info has both legacy deal info and piece activation manifest") + } + + if !hasLegacyDealInfo && !hasPieceActivationManifest { + return xerrors.Errorf("piece deal info has neither legacy deal info nor piece activation manifest") + } + + if hasLegacyDealInfo { + if _, err := ds.DealProposal.Cid(); err != nil { + return xerrors.Errorf("checking proposal CID: %w", err) + } + } + + if ds.DealSchedule.StartEpoch <= 0 { + return xerrors.Errorf("invalid deal start epoch %d", ds.DealSchedule.StartEpoch) + } + if ds.DealSchedule.EndEpoch <= 0 { + return xerrors.Errorf("invalid deal end epoch %d", ds.DealSchedule.EndEpoch) + } + if ds.DealSchedule.EndEpoch <= ds.DealSchedule.StartEpoch { + return xerrors.Errorf("invalid deal end epoch %d (start %d)", ds.DealSchedule.EndEpoch, ds.DealSchedule.StartEpoch) + } + + if hasPieceActivationManifest { + if nv < network.Version22 { + return xerrors.Errorf("direct-data-onboarding pieces aren't accepted before network version 22") + } + + // todo any more checks seem reasonable to put here? 
+ } + + return nil +} + +type AllocationAPI interface { + StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) +} + +func (ds *PieceDealInfo) GetAllocation(ctx context.Context, aapi AllocationAPI, tsk types.TipSetKey) (*verifregtypes.Allocation, error) { + switch { + case ds.isBuiltinMarketDeal(): + return aapi.StateGetAllocationForPendingDeal(ctx, ds.DealID, tsk) + default: + if ds.PieceActivationManifest.VerifiedAllocationKey == nil { + return nil, nil + } + + caddr, err := address.NewIDAddress(uint64(ds.PieceActivationManifest.VerifiedAllocationKey.Client)) + if err != nil { + return nil, err + } + + all, err := aapi.StateGetAllocation(ctx, caddr, verifregtypes.AllocationId(ds.PieceActivationManifest.VerifiedAllocationKey.ID), tsk) + if err != nil { + return nil, err + } + + if all == nil { + return nil, nil + } + + if all.Client != ds.PieceActivationManifest.VerifiedAllocationKey.Client { + return nil, xerrors.Errorf("allocation client mismatch: %d != %d", all.Client, ds.PieceActivationManifest.VerifiedAllocationKey.Client) + } + + return all, nil + } +} + +// StartEpoch returns the last epoch in which the sector containing this deal +// must be sealed (committed) in order for the deal to be valid. +func (ds *PieceDealInfo) StartEpoch() (abi.ChainEpoch, error) { + switch { + case ds.isBuiltinMarketDeal(): + return ds.DealSchedule.StartEpoch, nil + default: + // note - when implementing make sure to cache any dynamically computed values + // todo do we want a smarter mechanism here + return ds.DealSchedule.StartEpoch, nil + } +} + +// EndEpoch returns the minimum epoch until which the sector containing this +// deal must be committed until. 
+func (ds *PieceDealInfo) EndEpoch() (abi.ChainEpoch, error) {
+	switch {
+	case ds.isBuiltinMarketDeal():
+		return ds.DealSchedule.EndEpoch, nil
+	default:
+		// note - when implementing make sure to cache any dynamically computed values
+		// todo do we want a smarter mechanism here
+		// DDO pieces must stay committed at least until the end of their
+		// schedule, so return EndEpoch here too (previously this branch
+		// returned DealSchedule.StartEpoch, under-constraining expiration).
+		return ds.DealSchedule.EndEpoch, nil
+	}
+}
+
+// PieceCID returns the piece's content identifier, taken from the
+// builtin-market deal proposal or the DDO activation manifest as appropriate.
+func (ds *PieceDealInfo) PieceCID() cid.Cid {
+	switch {
+	case ds.isBuiltinMarketDeal():
+		return ds.DealProposal.PieceCID
+	default:
+		return ds.PieceActivationManifest.CID
+	}
+}
+
+// String renders a human-readable summary of the deal info. Note that Key()
+// is derived from this string, so it must be unique per distinct deal.
+func (ds *PieceDealInfo) String() string {
+	switch {
+	case ds.isBuiltinMarketDeal():
+		return fmt.Sprintf("BuiltinMarket{DealID: %d, PieceCID: %s, PublishCid: %s}", ds.DealID, ds.DealProposal.PieceCID, ds.PublishCid)
+	default:
+		// todo check that VAlloc doesn't print as a pointer
+		// NOTE(review): "VAllloc" is misspelled in this output string; left
+		// as-is here since Key() uniqueness, not spelling, is what matters.
+		return fmt.Sprintf("DirectDataOnboarding{PieceCID: %s, VAllloc: %x}", ds.PieceActivationManifest.CID, ds.PieceActivationManifest.VerifiedAllocationKey)
+	}
+}
+
+// KeepUnsealedRequested reports whether an unsealed copy of this piece
+// should be retained by the sealing pipeline.
+func (ds *PieceDealInfo) KeepUnsealedRequested() bool {
+	return ds.KeepUnsealed
+}
+
+// PieceKey uniquely identifies a piece's deal info, for use as a map key.
+type PieceKey string
+
+// Key returns a unique identifier for this deal info, for use in maps.
+func (ds *PieceDealInfo) Key() PieceKey { + return PieceKey(ds.String()) +} + +func (ds *PieceDealInfo) Impl() PieceDealInfo { + return *ds +} diff --git a/storage/pipeline/pledge.go b/storage/pipeline/pledge.go new file mode 100644 index 00000000000..04567fca1b2 --- /dev/null +++ b/storage/pipeline/pledge.go @@ -0,0 +1,114 @@ +package sealing + +import ( + "context" + + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/types" +) + +var initialPledgeNum = types.NewInt(110) +var initialPledgeDen = types.NewInt(100) + +func (m *Sealing) pledgeForPower(ctx context.Context, addedPower abi.StoragePower) (abi.TokenAmount, error) { + store := adt.WrapStore(ctx, cbor.NewCborStore(bstore.NewAPIBlockstore(m.Api))) + + // load power actor + var ( + powerSmoothed builtin.FilterEstimate + pledgeCollateral abi.TokenAmount + ) + if act, err := m.Api.StateGetActor(ctx, power.Address, types.EmptyTSK); err != nil { + return types.EmptyInt, xerrors.Errorf("loading power actor: %w", err) + } else if s, err := power.Load(store, act); err != nil { + return types.EmptyInt, xerrors.Errorf("loading power actor state: %w", err) + } else if p, err := s.TotalPowerSmoothed(); err != nil { + return types.EmptyInt, xerrors.Errorf("failed to determine total power: %w", err) + } else if c, err := s.TotalLocked(); err != nil { + return types.EmptyInt, xerrors.Errorf("failed to determine pledge collateral: %w", err) + } else { + powerSmoothed = p + pledgeCollateral = c + } + + // load reward actor + rewardActor, err := m.Api.StateGetActor(ctx, 
reward.Address, types.EmptyTSK) + if err != nil { + return types.EmptyInt, xerrors.Errorf("loading reward actor: %w", err) + } + + rewardState, err := reward.Load(store, rewardActor) + if err != nil { + return types.EmptyInt, xerrors.Errorf("loading reward actor state: %w", err) + } + + // get circulating supply + circSupply, err := m.Api.StateVMCirculatingSupplyInternal(ctx, types.EmptyTSK) + if err != nil { + return big.Zero(), xerrors.Errorf("getting circulating supply: %w", err) + } + + // do the calculation + initialPledge, err := rewardState.InitialPledgeForPower( + addedPower, + pledgeCollateral, + &powerSmoothed, + circSupply.FilCirculating, + ) + if err != nil { + return big.Zero(), xerrors.Errorf("calculating initial pledge: %w", err) + } + + return types.BigDiv(types.BigMul(initialPledge, initialPledgeNum), initialPledgeDen), nil +} + +func (m *Sealing) sectorWeight(ctx context.Context, sector SectorInfo, expiration abi.ChainEpoch) (abi.StoragePower, error) { + spt, err := m.currentSealProof(ctx) + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting seal proof type: %w", err) + } + + ssize, err := spt.SectorSize() + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting sector size: %w", err) + } + + ts, err := m.Api.ChainHead(ctx) + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting chain head: %w", err) + } + + // get verified deal infos + var w, vw = big.Zero(), big.Zero() + + for _, piece := range sector.Pieces { + if !piece.HasDealInfo() { + // todo StateMinerInitialPledgeCollateral doesn't add cc/padding to non-verified weight, is that correct? 
+ continue + } + + alloc, err := piece.GetAllocation(ctx, m.Api, ts.Key()) + if err != nil || alloc == nil { + w = big.Add(w, abi.NewStoragePower(int64(piece.Piece().Size))) + continue + } + + vw = big.Add(vw, abi.NewStoragePower(int64(piece.Piece().Size))) + } + + // load market actor + duration := expiration - ts.Height() + sectorWeight := builtin.QAPowerForWeight(ssize, duration, w, vw) + + return sectorWeight, nil +} diff --git a/storage/pipeline/precommit_batch.go b/storage/pipeline/precommit_batch.go index 3a86c8628e0..099988010db 100644 --- a/storage/pipeline/precommit_batch.go +++ b/storage/pipeline/precommit_batch.go @@ -36,6 +36,7 @@ type PreCommitBatcherApi interface { ChainHead(ctx context.Context) (*types.TipSet, error) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) // Address selector WalletBalance(context.Context, address.Address) (types.BigInt, error) @@ -428,11 +429,18 @@ func (b *PreCommitBatcher) Stop(ctx context.Context) error { func getDealStartCutoff(si SectorInfo) abi.ChainEpoch { cutoffEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { + continue + } + + startEpoch, err := p.StartEpoch() + if err != nil { + // almost definitely can't happen, but if it does there's less harm in + // just logging the error and moving on + log.Errorw("failed to get deal start epoch", "error", err) continue } - startEpoch := p.DealInfo.DealSchedule.StartEpoch if startEpoch < cutoffEpoch { cutoffEpoch = startEpoch } @@ -444,15 +452,19 @@ func getDealStartCutoff(si SectorInfo) abi.ChainEpoch { func (b *PreCommitBatcher) getAllocationCutoff(si 
SectorInfo) abi.ChainEpoch { cutoff := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - alloc, _ := b.api.StateGetAllocationForPendingDeal(b.mctx, p.DealInfo.DealID, types.EmptyTSK) + alloc, err := p.GetAllocation(b.mctx, b.api, types.EmptyTSK) + if err != nil { + log.Errorw("failed to get deal allocation", "error", err) + } // alloc is nil if this is not a verified deal in nv17 or later if alloc == nil { continue } + if alloc.Expiration < cutoff { cutoff = alloc.Expiration } diff --git a/storage/pipeline/precommit_batch_test.go b/storage/pipeline/precommit_batch_test.go deleted file mode 100644 index 1f3aaf24472..00000000000 --- a/storage/pipeline/precommit_batch_test.go +++ /dev/null @@ -1,291 +0,0 @@ -// stm: #unit -package sealing_test - -import ( - "bytes" - "context" - "sort" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/network" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/config" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/pipeline/mocks" - "github.com/filecoin-project/lotus/storage/pipeline/sealiface" -) - -var fc = config.MinerFeeConfig{ - MaxPreCommitGasFee: types.FIL(types.FromFil(1)), - MaxCommitGasFee: types.FIL(types.FromFil(1)), - MaxTerminateGasFee: types.FIL(types.FromFil(1)), - MaxPreCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, - 
MaxCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, -} - -func TestPrecommitBatcher(t *testing.T) { - //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - ctx := context.Background() - - as := asel(func(ctx context.Context, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return t0123, big.Zero(), nil - }) - - maxBatch := miner6.PreCommitSectorBatchMaxSize - - cfg := func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 2, - MaxSealingSectors: 0, - MaxSealingSectorsForDeals: 0, - WaitDealsDelay: time.Hour * 6, - AlwaysKeepUnsealedCopy: true, - - MaxPreCommitBatch: maxBatch, - PreCommitBatchWait: 24 * time.Hour, - PreCommitBatchSlack: 3 * time.Hour, - BatchPreCommitAboveBaseFee: big.NewInt(10000), - - AggregateCommits: true, - MinCommitBatch: miner6.MinAggregatedSectors, - MaxCommitBatch: miner6.MaxAggregatedSectors, - CommitBatchWait: 24 * time.Hour, - CommitBatchSlack: 1 * time.Hour, - - TerminateBatchMin: 1, - TerminateBatchMax: 100, - TerminateBatchWait: 5 * time.Minute, - }, nil - } - - type promise func(t *testing.T) - type action func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise - - actions := func(as ...action) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - var ps []promise - for _, a := range as { - p := a(t, s, pcb) - if p != nil { - ps = append(ps, p) - } - } - - if len(ps) > 0 { - return func(t *testing.T) { - for _, p := range ps { - p(t) - } - } - } - return nil - } - } - - addSector := func(sn abi.SectorNumber, aboveBalancer bool) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - var pcres sealiface.PreCommitBatchRes - var pcerr error - done 
:= sync.Mutex{} - done.Lock() - - si := pipeline.SectorInfo{ - SectorNumber: sn, - } - - basefee := big.NewInt(9999) - if aboveBalancer { - basefee = big.NewInt(10001) - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil).MaxTimes(2) // once in AddPreCommit - - go func() { - defer done.Unlock() - pcres, pcerr = pcb.AddPreCommit(ctx, si, big.Zero(), &minertypes.SectorPreCommitInfo{ - SectorNumber: si.SectorNumber, - SealedCID: fakePieceCid(t), - DealIDs: nil, - Expiration: 0, - }) - }() - - return func(t *testing.T) { - done.Lock() - require.NoError(t, pcerr) - require.Empty(t, pcres.Error) - require.Contains(t, pcres.Sectors, si.SectorNumber) - } - } - } - - addSectors := func(sectors []abi.SectorNumber, aboveBalancer bool) action { - as := make([]action, len(sectors)) - for i, sector := range sectors { - as[i] = addSector(sector, aboveBalancer) - } - return actions(as...) - } - - waitPending := func(n int) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - require.Eventually(t, func() bool { - p, err := pcb.Pending(ctx) - require.NoError(t, err) - return len(p) == n - }, time.Second*5, 10*time.Millisecond) - - return nil - } - } - - //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 - expectSend := func(expect []abi.SectorNumber, gasOverLimit bool) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - if gasOverLimit { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &api.ErrOutOfGas{}) - } else { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - if !gasOverLimit { - s.EXPECT().MpoolPushMessage(gomock.Any(), funMatcher(func(i 
interface{}) bool { - b := i.(*types.Message) - var params miner6.PreCommitSectorBatchParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - for s, number := range expect { - require.Equal(t, number, params.Sectors[s].SectorNumber) - } - return true - }), gomock.Any()).Return(dummySmsg, nil) - } - return nil - } - } - - expectInitialCalls := func() action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, big.NewInt(10001), 1), nil) - return nil - } - } - - flush := func(expect []abi.SectorNumber) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - _ = expectInitialCalls()(t, s, pcb) - _ = expectSend(expect, false)(t, s, pcb) - - r, err := pcb.Flush(ctx) - require.NoError(t, err) - require.Len(t, r, 1) - require.Empty(t, r[0].Error) - sort.Slice(r[0].Sectors, func(i, j int) bool { - return r[0].Sectors[i] < r[0].Sectors[j] - }) - require.Equal(t, expect, r[0].Sectors) - - return nil - } - } - - getSectors := func(n int) []abi.SectorNumber { - out := make([]abi.SectorNumber, n) - for i := range out { - out[i] = abi.SectorNumber(i) - } - return out - } - - tcs := map[string]struct { - actions []action - }{ - "addSingle": { - actions: []action{ - addSector(0, true), - waitPending(1), - flush([]abi.SectorNumber{0}), - }, - }, - "addMax": { - actions: []action{ - expectInitialCalls(), - expectSend(getSectors(maxBatch), false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addMax-gasAboveLimit": { - actions: []action{ - expectInitialCalls(), - expectSend(getSectors(maxBatch), true), - expectSend(getSectors(maxBatch)[:maxBatch/2], false), - expectSend(getSectors(maxBatch)[maxBatch/2:], false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addOne-belowBaseFee": { - actions: []action{ - expectSend(getSectors(1), false), - addSectors(getSectors(1), false), - }, - 
}, - } - - for name, tc := range tcs { - tc := tc - - t.Run(name, func(t *testing.T) { - // create go mock controller here - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. - defer mockCtrl.Finish() - - // create them mocks - pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl) - pcapi.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version20, nil).AnyTimes() - - pcb := pipeline.NewPreCommitBatcher(ctx, t0123, pcapi, as, fc, cfg) - - var promises []promise - - for _, a := range tc.actions { - p := a(t, pcapi, pcb) - if p != nil { - promises = append(promises, p) - } - } - - for _, p := range promises { - p(t) - } - - err := pcb.Stop(ctx) - require.NoError(t, err) - }) - } -} - -type funMatcher func(interface{}) bool - -func (funMatcher) Matches(interface{}) bool { - return true -} - -func (funMatcher) String() string { - return "fun" -} diff --git a/storage/pipeline/precommit_policy.go b/storage/pipeline/precommit_policy.go index 6e234f93094..6df44d40704 100644 --- a/storage/pipeline/precommit_policy.go +++ b/storage/pipeline/precommit_policy.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/go-state-types/builtin/v8/miner" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -17,7 +16,7 @@ import ( ) type PreCommitPolicy interface { - Expiration(ctx context.Context, ps ...api.SectorPiece) (abi.ChainEpoch, error) + Expiration(ctx context.Context, ps ...SafeSectorPiece) (abi.ChainEpoch, error) } type Chain interface { @@ -60,7 +59,7 @@ func NewBasicPreCommitPolicy(api Chain, cfgGetter dtypes.GetSealingConfigFunc, p // Expiration produces the pre-commit sector expiration epoch for an encoded // replica containing the provided enumeration of pieces and deals. 
-func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...api.SectorPiece) (abi.ChainEpoch, error) { +func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...SafeSectorPiece) (abi.ChainEpoch, error) { ts, err := p.api.ChainHead(ctx) if err != nil { return 0, err @@ -69,17 +68,22 @@ func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...api.SectorP var end *abi.ChainEpoch for _, p := range ps { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - if p.DealInfo.DealSchedule.EndEpoch < ts.Height() { + endEpoch, err := p.EndEpoch() + if err != nil { + return 0, xerrors.Errorf("failed to get end epoch: %w", err) + } + + if endEpoch < ts.Height() { log.Warnf("piece schedule %+v ended before current epoch %d", p, ts.Height()) continue } - if end == nil || *end < p.DealInfo.DealSchedule.EndEpoch { - tmp := p.DealInfo.DealSchedule.EndEpoch + if end == nil || *end < endEpoch { + tmp := endEpoch end = &tmp } } diff --git a/storage/pipeline/precommit_policy_test.go b/storage/pipeline/precommit_policy_test.go index 7865560dec1..ec2a61ff2d4 100644 --- a/storage/pipeline/precommit_policy_test.go +++ b/storage/pipeline/precommit_policy_test.go @@ -11,6 +11,8 @@ import ( commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" @@ -20,6 +22,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" ) @@ -47,6 +50,39 @@ func (f *fakeChain) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey return build.TestNetworkVersion, nil } +func makeBFTs(t 
*testing.T, basefee abi.TokenAmount, h abi.ChainEpoch) *types.TipSet { + dummyCid, _ := cid.Parse("bafkqaaa") + + var ts, err = types.NewTipSet([]*types.BlockHeader{ + { + Height: h, + Miner: builtin.SystemActorAddr, + + Parents: []cid.Cid{}, + + Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, + + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + + ParentBaseFee: basefee, + }, + }) + if t != nil { + require.NoError(t, err) + } + + return ts +} + +func makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet { + return makeBFTs(t, big.NewInt(0), h) +} + func (f *fakeChain) ChainHead(ctx context.Context) (*types.TipSet, error) { return makeTs(nil, f.h), nil } @@ -58,6 +94,10 @@ func fakePieceCid(t *testing.T) cid.Cid { return fakePieceCid } +func cidPtr(c cid.Cid) *cid.Cid { + return &c +} + func TestBasicPolicyEmptySector(t *testing.T) { cfg := fakeConfigGetter(nil) h := abi.ChainEpoch(55) @@ -97,33 +137,35 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 2) longestDealEpochEnd := abi.ChainEpoch(547300) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(42), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(42), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(70), EndEpoch: abi.ChainEpoch(547275), }, }, - }, - { + }), + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(43), - DealSchedule: api.DealSchedule{ + 
DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(43), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(80), EndEpoch: longestDealEpochEnd, }, }, - }, + }), } exp, err := policy.Expiration(context.Background(), pieces...) @@ -138,20 +180,21 @@ func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 0) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(44), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(44), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, }, - }, + }), } exp, err := pcp.Expiration(context.Background(), pieces...) @@ -170,27 +213,28 @@ func TestMissingDealIsIgnored(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 0) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(44), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(44), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(547300), }, }, - }, - { + }), + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, DealInfo: nil, - }, + }), } exp, err := policy.Expiration(context.Background(), pieces...) 
diff --git a/storage/pipeline/receive.go b/storage/pipeline/receive.go index 8427eba54f5..231afbc39c2 100644 --- a/storage/pipeline/receive.go +++ b/storage/pipeline/receive.go @@ -86,6 +86,11 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta return SectorInfo{}, xerrors.Errorf("getting chain head: %w", err) } + nv, err := m.Api.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return SectorInfo{}, xerrors.Errorf("getting network version: %w", err) + } + var info SectorInfo var validatePoRep bool @@ -217,9 +222,24 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta info.State = ReceiveSector info.SectorNumber = meta.Sector.Number - info.Pieces = meta.Pieces + info.Pieces = make([]SafeSectorPiece, len(meta.Pieces)) info.SectorType = meta.Type + for i, piece := range meta.Pieces { + info.Pieces[i] = SafeSectorPiece{ + real: piece, + } + + if !info.Pieces[i].HasDealInfo() { + continue // cc + } + + err := info.Pieces[i].DealInfo().Valid(nv) + if err != nil { + return SectorInfo{}, xerrors.Errorf("piece %d deal info invalid: %w", i, err) + } + } + if meta.RemoteSealingDoneEndpoint != "" { // validate the url if _, err := url.Parse(meta.RemoteSealingDoneEndpoint); err != nil { @@ -229,7 +249,7 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta info.RemoteSealingDoneEndpoint = meta.RemoteSealingDoneEndpoint } - if err := checkPieces(ctx, m.maddr, meta.Sector.Number, meta.Pieces, m.Api, false); err != nil { + if err := checkPieces(ctx, m.maddr, meta.Sector.Number, info.Pieces, m.Api, false); err != nil { return SectorInfo{}, xerrors.Errorf("checking pieces: %w", err) } diff --git a/storage/pipeline/sealiface/config.go b/storage/pipeline/sealiface/config.go index e41b143ec20..2ac6e0d588f 100644 --- a/storage/pipeline/sealiface/config.go +++ b/storage/pipeline/sealiface/config.go @@ -62,4 +62,9 @@ type Config struct { TerminateBatchWait time.Duration UseSyntheticPoRep 
bool + + RequireActivationSuccess bool + RequireActivationSuccessUpdate bool + RequireNotificationSuccess bool + RequireNotificationSuccessUpdate bool } diff --git a/storage/pipeline/sealing.go b/storage/pipeline/sealing.go index 936bd8b39e1..75791fae8c0 100644 --- a/storage/pipeline/sealing.go +++ b/storage/pipeline/sealing.go @@ -5,6 +5,7 @@ import ( "sync" "time" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" @@ -25,12 +26,15 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" + "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/storage/ctladdr" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -49,7 +53,6 @@ type SealingAPI interface { StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) - StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) 
(*miner.SectorOnChainInfo, error) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorLocation, error) StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) @@ -72,6 +75,13 @@ type SealingAPI interface { ChainReadObj(context.Context, cid.Cid) ([]byte, error) StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) + ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) + ChainPutObj(ctx context.Context, block blocks.Block) error // Address selector WalletBalance(context.Context, address.Address) (types.BigInt, error) @@ -110,8 +120,8 @@ type Sealing struct { inputLk sync.Mutex openSectors map[abi.SectorID]*openSector sectorTimers map[abi.SectorID]*time.Timer - pendingPieces map[cid.Cid]*pendingPiece - assignedPieces map[abi.SectorID][]cid.Cid + pendingPieces map[piece.PieceKey]*pendingPiece + assignedPieces map[abi.SectorID][]piece.PieceKey nextDealSector *abi.SectorNumber // used to prevent a race where we could create a new sector more than once available map[abi.SectorID]struct{} @@ -139,16 +149,16 @@ type openSector struct { number abi.SectorNumber ccUpdate bool - maybeAccept func(cid.Cid) error // called with inputLk + maybeAccept func(key piece.PieceKey) error // called with 
inputLk } func (o *openSector) checkDealAssignable(piece *pendingPiece, expF expFn) (bool, error) { log := log.With( "sector", o.number, - "deal", piece.deal.DealID, - "dealEnd", piece.deal.DealProposal.EndEpoch, - "dealStart", piece.deal.DealProposal.StartEpoch, + "piece", piece.deal.String(), + "dealEnd", result.Wrap(piece.deal.EndEpoch()), + "dealStart", result.Wrap(piece.deal.StartEpoch()), "dealClaimEnd", piece.claimTerms.claimTermEnd, "lastAssignedDealEnd", o.lastDealEnd, @@ -181,7 +191,12 @@ func (o *openSector) checkDealAssignable(piece *pendingPiece, expF expFn) (bool, return false, nil } - if sectorExpiration < piece.deal.DealProposal.EndEpoch { + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + return false, xerrors.Errorf("failed to get end epoch: %w", err) + } + + if sectorExpiration < endEpoch { log.Debugw("deal not assignable to sector", "reason", "sector expiration less than deal expiration") return false, nil } @@ -205,7 +220,7 @@ type pendingPiece struct { resp *pieceAcceptResp size abi.UnpaddedPieceSize - deal api.PieceDealInfo + deal UniversalPieceInfo claimTerms pieceClaimBounds @@ -215,10 +230,10 @@ type pendingPiece struct { accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error) } -func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sealer.SectorManager, verif storiface.Verifier, prov storiface.Prover, pcp PreCommitPolicy, gc dtypes.GetSealingConfigFunc, journal journal.Journal, addrSel AddressSelector) *Sealing { +func New(mctx context.Context, sapi SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sealer.SectorManager, verif storiface.Verifier, prov storiface.Prover, pcp PreCommitPolicy, gc dtypes.GetSealingConfigFunc, journal journal.Journal, addrSel AddressSelector) *Sealing { s := &Sealing{ - Api: api, - DealInfo: &CurrentDealInfoManager{api}, + Api: sapi, + DealInfo: 
&CurrentDealInfoManager{sapi}, ds: ds, @@ -232,8 +247,8 @@ func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events openSectors: map[abi.SectorID]*openSector{}, sectorTimers: map[abi.SectorID]*time.Timer{}, - pendingPieces: map[cid.Cid]*pendingPiece{}, - assignedPieces: map[abi.SectorID][]cid.Cid{}, + pendingPieces: map[piece.PieceKey]*pendingPiece{}, + assignedPieces: map[abi.SectorID][]piece.PieceKey{}, available: map[abi.SectorID]struct{}{}, @@ -242,9 +257,9 @@ func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events addrSel: addrSel, - terminator: NewTerminationBatcher(mctx, maddr, api, addrSel, fc, gc), - precommiter: NewPreCommitBatcher(mctx, maddr, api, addrSel, fc, gc), - commiter: NewCommitBatcher(mctx, maddr, api, addrSel, fc, gc, prov), + terminator: NewTerminationBatcher(mctx, maddr, sapi, addrSel, fc, gc), + precommiter: NewPreCommitBatcher(mctx, maddr, sapi, addrSel, fc, gc), + commiter: NewCommitBatcher(mctx, maddr, sapi, addrSel, fc, gc, prov), getConfig: gc, diff --git a/storage/pipeline/states_failed.go b/storage/pipeline/states_failed.go index 3323c4c9bc4..3e4ea4dde1f 100644 --- a/storage/pipeline/states_failed.go +++ b/storage/pipeline/states_failed.go @@ -235,7 +235,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect return nil } - if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, ts.Key(), m.Api); err != nil { + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, m.Api); err != nil { switch err.(type) { case *ErrApi: log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err) @@ -265,7 +265,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect } if !active { err := xerrors.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber) - log.Errorf(err.Error()) + log.Errorf("%s", err) return ctx.Send(SectorAbortUpgrade{err}) } @@ -466,7 +466,7 @@ func (m 
*Sealing) handleAbortUpgrade(ctx statemachine.Context, sector SectorInfo // failWith is a mutator or global mutator func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, sector SectorInfo, failWith interface{}) error { - toFix, paddingPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr) + toFix, nonBuiltinMarketPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr) if err != nil { return err } @@ -478,33 +478,35 @@ func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, secto updates := map[int]abi.DealID{} for _, i := range toFix { + // note: all toFix pieces are builtin-market pieces + p := sector.Pieces[i] - if p.DealInfo.PublishCid == nil { + if p.Impl().PublishCid == nil { // TODO: check if we are in an early enough state try to remove this piece - log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID) + log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID) // Not much to do here (and this can only happen for old spacerace sectors) return ctx.Send(failWith) } var dp *market.DealProposal - if p.DealInfo.DealProposal != nil { - mdp := *p.DealInfo.DealProposal + if p.Impl().DealProposal != nil { + mdp := *p.Impl().DealProposal dp = &mdp } - res, err := m.DealInfo.GetCurrentDealInfo(ctx.Context(), ts.Key(), dp, *p.DealInfo.PublishCid) + res, err := m.DealInfo.GetCurrentDealInfo(ctx.Context(), ts.Key(), dp, *p.Impl().PublishCid) if err != nil { failed[i] = xerrors.Errorf("getting current deal info for piece %d: %w", i, err) continue } if res.MarketDeal == nil { - failed[i] = xerrors.Errorf("nil market deal (%d,%d,%d,%s)", i, sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID) + failed[i] = xerrors.Errorf("nil market deal (%d,%d,%d,%s)", 
i, sector.SectorNumber, p.Impl().DealID, p.Impl().DealProposal.PieceCID) continue } - if res.MarketDeal.Proposal.PieceCID != p.Piece.PieceCID { - failed[i] = xerrors.Errorf("recovered piece (%d) deal in sector %d (dealid %d) has different PieceCID %s != %s", i, sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, res.MarketDeal.Proposal.PieceCID) + if res.MarketDeal.Proposal.PieceCID != p.PieceCID() { + failed[i] = xerrors.Errorf("recovered piece (%d) deal in sector %d (dealid %d) has different PieceCID %s != %s", i, sector.SectorNumber, p.Impl().DealID, p.Impl().DealProposal.PieceCID, res.MarketDeal.Proposal.PieceCID) continue } @@ -517,7 +519,7 @@ func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, secto merr = multierror.Append(merr, e) } - if len(failed)+paddingPieces == len(sector.Pieces) { + if len(failed)+nonBuiltinMarketPieces == len(sector.Pieces) { log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr) return ctx.Send(failWith) } @@ -542,6 +544,7 @@ func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{xerrors.New("failed recovering deal ids")}) } +// recoveryPiecesToFix returns the list of sector piece indexes to fix, and the number of non-builtin-market pieces func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) { ts, err := api.ChainHead(ctx) if err != nil { @@ -549,51 +552,68 @@ func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, } var toFix []int - paddingPieces := 0 + nonBuiltinMarketPieces := 0 for i, p := range sector.Pieces { - // if no deal is associated with the piece, ensure that we added it as - // filler (i.e. 
ensure that it has a zero PieceCID) - if p.DealInfo == nil { - exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) - if !p.Piece.PieceCID.Equals(exp) { - return nil, 0, xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) - } - paddingPieces++ - continue - } + i, p := i, p + + err := p.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // if no deal is associated with the piece, ensure that we added it as + // filler (i.e. ensure that it has a zero PieceCID) + exp := zerocomm.ZeroPieceCommitment(p.Piece().Size.Unpadded()) + if !info.PieceCID().Equals(exp) { + return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece().PieceCID) + } + nonBuiltinMarketPieces++ + return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo().Impl().DealID, ts.Key()) + if err != nil { + log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo().Impl().DealID, i, err) + toFix = append(toFix, i) + return nil + } - deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, ts.Key()) - if err != nil { - log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err) - toFix = append(toFix, i) - continue - } + if deal.Proposal.Provider != maddr { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, deal.Proposal.Provider, maddr) + toFix = append(toFix, i) + return nil + } - if deal.Proposal.Provider != maddr { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, deal.Proposal.Provider, maddr) - toFix = append(toFix, i) - continue - } + if deal.Proposal.PieceCID != p.Piece().PieceCID { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: 
%s != %s", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, p.Piece().PieceCID, deal.Proposal.PieceCID) + toFix = append(toFix, i) + return nil + } - if deal.Proposal.PieceCID != p.Piece.PieceCID { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, deal.Proposal.PieceCID) - toFix = append(toFix, i) - continue - } + if p.Piece().Size != deal.Proposal.PieceSize { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, p.Piece().Size, deal.Proposal.PieceSize) + toFix = append(toFix, i) + return nil + } - if p.Piece.Size != deal.Proposal.PieceSize { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, deal.Proposal.PieceSize) - toFix = append(toFix, i) - continue - } + if ts.Height() >= deal.Proposal.StartEpoch { + // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces + // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) + return xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, deal.Proposal.StartEpoch, ts.Height()) + } + + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + // DDO pieces have no repair strategy - if ts.Height() >= deal.Proposal.StartEpoch { - // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces - // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) - return nil, 0, xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired 
deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, deal.Proposal.StartEpoch, ts.Height()) + nonBuiltinMarketPieces++ + return nil + }, + }) + + if err != nil { + return nil, 0, xerrors.Errorf("checking piece %d: %w", i, err) } } - return toFix, paddingPieces, nil + return toFix, nonBuiltinMarketPieces, nil } diff --git a/storage/pipeline/states_failed_test.go b/storage/pipeline/states_failed_test.go index f6846c8f5f1..bc658d59ba0 100644 --- a/storage/pipeline/states_failed_test.go +++ b/storage/pipeline/states_failed_test.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" pipeline "github.com/filecoin-project/lotus/storage/pipeline" "github.com/filecoin-project/lotus/storage/pipeline/mocks" + "github.com/filecoin-project/lotus/storage/pipeline/piece" ) func TestStateRecoverDealIDs(t *testing.T) { @@ -76,16 +77,16 @@ func TestStateRecoverDealIDs(t *testing.T) { // TODO sctx should satisfy an interface so it can be useable for mocking. This will fail because we are passing in an empty context now to get this to build. 
// https://github.com/filecoin-project/lotus/issues/7867 err := fakeSealing.HandleRecoverDealIDs(statemachine.Context{}, pipeline.SectorInfo{ - Pieces: []api2.SectorPiece{ - { - DealInfo: &api2.PieceDealInfo{ + Pieces: []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api2.SectorPiece{ + DealInfo: &piece.PieceDealInfo{ DealID: dealId, PublishCid: &pc, }, Piece: abi.PieceInfo{ PieceCID: idCid("oldPieceCID"), }, - }, + }), }, }) require.NoError(t, err) diff --git a/storage/pipeline/states_replica_update.go b/storage/pipeline/states_replica_update.go index 6717f49a6a6..380078e75c0 100644 --- a/storage/pipeline/states_replica_update.go +++ b/storage/pipeline/states_replica_update.go @@ -22,7 +22,7 @@ import ( func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { // if the sector ended up not having any deals, abort the upgrade - if !sector.hasDeals() { + if !sector.hasData() { return ctx.Send(SectorAbortUpgrade{xerrors.New("sector had no deals")}) } @@ -58,7 +58,7 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect } if !active { err := xerrors.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber) - log.Errorf(err.Error()) + log.Errorf("%s", err) return ctx.Send(SectorAbortUpgrade{err}) } @@ -82,14 +82,13 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect } func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { - ts, err := m.Api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) return nil } - if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, ts.Key(), m.Api); err != nil { + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, m.Api); err != nil { return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } @@ -114,24 +113,8 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec 
log.Errorf("failed to get update proof type from seal proof: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - enc := new(bytes.Buffer) - params := &miner.ProveReplicaUpdatesParams{ - Updates: []miner.ReplicaUpdate{ - { - SectorID: sector.SectorNumber, - Deadline: sl.Deadline, - Partition: sl.Partition, - NewSealedSectorCID: *sector.UpdateSealed, - Deals: sector.dealIDs(), - UpdateProofType: updateProof, - ReplicaProof: sector.ReplicaUpdateProof, - }, - }, - } - if err := params.MarshalCBOR(enc); err != nil { - log.Errorf("failed to serialize update replica params: %w", err) - return ctx.Send(SectorSubmitReplicaUpdateFailed{}) - } + + // figure out from address and collateral cfg, err := m.getConfig() if err != nil { @@ -140,34 +123,24 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec onChainInfo, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key()) if err != nil { - log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) - return nil + log.Errorf("failed to get sector info: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } if onChainInfo == nil { - return xerrors.Errorf("sector not found %d", sector.SectorNumber) + log.Errorw("on chain info was nil", "sector", sector.SectorNumber) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - sp, err := m.currentSealProof(ctx.Context()) + weightUpdate, err := m.sectorWeight(ctx.Context(), sector, onChainInfo.Expiration) if err != nil { - log.Errorf("sealer failed to return current seal proof not proceeding: %+v", err) - return nil - } - virtualPCI := miner.SectorPreCommitInfo{ - SealProof: sp, - SectorNumber: sector.SectorNumber, - SealedCID: *sector.UpdateSealed, - //SealRandEpoch: 0, - DealIDs: sector.dealIDs(), - Expiration: onChainInfo.Expiration, - //ReplaceCapacity: false, - //ReplaceSectorDeadline: 0, - //ReplaceSectorPartition: 0, - //ReplaceSectorNumber: 0, + log.Errorf("failed to get sector 
weight: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, virtualPCI, ts.Key()) + collateral, err := m.pledgeForPower(ctx.Context(), weightUpdate) if err != nil { - return xerrors.Errorf("getting initial pledge collateral: %w", err) + log.Errorf("failed to get pledge for power: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } collateral = big.Sub(collateral, onChainInfo.InitialPledge) @@ -194,13 +167,81 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec log.Errorf("no good address to send replica update message from: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + + // figure out message type + + pams, deals, err := m.processPieces(ctx.Context(), sector) + if err != nil { + log.Errorf("failed to process pieces: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + if len(pams) > 0 { + // PRU3 + + params := &miner.ProveReplicaUpdates3Params{ + SectorUpdates: []miner.SectorUpdateManifest{ + { + Sector: sector.SectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + NewSealedCID: *sector.UpdateSealed, + Pieces: pams, + }, + }, + SectorProofs: [][]byte{sector.ReplicaUpdateProof}, + UpdateProofsType: updateProof, + //AggregateProof + //AggregateProofType + RequireActivationSuccess: cfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: cfg.RequireNotificationSuccessUpdate, + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + log.Errorf("failed to serialize update replica params: %w", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates3, collateral, 
big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid}) + } + + // PRU2 + params := &miner.ProveReplicaUpdatesParams2{ + Updates: []miner.ReplicaUpdate2{ + { + SectorID: sector.SectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + NewSealedSectorCID: *sector.UpdateSealed, + NewUnsealedSectorCID: *sector.UpdateUnsealed, + UpdateProofType: updateProof, + ReplicaProof: sector.ReplicaUpdateProof, + Deals: deals, + }, + }, + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + log.Errorf("failed to serialize update replica params: %w", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates2, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) if err != nil { log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid}) + } func (m *Sealing) handleWaitMutable(ctx statemachine.Context, sector SectorInfo) error { diff --git a/storage/pipeline/states_sealing.go b/storage/pipeline/states_sealing.go index 5c91161efb1..aef394789d1 100644 --- a/storage/pipeline/states_sealing.go +++ b/storage/pipeline/states_sealing.go @@ -12,11 +12,16 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" + miner2 
"github.com/filecoin-project/go-state-types/builtin/v13/miner" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" @@ -25,6 +30,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -32,6 +38,8 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/storiface" ) +const MinDDONetworkVersion = network.Version22 + var DealSectorPriority = 1024 var MaxTicketAge = policy.MaxPreCommitRandomnessLookback @@ -59,7 +67,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err m.cleanupAssignedDeals(sector) // if this is a snapdeals sector, but it ended up not having any deals, abort the upgrade - if sector.State == SnapDealsPacking && !sector.hasDeals() { + if sector.State == SnapDealsPacking && !sector.hasData() { return ctx.Send(SectorAbortUpgrade{xerrors.New("sector had no deals")}) } @@ -67,7 +75,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err var allocated abi.UnpaddedPieceSize for _, piece := range sector.Pieces { - allocated += piece.Piece.Size.Unpadded() + allocated += piece.Piece().Size.Unpadded() } ssize, err := sector.SectorType.SectorSize() @@ -417,11 +425,47 @@ func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*m SealedCID: *sector.CommR, SealRandEpoch: sector.TicketEpoch, - DealIDs: sector.dealIDs(), } - if sector.hasDeals() { + if sector.hasData() { + // only CC sectors don't have UnsealedCID params.UnsealedCid = sector.CommD + + // true 
when the sector has non-builtin-marked data + sectorIsDDO := false + + for _, piece := range sector.Pieces { + err := piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + return nil // ignore + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + if sectorIsDDO { + return nil // will be passed later in the Commit message + } + params.DealIDs = append(params.DealIDs, info.Impl().DealID) + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + if nv < MinDDONetworkVersion { + return xerrors.Errorf("DDO sectors are not supported on network version %d", nv) + } + + log.Infow("DDO piece in sector", "sector", sector.SectorNumber, "piece", info.String()) + + sectorIsDDO = true + + // DDO sectors don't carry DealIDs, we will pass those + // deals in the Commit message later + params.DealIDs = nil + return nil + }, + }) + + if err != nil { + return nil, big.Zero(), types.EmptyTSK, xerrors.Errorf("handleDealInfo: %w", err) + } + } } collateral, err := m.Api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, ts.Key()) @@ -572,10 +616,6 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) return xerrors.Errorf("getting config: %w", err) } - log.Info("scheduling seal proof computation...") - - log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%s; d:%s", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) - if sector.CommD == nil || sector.CommR == nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) } @@ -700,6 +740,8 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error { + // TODO: Deprecate this path, always go through batcher, just respect the AggregateCommits config in there + cfg, err := m.getConfig() if err != 
nil { return xerrors.Errorf("getting config: %w", err) @@ -783,11 +825,113 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo }) } +// processPieces returns either: +// - a list of piece activation manifests +// - a list of deal IDs, if all non-filler pieces are deal-id pieces +func (m *Sealing) processPieces(ctx context.Context, sector SectorInfo) ([]miner.PieceActivationManifest, []abi.DealID, error) { + pams := make([]miner.PieceActivationManifest, 0, len(sector.Pieces)) + dealIDs := make([]abi.DealID, 0, len(sector.Pieces)) + var hasDDO bool + + for _, piece := range sector.Pieces { + piece := piece + + // first figure out if this is a ddo sector + err := piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // Fillers are implicit (todo review: Are they??) + return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + hasDDO = true + return nil + }, + }) + if err != nil { + return nil, nil, xerrors.Errorf("handleDealInfo: %w", err) + } + } + for _, piece := range sector.Pieces { + piece := piece + + err := piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // Fillers are implicit (todo review: Are they??) 
+ return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + if hasDDO { + alloc, err := m.Api.StateGetAllocationIdForPendingDeal(ctx, info.Impl().DealID, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting allocation for deal %d: %w", info.Impl().DealID, err) + } + clid, err := m.Api.StateLookupID(ctx, info.Impl().DealProposal.Client, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting client address for deal %d: %w", info.Impl().DealID, err) + } + + clientId, err := address.IDFromAddress(clid) + if err != nil { + return xerrors.Errorf("getting client address for deal %d: %w", info.Impl().DealID, err) + } + + var vac *miner2.VerifiedAllocationKey + if alloc != verifreg.NoAllocationID { + vac = &miner2.VerifiedAllocationKey{ + Client: abi.ActorID(clientId), + ID: verifreg13.AllocationId(alloc), + } + } + + payload, err := cborutil.Dump(info.Impl().DealID) + if err != nil { + return xerrors.Errorf("serializing deal id: %w", err) + } + + pams = append(pams, miner.PieceActivationManifest{ + CID: piece.Piece().PieceCID, + Size: piece.Piece().Size, + VerifiedAllocationKey: vac, + Notify: []miner2.DataActivationNotification{ + { + Address: market.Address, + Payload: payload, + }, + }, + }) + + return nil + } + + dealIDs = append(dealIDs, info.Impl().DealID) + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + pams = append(pams, *piece.Impl().PieceActivationManifest) + return nil + }, + }) + if err != nil { + return nil, nil, xerrors.Errorf("handleDealInfo: %w", err) + } + } + + return pams, dealIDs, nil +} + func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector SectorInfo) error { if sector.CommD == nil || sector.CommR == nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) } + pams, dealIDs, err := m.processPieces(ctx.Context(), sector) + if err != nil { + return err + } + res, err := m.commiter.AddCommit(ctx.Context(), sector, 
AggregateInput{ Info: proof.AggregateSealVerifyInfo{ Number: sector.SectorNumber, @@ -796,8 +940,14 @@ func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector S SealedCID: *sector.CommR, UnsealedCID: *sector.CommD, }, - Proof: sector.Proof, // todo: this correct?? + Proof: sector.Proof, Spt: sector.SectorType, + + ActivationManifest: miner2.SectorActivationManifest{ + SectorNumber: sector.SectorNumber, + Pieces: pams, + }, + DealIDPrecommit: len(dealIDs) > 0, }) if err != nil || res.Error != "" { @@ -875,7 +1025,7 @@ func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)}) } - if cfg.MakeCCSectorsAvailable && !sector.hasDeals() { + if cfg.MakeCCSectorsAvailable && !sector.hasData() { return ctx.Send(SectorFinalizedAvailable{}) } return ctx.Send(SectorFinalized{}) diff --git a/storage/pipeline/types.go b/storage/pipeline/types.go index e752eb2b93b..48ae60546be 100644 --- a/storage/pipeline/types.go +++ b/storage/pipeline/types.go @@ -2,14 +2,20 @@ package sealing import ( "context" + "encoding/json" + "io" "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -41,6 +47,20 @@ const ( RetCommitFailed = ReturnState(CommitFailed) ) +type UniversalPieceInfo interface { + Impl() piece.PieceDealInfo + String() string + Key() piece.PieceKey + + Valid(nv network.Version) error + StartEpoch() (abi.ChainEpoch, error) + EndEpoch() (abi.ChainEpoch, error) + PieceCID() cid.Cid + 
KeepUnsealedRequested() bool + + GetAllocation(ctx context.Context, aapi piece.AllocationAPI, tsk types.TipSetKey) (*verifreg.Allocation, error) +} + type SectorInfo struct { State SectorState SectorNumber abi.SectorNumber @@ -49,7 +69,7 @@ type SectorInfo struct { // Packing CreationTime int64 // unix seconds - Pieces []api.SectorPiece + Pieces []SafeSectorPiece // PreCommit1 TicketValue abi.SealRandomness @@ -79,7 +99,7 @@ type SectorInfo struct { // CCUpdate CCUpdate bool - CCPieces []api.SectorPiece + CCPieces []SafeSectorPiece UpdateSealed *cid.Cid UpdateUnsealed *cid.Cid ReplicaUpdateProof storiface.ReplicaUpdateProof @@ -113,18 +133,19 @@ type SectorInfo struct { func (t *SectorInfo) pieceInfos() []abi.PieceInfo { out := make([]abi.PieceInfo, len(t.Pieces)) for i, p := range t.Pieces { - out[i] = p.Piece + out[i] = p.Piece() } return out } -func (t *SectorInfo) dealIDs() []abi.DealID { - out := make([]abi.DealID, 0, len(t.Pieces)) - for _, p := range t.Pieces { - if p.DealInfo == nil { +func (t *SectorInfo) nonPaddingPieceInfos() []abi.PieceInfo { + out := make([]abi.PieceInfo, len(t.Pieces)) + for i, p := range t.Pieces { + if !p.HasDealInfo() { continue } - out = append(out, p.DealInfo.DealID) + + out[i] = p.Piece() } return out } @@ -132,14 +153,14 @@ func (t *SectorInfo) dealIDs() []abi.DealID { func (t *SectorInfo) existingPieceSizes() []abi.UnpaddedPieceSize { out := make([]abi.UnpaddedPieceSize, len(t.Pieces)) for i, p := range t.Pieces { - out[i] = p.Piece.Size.Unpadded() + out[i] = p.Piece().Size.Unpadded() } return out } -func (t *SectorInfo) hasDeals() bool { +func (t *SectorInfo) hasData() bool { for _, piece := range t.Pieces { - if piece.DealInfo != nil { + if piece.HasDealInfo() { return true } } @@ -151,7 +172,7 @@ func (t *SectorInfo) sealingCtx(ctx context.Context) context.Context { // TODO: can also take start epoch into account to give priority to sectors // we need sealed sooner - if t.hasDeals() { + if t.hasData() { return 
sealer.WithPriority(ctx, DealSectorPriority) } @@ -160,19 +181,19 @@ func (t *SectorInfo) sealingCtx(ctx context.Context) context.Context { // Returns list of offset/length tuples of sector data ranges which clients // requested to keep unsealed -func (t *SectorInfo) keepUnsealedRanges(pieces []api.SectorPiece, invert, alwaysKeep bool) []storiface.Range { +func (t *SectorInfo) keepUnsealedRanges(pieces []SafeSectorPiece, invert, alwaysKeep bool) []storiface.Range { var out []storiface.Range var at abi.UnpaddedPieceSize for _, piece := range pieces { - psize := piece.Piece.Size.Unpadded() + psize := piece.Piece().Size.Unpadded() at += psize - if piece.DealInfo == nil { + if !piece.HasDealInfo() { continue } - keep := piece.DealInfo.KeepUnsealed || alwaysKeep + keep := piece.DealInfo().KeepUnsealedRequested() || alwaysKeep if keep == invert { continue @@ -195,3 +216,110 @@ type SealingStateEvt struct { After SectorState Error string } + +// SafeSectorPiece is a wrapper around SectorPiece which makes it hard to misuse +// especially by making it hard to access raw Deal / DDO info +type SafeSectorPiece struct { + real api.SectorPiece +} + +func SafePiece(piece api.SectorPiece) SafeSectorPiece { + return SafeSectorPiece{piece} +} + +var _ UniversalPieceInfo = &SafeSectorPiece{} + +func (sp *SafeSectorPiece) Piece() abi.PieceInfo { + return sp.real.Piece +} + +func (sp *SafeSectorPiece) HasDealInfo() bool { + return sp.real.DealInfo != nil +} + +func (sp *SafeSectorPiece) DealInfo() UniversalPieceInfo { + return sp.real.DealInfo +} + +// cbor passthrough +func (sp *SafeSectorPiece) UnmarshalCBOR(r io.Reader) (err error) { + return sp.real.UnmarshalCBOR(r) +} + +func (sp *SafeSectorPiece) MarshalCBOR(w io.Writer) error { + return sp.real.MarshalCBOR(w) +} + +// json passthrough +func (sp *SafeSectorPiece) UnmarshalJSON(b []byte) error { + return json.Unmarshal(b, &sp.real) +} + +func (sp *SafeSectorPiece) MarshalJSON() ([]byte, error) { + return json.Marshal(sp.real) +} + 
+type handleDealInfoParams struct { + FillerHandler func(UniversalPieceInfo) error + BuiltinMarketHandler func(UniversalPieceInfo) error + DDOHandler func(UniversalPieceInfo) error +} + +func (sp *SafeSectorPiece) handleDealInfo(params handleDealInfoParams) error { + if !sp.HasDealInfo() { + if params.FillerHandler == nil { + return xerrors.Errorf("FillerHandler is not provided") + } + return params.FillerHandler(sp) + } + + if sp.real.DealInfo.PublishCid != nil { + if params.BuiltinMarketHandler == nil { + return xerrors.Errorf("BuiltinMarketHandler is not provided") + } + return params.BuiltinMarketHandler(sp) + } + + if params.DDOHandler == nil { + return xerrors.Errorf("DDOHandler is not provided") + } + return params.DDOHandler(sp) +} + +// SectorPiece Proxy + +func (sp *SafeSectorPiece) Impl() piece.PieceDealInfo { + return sp.real.DealInfo.Impl() +} + +func (sp *SafeSectorPiece) String() string { + return sp.real.DealInfo.String() +} + +func (sp *SafeSectorPiece) Key() piece.PieceKey { + return sp.real.DealInfo.Key() +} + +func (sp *SafeSectorPiece) Valid(nv network.Version) error { + return sp.real.DealInfo.Valid(nv) +} + +func (sp *SafeSectorPiece) StartEpoch() (abi.ChainEpoch, error) { + return sp.real.DealInfo.StartEpoch() +} + +func (sp *SafeSectorPiece) EndEpoch() (abi.ChainEpoch, error) { + return sp.real.DealInfo.EndEpoch() +} + +func (sp *SafeSectorPiece) PieceCID() cid.Cid { + return sp.real.DealInfo.PieceCID() +} + +func (sp *SafeSectorPiece) KeepUnsealedRequested() bool { + return sp.real.DealInfo.KeepUnsealedRequested() +} + +func (sp *SafeSectorPiece) GetAllocation(ctx context.Context, aapi piece.AllocationAPI, tsk types.TipSetKey) (*verifreg.Allocation, error) { + return sp.real.DealInfo.GetAllocation(ctx, aapi, tsk) +} diff --git a/storage/pipeline/types_test.go b/storage/pipeline/types_test.go index b8fbb113af1..d92b68d5597 100644 --- a/storage/pipeline/types_test.go +++ b/storage/pipeline/types_test.go @@ -13,6 +13,7 @@ import ( tutils 
"github.com/filecoin-project/specs-actors/v2/support/testing" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/storage/pipeline/piece" ) func TestSectorInfoSerialization(t *testing.T) { @@ -23,9 +24,9 @@ func TestSectorInfoSerialization(t *testing.T) { t.Fatal(err) } - dealInfo := api.PieceDealInfo{ + dealInfo := piece.PieceDealInfo{ DealID: d, - DealSchedule: api.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: 0, EndEpoch: 100, }, @@ -43,13 +44,13 @@ func TestSectorInfoSerialization(t *testing.T) { si := &SectorInfo{ State: "stateful", SectorNumber: 234, - Pieces: []api.SectorPiece{{ + Pieces: []SafeSectorPiece{{real: api.SectorPiece{ Piece: abi.PieceInfo{ Size: 5, PieceCID: dummyCid, }, DealInfo: &dealInfo, - }}, + }}}, CommD: &dummyCid, CommR: nil, Proof: nil, @@ -77,8 +78,8 @@ func TestSectorInfoSerialization(t *testing.T) { assert.Equal(t, si.State, si2.State) assert.Equal(t, si.SectorNumber, si2.SectorNumber) - assert.Equal(t, si.Pieces[0].DealInfo.DealID, si2.Pieces[0].DealInfo.DealID) - assert.Equal(t, si.Pieces[0].DealInfo.DealProposal.PieceCID, si2.Pieces[0].DealInfo.DealProposal.PieceCID) + assert.Equal(t, si.Pieces[0].Impl().DealID, si2.Pieces[0].Impl().DealID) + assert.Equal(t, si.Pieces[0].Impl().DealProposal.PieceCID, si2.Pieces[0].Impl().DealProposal.PieceCID) assert.Equal(t, *si.CommD, *si2.CommD) assert.DeepEqual(t, si.TicketValue, si2.TicketValue) assert.Equal(t, si.TicketEpoch, si2.TicketEpoch) diff --git a/storage/pipeline/upgrade_queue.go b/storage/pipeline/upgrade_queue.go index 9d9e1ca46c6..5e3392a9f18 100644 --- a/storage/pipeline/upgrade_queue.go +++ b/storage/pipeline/upgrade_queue.go @@ -21,7 +21,7 @@ func (m *Sealing) MarkForUpgrade(ctx context.Context, id abi.SectorNumber) error return xerrors.Errorf("unable to snap-up sectors not in the 'Proving' state") } - if si.hasDeals() { + if si.hasData() { return xerrors.Errorf("not a committed-capacity sector, has deals") } diff --git 
a/storage/sealer/cbor_gen.go b/storage/sealer/cbor_gen.go index 22da1b52081..e4b8e644dad 100644 --- a/storage/sealer/cbor_gen.go +++ b/storage/sealer/cbor_gen.go @@ -33,7 +33,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { } // t.ID (storiface.CallID) (struct) - if len("ID") > cbg.MaxLength { + if len("ID") > 8192 { return xerrors.Errorf("Value in field \"ID\" was too long") } @@ -49,7 +49,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { } // t.State (sealer.CallState) (uint64) - if len("State") > cbg.MaxLength { + if len("State") > 8192 { return xerrors.Errorf("Value in field \"State\" was too long") } @@ -65,7 +65,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { } // t.Result (sealer.ManyBytes) (struct) - if len("Result") > cbg.MaxLength { + if len("Result") > 8192 { return xerrors.Errorf("Value in field \"Result\" was too long") } @@ -81,7 +81,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { } // t.RetType (sealer.ReturnType) (string) - if len("RetType") > cbg.MaxLength { + if len("RetType") > 8192 { return xerrors.Errorf("Value in field \"RetType\" was too long") } @@ -92,7 +92,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { return err } - if len(t.RetType) > cbg.MaxLength { + if len(t.RetType) > 8192 { return xerrors.Errorf("Value in field t.RetType was too long") } @@ -134,7 +134,7 @@ func (t *Call) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -192,7 +192,7 @@ func (t *Call) UnmarshalCBOR(r io.Reader) (err error) { case "RetType": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -221,7 +221,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.ID (sealer.WorkID) (struct) - if len("ID") > cbg.MaxLength { + if len("ID") > 8192 { return xerrors.Errorf("Value in field \"ID\" was too long") } @@ -237,7 +237,7 @@ func (t 
*WorkState) MarshalCBOR(w io.Writer) error { } // t.Status (sealer.WorkStatus) (string) - if len("Status") > cbg.MaxLength { + if len("Status") > 8192 { return xerrors.Errorf("Value in field \"Status\" was too long") } @@ -248,7 +248,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { return err } - if len(t.Status) > cbg.MaxLength { + if len(t.Status) > 8192 { return xerrors.Errorf("Value in field t.Status was too long") } @@ -260,7 +260,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.StartTime (int64) (int64) - if len("StartTime") > cbg.MaxLength { + if len("StartTime") > 8192 { return xerrors.Errorf("Value in field \"StartTime\" was too long") } @@ -282,7 +282,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.WorkError (string) (string) - if len("WorkError") > cbg.MaxLength { + if len("WorkError") > 8192 { return xerrors.Errorf("Value in field \"WorkError\" was too long") } @@ -293,7 +293,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { return err } - if len(t.WorkError) > cbg.MaxLength { + if len(t.WorkError) > 8192 { return xerrors.Errorf("Value in field t.WorkError was too long") } @@ -305,7 +305,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.WorkerCall (storiface.CallID) (struct) - if len("WorkerCall") > cbg.MaxLength { + if len("WorkerCall") > 8192 { return xerrors.Errorf("Value in field \"WorkerCall\" was too long") } @@ -321,7 +321,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.WorkerHostname (string) (string) - if len("WorkerHostname") > cbg.MaxLength { + if len("WorkerHostname") > 8192 { return xerrors.Errorf("Value in field \"WorkerHostname\" was too long") } @@ -332,7 +332,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { return err } - if len(t.WorkerHostname) > cbg.MaxLength { + if len(t.WorkerHostname) > 8192 { return xerrors.Errorf("Value in field t.WorkerHostname was too long") } @@ -374,7 +374,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) 
{ for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -397,7 +397,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { case "Status": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -408,10 +408,10 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { case "StartTime": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -434,7 +434,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { case "WorkError": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -455,7 +455,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { case "WorkerHostname": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -484,7 +484,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { } // t.Method (sealtasks.TaskType) (string) - if len("Method") > cbg.MaxLength { + if len("Method") > 8192 { return xerrors.Errorf("Value in field \"Method\" was too long") } @@ -495,7 +495,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { return err } - if len(t.Method) > cbg.MaxLength { + if len(t.Method) > 8192 { return xerrors.Errorf("Value in field t.Method was too long") } @@ -507,7 +507,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { } // t.Params (string) (string) - if len("Params") > cbg.MaxLength { + if len("Params") > 8192 { return xerrors.Errorf("Value in field \"Params\" was too long") } @@ -518,7 +518,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { return err } - if len(t.Params) > cbg.MaxLength { + if len(t.Params) > 8192 { return xerrors.Errorf("Value in field t.Params was too long") } @@ -560,7 +560,7 @@ func (t *WorkID) 
UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -573,7 +573,7 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) (err error) { case "Method": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -584,7 +584,7 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) (err error) { case "Params": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } diff --git a/storage/sealer/storiface/cbor_gen.go b/storage/sealer/storiface/cbor_gen.go index 0b42136ead3..79d6219eb95 100644 --- a/storage/sealer/storiface/cbor_gen.go +++ b/storage/sealer/storiface/cbor_gen.go @@ -31,7 +31,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error { } // t.ID (uuid.UUID) (array) - if len("ID") > cbg.MaxLength { + if len("ID") > 8192 { return xerrors.Errorf("Value in field \"ID\" was too long") } @@ -42,7 +42,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error { return err } - if len(t.ID) > cbg.ByteArrayMaxLen { + if len(t.ID) > 2097152 { return xerrors.Errorf("Byte array in field t.ID was too long") } @@ -55,7 +55,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error { } // t.Sector (abi.SectorID) (struct) - if len("Sector") > cbg.MaxLength { + if len("Sector") > 8192 { return xerrors.Errorf("Value in field \"Sector\" was too long") } @@ -101,7 +101,7 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -118,19 +118,17 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.ID: byte array too large (%d)", extra) } if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - if extra != 
16 { return fmt.Errorf("expected array to have 16 elements") } t.ID = [16]uint8{} - if _, err := io.ReadFull(cr, t.ID[:]); err != nil { return err } @@ -166,7 +164,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { } // t.Key (string) (string) - if len("Key") > cbg.MaxLength { + if len("Key") > 8192 { return xerrors.Errorf("Value in field \"Key\" was too long") } @@ -177,7 +175,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { return err } - if len(t.Key) > cbg.MaxLength { + if len(t.Key) > 8192 { return xerrors.Errorf("Value in field t.Key was too long") } @@ -189,7 +187,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { } // t.Value (string) (string) - if len("Value") > cbg.MaxLength { + if len("Value") > 8192 { return xerrors.Errorf("Value in field \"Value\" was too long") } @@ -200,7 +198,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { return err } - if len(t.Value) > cbg.MaxLength { + if len(t.Value) > 8192 { return xerrors.Errorf("Value in field t.Value was too long") } @@ -242,7 +240,7 @@ func (t *SecDataHttpHeader) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -255,7 +253,7 @@ func (t *SecDataHttpHeader) UnmarshalCBOR(r io.Reader) (err error) { case "Key": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -266,7 +264,7 @@ func (t *SecDataHttpHeader) UnmarshalCBOR(r io.Reader) (err error) { case "Value": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -295,7 +293,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { } // t.URL (string) (string) - if len("URL") > cbg.MaxLength { + if len("URL") > 8192 { return xerrors.Errorf("Value in field \"URL\" was too long") } @@ -306,7 +304,7 @@ func (t *SectorLocation) 
MarshalCBOR(w io.Writer) error { return err } - if len(t.URL) > cbg.MaxLength { + if len(t.URL) > 8192 { return xerrors.Errorf("Value in field t.URL was too long") } @@ -318,7 +316,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { } // t.Local (bool) (bool) - if len("Local") > cbg.MaxLength { + if len("Local") > 8192 { return xerrors.Errorf("Value in field \"Local\" was too long") } @@ -334,7 +332,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { } // t.Headers ([]storiface.SecDataHttpHeader) (slice) - if len("Headers") > cbg.MaxLength { + if len("Headers") > 8192 { return xerrors.Errorf("Value in field \"Headers\" was too long") } @@ -345,7 +343,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { return err } - if len(t.Headers) > cbg.MaxLength { + if len(t.Headers) > 8192 { return xerrors.Errorf("Slice value in field t.Headers was too long") } @@ -356,6 +354,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -389,7 +388,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -402,7 +401,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { case "URL": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -435,7 +434,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Headers: array too large (%d)", extra) } @@ -463,6 +462,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { } } + } } diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index 1593174bd8f..4b84e18fbcb 100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -19,6 +19,7 @@ 
import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -48,7 +49,7 @@ func DsKeyToDealID(key datastore.Key) (uint64, error) { } type SectorBuilder interface { - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (api.SectorOffset, error) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) } @@ -100,7 +101,7 @@ func (st *SectorBlocks) writeRef(ctx context.Context, dealID abi.DealID, sectorI return st.keys.Put(ctx, DealIDToDsKey(dealID), newRef) // TODO: batch somehow } -func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { +func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d piece.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { so, err := st.SectorBuilder.SectorAddPieceToAny(ctx, size, r, d) if err != nil { return 0, 0, err diff --git a/storage/wdpost/wdpost_run_test.go b/storage/wdpost/wdpost_run_test.go index a6b37fd542d..381f0e818bd 100644 --- a/storage/wdpost/wdpost_run_test.go +++ b/storage/wdpost/wdpost_run_test.go @@ -16,7 +16,6 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -28,6 +27,7 @@ import ( 
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" @@ -74,13 +74,13 @@ func (m *mockStorageMinerAPI) StateMinerPartitions(ctx context.Context, a addres return m.partitions, nil } -func (m *mockStorageMinerAPI) StateMinerSectors(ctx context.Context, address address.Address, snos *bitfield.BitField, key types.TipSetKey) ([]*miner12.SectorOnChainInfo, error) { - var sis []*miner12.SectorOnChainInfo +func (m *mockStorageMinerAPI) StateMinerSectors(ctx context.Context, address address.Address, snos *bitfield.BitField, key types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + var sis []*miner.SectorOnChainInfo if snos == nil { panic("unsupported") } _ = snos.ForEach(func(i uint64) error { - sis = append(sis, &miner12.SectorOnChainInfo{ + sis = append(sis, &miner.SectorOnChainInfo{ SectorNumber: abi.SectorNumber(i), }) return nil